git.blender.org/blender.git

author     Campbell Barton <ideasman42@gmail.com>  2019-04-22 02:32:37 +0300
committer  Campbell Barton <ideasman42@gmail.com>  2019-04-22 12:48:17 +0300
commit     14a49950ff11f43b7a5a73da545339b969de97f0 (patch)
tree       b935489e6d39db98ecac8784d327ab9c451c8d01  /source/blender/gpu/intern/gpu_batch.c
parent     620b960d3d8cfd90b9f0df6ba3671c33eccb8309 (diff)
Cleanup: style, use braces for gpu
Diffstat (limited to 'source/blender/gpu/intern/gpu_batch.c')
-rw-r--r--  source/blender/gpu/intern/gpu_batch.c | 45
1 file changed, 30 insertions(+), 15 deletions(-)
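
The change is purely mechanical: every single-statement if/else/for body in the touched
GPU code gains explicit braces, with no behavioral change. A minimal standalone sketch of
the rule being applied (the helper below is hypothetical and not code from gpu_batch.c,
though it mirrors the first hunk):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper illustrating the brace rule enforced by this commit. */
    static void set_owned_flag(uint32_t *owns_flag, int v, bool own_vbo)
    {
      /* Before the cleanup the body was brace-less:
       *   if (own_vbo)
       *     *owns_flag |= (1u << v);
       * After the cleanup every single-statement body is braced: */
      if (own_vbo) {
        *owns_flag |= (1u << v);
      }
    }
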
diff --git a/source/blender/gpu/intern/gpu_batch.c b/source/blender/gpu/intern/gpu_batch.c
index 4f5215ce9e6..f179f9ef22c 100644
--- a/source/blender/gpu/intern/gpu_batch.c
+++ b/source/blender/gpu/intern/gpu_batch.c
@@ -194,8 +194,9 @@ int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
#endif
batch->verts[v] = verts;
/* TODO: mark dirty so we can keep attribute bindings up-to-date */
- if (own_vbo)
+ if (own_vbo) {
batch->owns_flag |= (1 << v);
+ }
return v;
}
}
@@ -211,14 +212,18 @@ static GLuint batch_vao_get(GPUBatch *batch)
{
/* Search through cache */
if (batch->is_dynamic_vao_count) {
- for (int i = 0; i < batch->dynamic_vaos.count; ++i)
- if (batch->dynamic_vaos.interfaces[i] == batch->interface)
+ for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
+ if (batch->dynamic_vaos.interfaces[i] == batch->interface) {
return batch->dynamic_vaos.vao_ids[i];
+ }
+ }
}
else {
- for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
- if (batch->static_vaos.interfaces[i] == batch->interface)
+ for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
+ if (batch->static_vaos.interfaces[i] == batch->interface) {
return batch->static_vaos.vao_ids[i];
+ }
+ }
}
/* Set context of this batch.
@@ -239,9 +244,11 @@ static GLuint batch_vao_get(GPUBatch *batch)
GLuint new_vao = 0;
if (!batch->is_dynamic_vao_count) {
int i; /* find first unused slot */
- for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
- if (batch->static_vaos.vao_ids[i] == 0)
+ for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
+ if (batch->static_vaos.vao_ids[i] == 0) {
break;
+ }
+ }
if (i < GPU_BATCH_VAO_STATIC_LEN) {
batch->static_vaos.interfaces[i] = batch->interface;
@@ -267,9 +274,11 @@ static GLuint batch_vao_get(GPUBatch *batch)
if (batch->is_dynamic_vao_count) {
int i; /* find first unused slot */
- for (i = 0; i < batch->dynamic_vaos.count; ++i)
- if (batch->dynamic_vaos.vao_ids[i] == 0)
+ for (i = 0; i < batch->dynamic_vaos.count; ++i) {
+ if (batch->dynamic_vaos.vao_ids[i] == 0) {
break;
+ }
+ }
if (i == batch->dynamic_vaos.count) {
/* Not enough place, realloc the array. */
@@ -362,8 +371,9 @@ static void create_bindings(GPUVertBuf *verts,
for (uint n_idx = 0; n_idx < a->name_len; ++n_idx) {
const GPUShaderInput *input = GPU_shaderinterface_attr(interface, a->name[n_idx]);
- if (input == NULL)
+ if (input == NULL) {
continue;
+ }
if (a->comp_len == 16 || a->comp_len == 12 || a->comp_len == 8) {
#if TRUST_NO_ONE
@@ -540,10 +550,12 @@ static void primitive_restart_enable(const GPUIndexBuf *el)
GLuint restart_index = (GLuint)0xFFFFFFFF;
#if GPU_TRACK_INDEX_RANGE
- if (el->index_type == GPU_INDEX_U8)
+ if (el->index_type == GPU_INDEX_U8) {
restart_index = (GLuint)0xFF;
- else if (el->index_type == GPU_INDEX_U16)
+ }
+ else if (el->index_type == GPU_INDEX_U16) {
restart_index = (GLuint)0xFFFF;
+ }
#endif
glPrimitiveRestartIndex(restart_index);
@@ -557,13 +569,16 @@ static void primitive_restart_disable(void)
static void *elem_offset(const GPUIndexBuf *el, int v_first)
{
#if GPU_TRACK_INDEX_RANGE
- if (el->index_type == GPU_INDEX_U8)
+ if (el->index_type == GPU_INDEX_U8) {
return (GLubyte *)0 + v_first;
- else if (el->index_type == GPU_INDEX_U16)
+ }
+ else if (el->index_type == GPU_INDEX_U16) {
return (GLushort *)0 + v_first;
- else
+ }
+ else {
#endif
return (GLuint *)0 + v_first;
+ }
}
void GPU_batch_draw(GPUBatch *batch)
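
For context on the last hunk: elem_offset() returns the byte offset that GL expects as the
"indices" argument when an element buffer object is bound, computed via pointer arithmetic
on a null base. A hedged sketch of that standard OpenGL pattern (illustrative only, not
code from this file; assumes a plain <GL/gl.h> header):

    #include <GL/gl.h>

    /* Offset into a bound GL_ELEMENT_ARRAY_BUFFER for 16-bit indices:
     * equivalent to (const void *)(v_first * sizeof(GLushort)). */
    static const void *index_offset_u16(int v_first)
    {
      return (const GLushort *)0 + v_first;
    }

    /* Usage (assumes a VAO and element buffer are already bound):
     *   glDrawElements(GL_TRIANGLES, index_count, GL_UNSIGNED_SHORT,
     *                  index_offset_u16(v_first));
     */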