diff options
author | Campbell Barton <ideasman42@gmail.com> | 2019-04-17 07:17:24 +0300 |
---|---|---|
committer | Campbell Barton <ideasman42@gmail.com> | 2019-04-17 07:21:24 +0300 |
commit | e12c08e8d170b7ca40f204a5b0423c23a9fbc2c1 (patch) | |
tree | 8cf3453d12edb177a218ef8009357518ec6cab6a /source/blender/gpu/intern/gpu_batch_presets.c | |
parent | b3dabc200a4b0399ec6b81f2ff2730d07b44fcaa (diff) |
ClangFormat: apply to source, most of intern
Apply clang format as proposed in T53211.
For details on usage and instructions for migrating branches
without conflicts, see:
https://wiki.blender.org/wiki/Tools/ClangFormat
Diffstat (limited to 'source/blender/gpu/intern/gpu_batch_presets.c')
-rw-r--r-- | source/blender/gpu/intern/gpu_batch_presets.c | 314 |
1 file changed, 158 insertions(+), 156 deletions(-)
diff --git a/source/blender/gpu/intern/gpu_batch_presets.c b/source/blender/gpu/intern/gpu_batch_presets.c index 8d23d4be297..9018582f6dd 100644 --- a/source/blender/gpu/intern/gpu_batch_presets.c +++ b/source/blender/gpu/intern/gpu_batch_presets.c @@ -31,83 +31,85 @@ #include "GPU_batch.h" #include "GPU_batch_utils.h" -#include "GPU_batch_presets.h" /* own include */ +#include "GPU_batch_presets.h" /* own include */ #include "gpu_shader_private.h" /* Struct to store 3D Batches and their format */ static struct { - struct { - GPUBatch *sphere_high; - GPUBatch *sphere_med; - GPUBatch *sphere_low; - GPUBatch *sphere_wire_low; - GPUBatch *sphere_wire_med; - } batch; + struct { + GPUBatch *sphere_high; + GPUBatch *sphere_med; + GPUBatch *sphere_low; + GPUBatch *sphere_wire_low; + GPUBatch *sphere_wire_med; + } batch; - GPUVertFormat format; + GPUVertFormat format; - struct { - uint pos, nor; - } attr_id; + struct { + uint pos, nor; + } attr_id; - ThreadMutex mutex; + ThreadMutex mutex; } g_presets_3d = {{0}}; static ListBase presets_list = {NULL, NULL}; - /* -------------------------------------------------------------------- */ /** \name 3D Primitives * \{ */ static GPUVertFormat *preset_3d_format(void) { - if (g_presets_3d.format.attr_len == 0) { - GPUVertFormat *format = &g_presets_3d.format; - g_presets_3d.attr_id.pos = GPU_vertformat_attr_add(format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT); - g_presets_3d.attr_id.nor = GPU_vertformat_attr_add(format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT); - } - return &g_presets_3d.format; + if (g_presets_3d.format.attr_len == 0) { + GPUVertFormat *format = &g_presets_3d.format; + g_presets_3d.attr_id.pos = GPU_vertformat_attr_add( + format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT); + g_presets_3d.attr_id.nor = GPU_vertformat_attr_add( + format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT); + } + return &g_presets_3d.format; } -static void batch_sphere_lat_lon_vert( - GPUVertBufRaw *pos_step, GPUVertBufRaw *nor_step, - float lat, 
float lon) +static void batch_sphere_lat_lon_vert(GPUVertBufRaw *pos_step, + GPUVertBufRaw *nor_step, + float lat, + float lon) { - float pos[3]; - pos[0] = sinf(lat) * cosf(lon); - pos[1] = cosf(lat); - pos[2] = sinf(lat) * sinf(lon); - copy_v3_v3(GPU_vertbuf_raw_step(pos_step), pos); - copy_v3_v3(GPU_vertbuf_raw_step(nor_step), pos); + float pos[3]; + pos[0] = sinf(lat) * cosf(lon); + pos[1] = cosf(lat); + pos[2] = sinf(lat) * sinf(lon); + copy_v3_v3(GPU_vertbuf_raw_step(pos_step), pos); + copy_v3_v3(GPU_vertbuf_raw_step(nor_step), pos); } GPUBatch *GPU_batch_preset_sphere(int lod) { - BLI_assert(lod >= 0 && lod <= 2); - BLI_assert(BLI_thread_is_main()); - - if (lod == 0) { - return g_presets_3d.batch.sphere_low; - } - else if (lod == 1) { - return g_presets_3d.batch.sphere_med; - } - else { - return g_presets_3d.batch.sphere_high; - } + BLI_assert(lod >= 0 && lod <= 2); + BLI_assert(BLI_thread_is_main()); + + if (lod == 0) { + return g_presets_3d.batch.sphere_low; + } + else if (lod == 1) { + return g_presets_3d.batch.sphere_med; + } + else { + return g_presets_3d.batch.sphere_high; + } } GPUBatch *GPU_batch_preset_sphere_wire(int lod) { - BLI_assert(lod >= 0 && lod <= 1); - BLI_assert(BLI_thread_is_main()); - - if (lod == 0) { - return g_presets_3d.batch.sphere_wire_low; - } - else { - return g_presets_3d.batch.sphere_wire_med; - } + BLI_assert(lod >= 0 && lod <= 1); + BLI_assert(BLI_thread_is_main()); + + if (lod == 0) { + return g_presets_3d.batch.sphere_wire_low; + } + else { + return g_presets_3d.batch.sphere_wire_med; + } } /** \} */ @@ -119,141 +121,141 @@ GPUBatch *GPU_batch_preset_sphere_wire(int lod) /* Replacement for gluSphere */ GPUBatch *gpu_batch_sphere(int lat_res, int lon_res) { - const float lon_inc = 2 * M_PI / lon_res; - const float lat_inc = M_PI / lat_res; - float lon, lat; - - GPUVertBuf *vbo = GPU_vertbuf_create_with_format(preset_3d_format()); - const uint vbo_len = (lat_res - 1) * lon_res * 6; - GPU_vertbuf_data_alloc(vbo, vbo_len); - - 
GPUVertBufRaw pos_step, nor_step; - GPU_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.pos, &pos_step); - GPU_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.nor, &nor_step); - - lon = 0.0f; - for (int i = 0; i < lon_res; i++, lon += lon_inc) { - lat = 0.0f; - for (int j = 0; j < lat_res; j++, lat += lat_inc) { - if (j != lat_res - 1) { /* Pole */ - batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat + lat_inc, lon + lon_inc); - batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat + lat_inc, lon); - batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat, lon); - } - - if (j != 0) { /* Pole */ - batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat, lon + lon_inc); - batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat + lat_inc, lon + lon_inc); - batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat, lon); - } - } - } - - BLI_assert(vbo_len == GPU_vertbuf_raw_used(&pos_step)); - BLI_assert(vbo_len == GPU_vertbuf_raw_used(&nor_step)); - - return GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO); + const float lon_inc = 2 * M_PI / lon_res; + const float lat_inc = M_PI / lat_res; + float lon, lat; + + GPUVertBuf *vbo = GPU_vertbuf_create_with_format(preset_3d_format()); + const uint vbo_len = (lat_res - 1) * lon_res * 6; + GPU_vertbuf_data_alloc(vbo, vbo_len); + + GPUVertBufRaw pos_step, nor_step; + GPU_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.pos, &pos_step); + GPU_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.nor, &nor_step); + + lon = 0.0f; + for (int i = 0; i < lon_res; i++, lon += lon_inc) { + lat = 0.0f; + for (int j = 0; j < lat_res; j++, lat += lat_inc) { + if (j != lat_res - 1) { /* Pole */ + batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat + lat_inc, lon + lon_inc); + batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat + lat_inc, lon); + batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat, lon); + } + + if (j != 0) { /* Pole */ + batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat, lon + lon_inc); + 
batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat + lat_inc, lon + lon_inc); + batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat, lon); + } + } + } + + BLI_assert(vbo_len == GPU_vertbuf_raw_used(&pos_step)); + BLI_assert(vbo_len == GPU_vertbuf_raw_used(&nor_step)); + + return GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO); } static GPUBatch *batch_sphere_wire(int lat_res, int lon_res) { - const float lon_inc = 2 * M_PI / lon_res; - const float lat_inc = M_PI / lat_res; - float lon, lat; - - GPUVertBuf *vbo = GPU_vertbuf_create_with_format(preset_3d_format()); - const uint vbo_len = (lat_res * lon_res * 2) + ((lat_res - 1) * lon_res * 2); - GPU_vertbuf_data_alloc(vbo, vbo_len); - - GPUVertBufRaw pos_step, nor_step; - GPU_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.pos, &pos_step); - GPU_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.nor, &nor_step); - - lon = 0.0f; - for (int i = 0; i < lon_res; i++, lon += lon_inc) { - lat = 0.0f; - for (int j = 0; j < lat_res; j++, lat += lat_inc) { - batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat + lat_inc, lon); - batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat, lon); - - if (j != lat_res - 1) { /* Pole */ - batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat + lat_inc, lon + lon_inc); - batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat + lat_inc, lon); - } - } - } - - BLI_assert(vbo_len == GPU_vertbuf_raw_used(&pos_step)); - BLI_assert(vbo_len == GPU_vertbuf_raw_used(&nor_step)); - - return GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO); + const float lon_inc = 2 * M_PI / lon_res; + const float lat_inc = M_PI / lat_res; + float lon, lat; + + GPUVertBuf *vbo = GPU_vertbuf_create_with_format(preset_3d_format()); + const uint vbo_len = (lat_res * lon_res * 2) + ((lat_res - 1) * lon_res * 2); + GPU_vertbuf_data_alloc(vbo, vbo_len); + + GPUVertBufRaw pos_step, nor_step; + GPU_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.pos, &pos_step); + 
GPU_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.nor, &nor_step); + + lon = 0.0f; + for (int i = 0; i < lon_res; i++, lon += lon_inc) { + lat = 0.0f; + for (int j = 0; j < lat_res; j++, lat += lat_inc) { + batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat + lat_inc, lon); + batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat, lon); + + if (j != lat_res - 1) { /* Pole */ + batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat + lat_inc, lon + lon_inc); + batch_sphere_lat_lon_vert(&pos_step, &nor_step, lat + lat_inc, lon); + } + } + } + + BLI_assert(vbo_len == GPU_vertbuf_raw_used(&pos_step)); + BLI_assert(vbo_len == GPU_vertbuf_raw_used(&nor_step)); + + return GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO); } /** \} */ void gpu_batch_presets_init(void) { - BLI_mutex_init(&g_presets_3d.mutex); + BLI_mutex_init(&g_presets_3d.mutex); - /* Hard coded resolution */ - g_presets_3d.batch.sphere_low = gpu_batch_sphere(8, 16); - gpu_batch_presets_register(g_presets_3d.batch.sphere_low); + /* Hard coded resolution */ + g_presets_3d.batch.sphere_low = gpu_batch_sphere(8, 16); + gpu_batch_presets_register(g_presets_3d.batch.sphere_low); - g_presets_3d.batch.sphere_med = gpu_batch_sphere(16, 10); - gpu_batch_presets_register(g_presets_3d.batch.sphere_med); + g_presets_3d.batch.sphere_med = gpu_batch_sphere(16, 10); + gpu_batch_presets_register(g_presets_3d.batch.sphere_med); - g_presets_3d.batch.sphere_high = gpu_batch_sphere(32, 24); - gpu_batch_presets_register(g_presets_3d.batch.sphere_high); + g_presets_3d.batch.sphere_high = gpu_batch_sphere(32, 24); + gpu_batch_presets_register(g_presets_3d.batch.sphere_high); - g_presets_3d.batch.sphere_wire_low = batch_sphere_wire(6, 8); - gpu_batch_presets_register(g_presets_3d.batch.sphere_wire_low); + g_presets_3d.batch.sphere_wire_low = batch_sphere_wire(6, 8); + gpu_batch_presets_register(g_presets_3d.batch.sphere_wire_low); - g_presets_3d.batch.sphere_wire_med = batch_sphere_wire(8, 16); - 
gpu_batch_presets_register(g_presets_3d.batch.sphere_wire_med); + g_presets_3d.batch.sphere_wire_med = batch_sphere_wire(8, 16); + gpu_batch_presets_register(g_presets_3d.batch.sphere_wire_med); } void gpu_batch_presets_register(GPUBatch *preset_batch) { - BLI_mutex_lock(&g_presets_3d.mutex); - BLI_addtail(&presets_list, BLI_genericNodeN(preset_batch)); - BLI_mutex_unlock(&g_presets_3d.mutex); + BLI_mutex_lock(&g_presets_3d.mutex); + BLI_addtail(&presets_list, BLI_genericNodeN(preset_batch)); + BLI_mutex_unlock(&g_presets_3d.mutex); } bool gpu_batch_presets_unregister(GPUBatch *preset_batch) { - BLI_mutex_lock(&g_presets_3d.mutex); - for (LinkData *link = presets_list.last; link; link = link->prev) { - if (preset_batch == link->data) { - BLI_remlink(&presets_list, link); - BLI_mutex_unlock(&g_presets_3d.mutex); - MEM_freeN(link); - return true; - } - } - BLI_mutex_unlock(&g_presets_3d.mutex); - return false; + BLI_mutex_lock(&g_presets_3d.mutex); + for (LinkData *link = presets_list.last; link; link = link->prev) { + if (preset_batch == link->data) { + BLI_remlink(&presets_list, link); + BLI_mutex_unlock(&g_presets_3d.mutex); + MEM_freeN(link); + return true; + } + } + BLI_mutex_unlock(&g_presets_3d.mutex); + return false; } void gpu_batch_presets_reset(void) { - BLI_mutex_lock(&g_presets_3d.mutex); - /* Reset vao caches for these every time we switch opengl context. - * This way they will draw correctly for each window. */ - for (LinkData *link = presets_list.first; link; link = link->next) { - GPUBatch *preset = link->data; - GPU_batch_vao_cache_clear(preset); - } - BLI_mutex_unlock(&g_presets_3d.mutex); + BLI_mutex_lock(&g_presets_3d.mutex); + /* Reset vao caches for these every time we switch opengl context. + * This way they will draw correctly for each window. 
*/ + for (LinkData *link = presets_list.first; link; link = link->next) { + GPUBatch *preset = link->data; + GPU_batch_vao_cache_clear(preset); + } + BLI_mutex_unlock(&g_presets_3d.mutex); } void gpu_batch_presets_exit(void) { - LinkData *link; - while ((link = BLI_pophead(&presets_list))) { - GPUBatch *preset = link->data; - GPU_batch_discard(preset); - MEM_freeN(link); - } - - BLI_mutex_end(&g_presets_3d.mutex); + LinkData *link; + while ((link = BLI_pophead(&presets_list))) { + GPUBatch *preset = link->data; + GPU_batch_discard(preset); + MEM_freeN(link); + } + + BLI_mutex_end(&g_presets_3d.mutex); } |