author    | Clément Foucault <foucault.clem@gmail.com> | 2019-05-13 18:56:20 +0300
committer | Clément Foucault <foucault.clem@gmail.com> | 2019-05-14 11:57:03 +0300
commit    | 8bc8a62c57f91326ab3f8850785dce5452b5d703 (patch)
tree      | c578b9786bc3e519f36f782cd74abd77ad52b344 /source/blender/draw/intern/draw_manager_data.c
parent    | 20d9cd3a1fbd763dbe002e9baf2e3ba7fbb66f2f (diff)
DRW: Refactor: Use DRWCall to accumulate per instance attributes
This is a big change that cleans up a lot of confusing code:
- The instancing/batching data buffer distribution in draw_instance_data.c.
- The selection & drawing code in draw_manager_exec.c.
- Pretty much every non-mesh object drawing (object_mode.c).
Most of the changes are just renaming, but there is still a chance a typo
might have sneaked through.
The Batching/Instancing shading groups are replaced by DRWCallBuffers. This
is cleaner and conceptually more in line with what a DRWShadingGroup should
be. A usage sketch of the new entry points follows below.
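
A minimal sketch of the new immediate-style path, assuming an existing `shader` and `pass`; the "pos" attribute and the wrapper function are illustrative, not from this commit — only the DRW_* calls are:

```c
static void example_draw_points(GPUShader *shader, DRWPass *pass)
{
  /* Formats are built once and reused, as elsewhere in the draw manager. */
  static GPUVertFormat format = {0};
  if (format.attr_len == 0) {
    GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  }

  DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);
  /* The DRWCallBuffer replaces the old DRW_SHG_POINT_BATCH shgroup type. */
  DRWCallBuffer *buf = DRW_shgroup_call_buffer_add(grp, &format, GPU_PRIM_POINTS);

  /* Each entry appends one vertex; the backing buffer grows by
   * DRW_BUFFER_VERTS_CHUNK when full. */
  const float pos[3] = {0.0f, 0.0f, 0.0f};
  const void *attrs[1] = {pos};
  DRW_buffer_add_entry_array(buf, attrs, 1);
}
```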
There is still some minor confusion in draw_common.c, where some functions
take a shgroup as input and some don't.
Diffstat (limited to 'source/blender/draw/intern/draw_manager_data.c')
-rw-r--r-- | source/blender/draw/intern/draw_manager_data.c | 332
1 file changed, 110 insertions, 222 deletions
```diff
diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c
index 3a10543c8ef..151ab469e59 100644
--- a/source/blender/draw/intern/draw_manager_data.c
+++ b/source/blender/draw/intern/draw_manager_data.c
@@ -43,8 +43,6 @@
 
 #include "intern/gpu_codegen.h"
 
-struct GPUVertFormat *g_pos_format = NULL;
-
 /* -------------------------------------------------------------------- */
 /** \name Uniform Buffer Object (DRW_uniformbuffer)
  * \{ */
@@ -453,7 +451,6 @@ static DRWCallState *drw_call_state_object(DRWShadingGroup *shgroup, float (*obm
 void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4])
 {
   BLI_assert(geom != NULL);
-  BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
 
   DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
   BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -465,6 +462,7 @@ void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obma
   call->inst_count = 0;
 #ifdef USE_GPU_SELECT
   call->select_id = DST.select_id;
+  call->inst_selectid = NULL;
 #endif
 }
 
@@ -472,7 +470,6 @@ void DRW_shgroup_call_range_add(
     DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count)
 {
   BLI_assert(geom != NULL);
-  BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
   BLI_assert(v_count);
 
   DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
@@ -485,6 +482,7 @@ void DRW_shgroup_call_range_add(
   call->inst_count = 0;
 #ifdef USE_GPU_SELECT
   call->select_id = DST.select_id;
+  call->inst_selectid = NULL;
 #endif
 }
 
@@ -493,7 +491,6 @@ static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup,
                                                uint vert_count,
                                                float (*obmat)[4])
 {
-  BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
 
   DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
   BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -505,6 +502,7 @@ static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup,
   call->inst_count = 0;
 #ifdef USE_GPU_SELECT
   call->select_id = DST.select_id;
+  call->inst_selectid = NULL;
 #endif
 }
 
@@ -539,7 +537,6 @@ void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup,
                                     bool bypass_culling)
 {
   BLI_assert(geom != NULL);
-  BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
 
   DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
   BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -553,6 +550,7 @@ void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup,
   call->inst_count = 0;
 #ifdef USE_GPU_SELECT
   call->select_id = DST.select_id;
+  call->inst_selectid = NULL;
 #endif
 }
 
@@ -563,7 +561,6 @@ void DRW_shgroup_call_object_add_with_callback(DRWShadingGroup *shgroup,
                                                void *user_data)
 {
   BLI_assert(geom != NULL);
-  BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
 
   DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
   BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -577,6 +574,7 @@ void DRW_shgroup_call_object_add_with_callback(DRWShadingGroup *shgroup,
   call->inst_count = 0;
 #ifdef USE_GPU_SELECT
   call->select_id = DST.select_id;
+  call->inst_selectid = NULL;
 #endif
 }
 
@@ -586,7 +584,6 @@ void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup,
                                     uint count)
 {
   BLI_assert(geom != NULL);
-  BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
 
   DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
   BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -598,6 +595,31 @@ void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup,
   call->inst_count = count;
 #ifdef USE_GPU_SELECT
   call->select_id = DST.select_id;
+  call->inst_selectid = NULL;
+#endif
+}
+
+void DRW_shgroup_call_instances_with_attribs_add(DRWShadingGroup *shgroup,
+                                                 struct GPUBatch *geom,
+                                                 float (*obmat)[4],
+                                                 struct GPUBatch *inst_attributes)
+{
+  BLI_assert(geom != NULL);
+  BLI_assert(inst_attributes->verts[0] != NULL);
+
+  GPUVertBuf *buf_inst = inst_attributes->verts[0];
+
+  DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
+  BLI_LINKS_APPEND(&shgroup->calls, call);
+
+  call->state = drw_call_state_create(shgroup, obmat, NULL);
+  call->batch = DRW_temp_batch_instance_request(DST.idatalist, buf_inst, geom);
+  call->vert_first = 0;
+  call->vert_count = 0; /* Auto from batch. */
+  call->inst_count = buf_inst->vertex_len;
+#ifdef USE_GPU_SELECT
+  call->select_id = DST.select_id;
+  call->inst_selectid = NULL;
 #endif
 }
 
@@ -741,30 +763,95 @@ void DRW_shgroup_call_sculpt_with_materials_add(DRWShadingGroup **shgroups,
   drw_sculpt_generate_calls(&scd, use_vcol);
 }
 
-void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup,
-                                        const void *attr[],
-                                        uint attr_len)
+static GPUVertFormat inst_select_format = {0};
+
+DRWCallBuffer *DRW_shgroup_call_buffer_add(DRWShadingGroup *shgroup,
+                                           struct GPUVertFormat *format,
+                                           GPUPrimType prim_type)
+{
+  BLI_assert(ELEM(prim_type, GPU_PRIM_POINTS, GPU_PRIM_LINES, GPU_PRIM_TRI_FAN));
+  BLI_assert(format != NULL);
+
+  DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
+  BLI_LINKS_APPEND(&shgroup->calls, call);
+
+  call->state = drw_call_state_create(shgroup, NULL, NULL);
+  GPUVertBuf *buf = DRW_temp_buffer_request(DST.idatalist, format, &call->vert_count);
+  call->batch = DRW_temp_batch_request(DST.idatalist, buf, prim_type);
+  call->vert_first = 0;
+  call->vert_count = 0;
+  call->inst_count = 0;
+
+#ifdef USE_GPU_SELECT
+  if (G.f & G_FLAG_PICKSEL) {
+    /* Not actually used for rendering but alloced in one chunk. */
+    if (inst_select_format.attr_len == 0) {
+      GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
+    }
+    call->inst_selectid = DRW_temp_buffer_request(
+        DST.idatalist, &inst_select_format, &call->vert_count);
+  }
+#endif
+  return (DRWCallBuffer *)call;
+}
+
+DRWCallBuffer *DRW_shgroup_call_buffer_instance_add(DRWShadingGroup *shgroup,
+                                                    struct GPUVertFormat *format,
+                                                    GPUBatch *geom)
 {
+  BLI_assert(geom != NULL);
+  BLI_assert(format != NULL);
+
+  DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
+  BLI_LINKS_APPEND(&shgroup->calls, call);
+
+  call->state = drw_call_state_create(shgroup, NULL, NULL);
+  GPUVertBuf *buf = DRW_temp_buffer_request(DST.idatalist, format, &call->inst_count);
+  call->batch = DRW_temp_batch_instance_request(DST.idatalist, buf, geom);
+  call->vert_first = 0;
+  call->vert_count = 0; /* Auto from batch. */
+  call->inst_count = 0;
+
 #ifdef USE_GPU_SELECT
   if (G.f & G_FLAG_PICKSEL) {
-    if (shgroup->instance_count == shgroup->inst_selectid->vertex_len) {
-      GPU_vertbuf_data_resize(shgroup->inst_selectid, shgroup->instance_count + 32);
+    /* Not actually used for rendering but alloced in one chunk. */
+    if (inst_select_format.attr_len == 0) {
+      GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
     }
-    GPU_vertbuf_attr_set(shgroup->inst_selectid, 0, shgroup->instance_count, &DST.select_id);
+    call->inst_selectid = DRW_temp_buffer_request(
+        DST.idatalist, &inst_select_format, &call->inst_count);
   }
 #endif
+  return (DRWCallBuffer *)call;
+}
 
-  BLI_assert(attr_len == shgroup->attrs_count);
+void DRW_buffer_add_entry_array(DRWCallBuffer *callbuf, const void *attr[], uint attr_len)
+{
+  DRWCall *call = (DRWCall *)callbuf;
+  const bool is_instance = call->batch->inst != NULL;
+  GPUVertBuf *buf = is_instance ? call->batch->inst : call->batch->verts[0];
+  uint count = is_instance ? call->inst_count++ : call->vert_count++;
+  const bool resize = (count == buf->vertex_alloc);
+
+  BLI_assert(attr_len == buf->format.attr_len);
   UNUSED_VARS_NDEBUG(attr_len);
 
+  if (UNLIKELY(resize)) {
+    GPU_vertbuf_data_resize(buf, count + DRW_BUFFER_VERTS_CHUNK);
+  }
+
   for (int i = 0; i < attr_len; ++i) {
-    if (shgroup->instance_count == shgroup->instance_vbo->vertex_len) {
-      GPU_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
-    }
-    GPU_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
+    GPU_vertbuf_attr_set(buf, i, count, attr[i]);
   }
 
-  shgroup->instance_count += 1;
+#ifdef USE_GPU_SELECT
+  if (G.f & G_FLAG_PICKSEL) {
+    if (UNLIKELY(resize)) {
+      GPU_vertbuf_data_resize(call->inst_selectid, count + DRW_BUFFER_VERTS_CHUNK);
+    }
+    GPU_vertbuf_attr_set(call->inst_selectid, 0, count, &DST.select_id);
  }
+#endif
 }
 
 /** \} */
@@ -775,17 +862,7 @@ void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup,
 
 static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
 {
-  shgroup->instance_geom = NULL;
-  shgroup->instance_vbo = NULL;
-  shgroup->instance_count = 0;
   shgroup->uniforms = NULL;
-#ifdef USE_GPU_SELECT
-  shgroup->inst_selectid = NULL;
-  shgroup->override_selectid = -1;
-#endif
-#ifndef NDEBUG
-  shgroup->attrs_count = 0;
-#endif
 
   int view_ubo_location = GPU_shader_get_uniform_block(shader, "viewBlock");
 
@@ -817,6 +894,7 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
   /* Not supported. */
   BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW_INV) == -1);
   BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW) == -1);
+  BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL) == -1);
 
   shgroup->model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
   shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
@@ -825,9 +903,6 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
   shgroup->objectinfo = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_OBJECT_INFO);
   shgroup->callid = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_CALLID);
 
-  /* We do not support normal matrix anymore. */
-  BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL) == -1);
-
   shgroup->matflag = 0;
   if (shgroup->modelinverse > -1) {
     shgroup->matflag |= DRW_CALL_MODELINVERSE;
@@ -843,113 +918,19 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
   }
 }
 
-static void drw_shgroup_instance_init(DRWShadingGroup *shgroup,
-                                      GPUShader *shader,
-                                      GPUBatch *batch,
-                                      GPUVertFormat *format)
-{
-  BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
-  BLI_assert(batch != NULL);
-  BLI_assert(format != NULL);
-
-  drw_shgroup_init(shgroup, shader);
-
-  shgroup->instance_geom = batch;
-#ifndef NDEBUG
-  shgroup->attrs_count = format->attr_len;
-#endif
-
-  DRW_instancing_buffer_request(
-      DST.idatalist, format, batch, shgroup, &shgroup->instance_geom, &shgroup->instance_vbo);
-
-#ifdef USE_GPU_SELECT
-  if (G.f & G_FLAG_PICKSEL) {
-    /* Not actually used for rendering but alloced in one chunk.
-     * Plus we don't have to care about ownership. */
-    static GPUVertFormat inst_select_format = {0};
-    if (inst_select_format.attr_len == 0) {
-      GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
-    }
-    GPUBatch *batch_dummy; /* Not used */
-    DRW_batching_buffer_request(DST.idatalist,
-                                &inst_select_format,
-                                GPU_PRIM_POINTS,
-                                shgroup,
-                                &batch_dummy,
-                                &shgroup->inst_selectid);
-  }
-#endif
-}
-
-static void drw_shgroup_batching_init(DRWShadingGroup *shgroup,
-                                      GPUShader *shader,
-                                      GPUVertFormat *format)
-{
-  drw_shgroup_init(shgroup, shader);
-
-#ifndef NDEBUG
-  shgroup->attrs_count = (format != NULL) ? format->attr_len : 0;
-#endif
-  BLI_assert(format != NULL);
-
-  GPUPrimType type;
-  switch (shgroup->type) {
-    case DRW_SHG_POINT_BATCH:
-      type = GPU_PRIM_POINTS;
-      break;
-    case DRW_SHG_LINE_BATCH:
-      type = GPU_PRIM_LINES;
-      break;
-    case DRW_SHG_TRIANGLE_BATCH:
-      type = GPU_PRIM_TRIS;
-      break;
-    default:
-      type = GPU_PRIM_NONE;
-      BLI_assert(0);
-      break;
-  }
-
-  DRW_batching_buffer_request(
-      DST.idatalist, format, type, shgroup, &shgroup->batch_geom, &shgroup->batch_vbo);
-
-#ifdef USE_GPU_SELECT
-  if (G.f & G_FLAG_PICKSEL) {
-    /* Not actually used for rendering but alloced in one chunk. */
-    static GPUVertFormat inst_select_format = {0};
-    if (inst_select_format.attr_len == 0) {
-      GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
-    }
-    GPUBatch *batch; /* Not used */
-    DRW_batching_buffer_request(DST.idatalist,
-                                &inst_select_format,
-                                GPU_PRIM_POINTS,
-                                shgroup,
-                                &batch,
-                                &shgroup->inst_selectid);
-  }
-#endif
-}
-
 static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
 {
   DRWShadingGroup *shgroup = BLI_memblock_alloc(DST.vmempool->shgroups);
 
   BLI_LINKS_APPEND(&pass->shgroups, shgroup);
 
-  shgroup->type = DRW_SHG_NORMAL;
   shgroup->shader = shader;
   shgroup->state_extra = 0;
   shgroup->state_extra_disable = ~0x0;
   shgroup->stencil_mask = 0;
   shgroup->calls.first = NULL;
   shgroup->calls.last = NULL;
-#if 0 /* All the same in the union! */
-  shgroup->batch_geom = NULL;
-  shgroup->batch_vbo = NULL;
-
-  shgroup->instance_geom = NULL;
-  shgroup->instance_vbo = NULL;
-#endif
+  shgroup->tfeedback_target = NULL;
 
   shgroup->pass_parent = pass;
 
@@ -1034,7 +1015,6 @@ DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPa
     drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
     drw_shgroup_material_inputs(shgroup, material);
   }
-
   return shgroup;
 }
 
@@ -1045,96 +1025,18 @@ DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
   return shgroup;
 }
 
-DRWShadingGroup *DRW_shgroup_instance_create(struct GPUShader *shader,
-                                             DRWPass *pass,
-                                             GPUBatch *geom,
-                                             GPUVertFormat *format)
-{
-  DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
-  shgroup->type = DRW_SHG_INSTANCE;
-  shgroup->instance_geom = geom;
-  drw_call_calc_orco(NULL, shgroup->instance_orcofac);
-  drw_shgroup_instance_init(shgroup, shader, geom, format);
-
-  return shgroup;
-}
-
-DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
-{
-  DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTR_FLOAT, 3}});
-
-  DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
-  shgroup->type = DRW_SHG_POINT_BATCH;
-
-  drw_shgroup_batching_init(shgroup, shader, g_pos_format);
-
-  return shgroup;
-}
-
-DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(struct GPUShader *shader,
-                                                           DRWPass *pass,
-                                                           GPUVertFormat *format)
-{
-  DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
-  shgroup->type = DRW_SHG_LINE_BATCH;
-
-  drw_shgroup_batching_init(shgroup, shader, format);
-
-  return shgroup;
-}
-
-DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
-{
-  DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTR_FLOAT, 3}});
-
-  return DRW_shgroup_line_batch_create_with_format(shader, pass, g_pos_format);
-}
-
 DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
                                                        DRWPass *pass,
                                                        GPUVertBuf *tf_target)
 {
   BLI_assert(tf_target != NULL);
   DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
-  shgroup->type = DRW_SHG_FEEDBACK_TRANSFORM;
-
   drw_shgroup_init(shgroup, shader);
-
   shgroup->tfeedback_target = tf_target;
-
   return shgroup;
 }
 
 /**
- * Specify an external batch instead of adding each attribute one by one.
- */
-void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct GPUBatch *batch)
-{
-  BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
-  BLI_assert(shgroup->instance_count == 0);
-  /* You cannot use external instancing batch without a dummy format. */
-  BLI_assert(shgroup->attrs_count != 0);
-
-  shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
-  drw_call_calc_orco(NULL, shgroup->instance_orcofac);
-  /* PERF : This destroys the vaos cache so better check if it's necessary. */
-  /* Note: This WILL break if batch->verts[0] is destroyed and reallocated
-   * at the same address. Bindings/VAOs would remain obsolete. */
-  // if (shgroup->instancing_geom->inst != batch->verts[0])
-  /* XXX FIXME: THIS IS BROKEN BECAUSE OVEWRITTEN BY DRW_instance_buffer_finish(). */
-  GPU_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);
-
-#ifdef USE_GPU_SELECT
-  shgroup->override_selectid = DST.select_id;
-#endif
-}
-
-uint DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
-{
-  return shgroup->instance_count;
-}
-
-/**
  * State is added to #Pass.state while drawing.
  * Use to temporarily enable draw options.
  */
@@ -1156,26 +1058,12 @@ void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask)
 
 bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup)
 {
-  switch (shgroup->type) {
-    case DRW_SHG_NORMAL:
-    case DRW_SHG_FEEDBACK_TRANSFORM:
-      return shgroup->calls.first == NULL;
-    case DRW_SHG_POINT_BATCH:
-    case DRW_SHG_LINE_BATCH:
-    case DRW_SHG_TRIANGLE_BATCH:
-    case DRW_SHG_INSTANCE:
-    case DRW_SHG_INSTANCE_EXTERNAL:
-      return shgroup->instance_count == 0;
-  }
-  BLI_assert(!"Shading Group type not supported");
-  return true;
+  return shgroup->calls.first == NULL;
 }
 
 DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup)
 {
   /* Remove this assertion if needed but implement the other cases first! */
-  BLI_assert(shgroup->type == DRW_SHG_NORMAL);
-
   DRWShadingGroup *shgroup_new = BLI_memblock_alloc(DST.vmempool->shgroups);
 
   *shgroup_new = *shgroup;
```
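
To illustrate the instancing side of the refactor, here is a hedged sketch: `grp`, `geom`, and the attribute names are assumptions for illustration; only the DRW_* entry points come from this commit.

```c
/* Per-instance attributes accumulate in a temp vertex buffer that becomes
 * the instance buffer of `geom` (grp/geom and attribute names assumed). */
static void example_draw_instances(DRWShadingGroup *grp, GPUBatch *geom)
{
  static GPUVertFormat inst_format = {0};
  if (inst_format.attr_len == 0) {
    GPU_vertformat_attr_add(&inst_format, "inst_pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    GPU_vertformat_attr_add(&inst_format, "color", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
  }

  DRWCallBuffer *buf = DRW_shgroup_call_buffer_instance_add(grp, &inst_format, geom);

  /* One entry per instance; DRW_buffer_add_entry_array sees batch->inst
   * and advances inst_count instead of vert_count. */
  const float inst_pos[3] = {0.0f, 0.0f, 0.0f};
  const float color[4] = {1.0f, 1.0f, 1.0f, 1.0f};
  const void *attrs[2] = {inst_pos, color};
  DRW_buffer_add_entry_array(buf, attrs, 2);
}
```

When the per-instance attributes already live in a GPUBatch, DRW_shgroup_call_instances_with_attribs_add skips the accumulation entirely and takes the instance count from the buffer's vertex_len.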