diff options
author | Clément Foucault <foucault.clem@gmail.com> | 2019-05-30 00:52:37 +0300 |
---|---|---|
committer | Clément Foucault <foucault.clem@gmail.com> | 2019-05-30 14:43:33 +0300 |
commit | 596492e639f07b6a0a3f705a0f944dc57f94b7e4 (patch) | |
tree | 55f3dfa9ee3ad03c90954bec26f7c85157dc2d1b /source/blender/draw/intern/draw_manager_data.c | |
parent | 32a2bfddcb4b0dedbea987ae3e335dc6aecdaed8 (diff) |
DRW: Refactor to use object pointer for drawcall by default
This cleans up a bit of duplicated code and some confusion about
what was culled and what wasn't.
Now everything is culled based on the given object pointer.
If the object pointer is NULL there is no culling performed.
Diffstat (limited to 'source/blender/draw/intern/draw_manager_data.c')
-rw-r--r-- | source/blender/draw/intern/draw_manager_data.c | 153 |
1 file changed, 52 insertions, 101 deletions
diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c index 24f2670dd2d..5aec14a3100 100644 --- a/source/blender/draw/intern/draw_manager_data.c +++ b/source/blender/draw/intern/draw_manager_data.c @@ -433,65 +433,70 @@ static DRWCallState *drw_call_state_create(DRWShadingGroup *shgroup, float (*obm static DRWCallState *drw_call_state_object(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob) { - if (DST.ob_state == NULL) { - DST.ob_state = drw_call_state_create(shgroup, obmat, ob); + if (ob == NULL) { + if (obmat == NULL) { + /* TODO return unitmat state. */ + return drw_call_state_create(shgroup, obmat, ob); + } + else { + return drw_call_state_create(shgroup, obmat, ob); + } } else { - /* If the DRWCallState is reused, add necessary matrices. */ - drw_call_state_update_matflag(DST.ob_state, shgroup, ob); - } - - return DST.ob_state; -} - -void DRW_shgroup_call(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4]) -{ - BLI_assert(geom != NULL); - - DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls); - BLI_LINKS_APPEND(&shgroup->calls, call); + if (DST.ob_state == NULL) { + DST.ob_state = drw_call_state_create(shgroup, obmat, ob); + } + else { + /* If the DRWCallState is reused, add necessary matrices. */ + drw_call_state_update_matflag(DST.ob_state, shgroup, ob); + } - call->state = drw_call_state_create(shgroup, obmat, NULL); - call->batch = geom; - call->vert_first = 0; - call->vert_count = 0; /* Auto from batch. 
*/ - call->inst_count = 0; -#ifdef USE_GPU_SELECT - call->select_id = DST.select_id; - call->inst_selectid = NULL; -#endif + return DST.ob_state; + } } -void DRW_shgroup_call_range( - DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count) +void DRW_shgroup_call_ex(DRWShadingGroup *shgroup, + Object *ob, + float (*obmat)[4], + struct GPUBatch *geom, + uint v_sta, + uint v_ct, + bool bypass_culling, + void *user_data) { BLI_assert(geom != NULL); - BLI_assert(v_count); DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls); BLI_LINKS_APPEND(&shgroup->calls, call); - call->state = drw_call_state_create(shgroup, obmat, NULL); + call->state = drw_call_state_object(shgroup, ob ? ob->obmat : obmat, ob); call->batch = geom; call->vert_first = v_sta; - call->vert_count = v_count; + call->vert_count = v_ct; /* 0 means auto from batch. */ call->inst_count = 0; #ifdef USE_GPU_SELECT call->select_id = DST.select_id; call->inst_selectid = NULL; #endif + if (call->state->culling) { + call->state->culling->user_data = user_data; + if (bypass_culling) { + /* NOTE this will disable culling for the whole object. */ + call->state->culling->bsphere.radius = -1.0f; + } + } } static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup, GPUBatch *geom, - uint vert_count, - float (*obmat)[4]) + Object *ob, + uint vert_count) { DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls); BLI_LINKS_APPEND(&shgroup->calls, call); - call->state = drw_call_state_object(shgroup, obmat, NULL); + call->state = drw_call_state_object(shgroup, ob ? 
ob->obmat : NULL, ob); call->batch = geom; call->vert_first = 0; call->vert_count = vert_count; @@ -502,81 +507,27 @@ static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup, #endif } -void DRW_shgroup_call_procedural_points(DRWShadingGroup *shgroup, - uint point_len, - float (*obmat)[4]) +void DRW_shgroup_call_procedural_points(DRWShadingGroup *shgroup, Object *ob, uint point_len) { struct GPUBatch *geom = drw_cache_procedural_points_get(); - drw_shgroup_call_procedural_add_ex(shgroup, geom, point_len, obmat); + drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, point_len); } -void DRW_shgroup_call_procedural_lines(DRWShadingGroup *shgroup, - uint line_count, - float (*obmat)[4]) +void DRW_shgroup_call_procedural_lines(DRWShadingGroup *shgroup, Object *ob, uint line_count) { struct GPUBatch *geom = drw_cache_procedural_lines_get(); - drw_shgroup_call_procedural_add_ex(shgroup, geom, line_count * 2, obmat); + drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, line_count * 2); } -void DRW_shgroup_call_procedural_triangles(DRWShadingGroup *shgroup, - uint tria_count, - float (*obmat)[4]) +void DRW_shgroup_call_procedural_triangles(DRWShadingGroup *shgroup, Object *ob, uint tria_count) { struct GPUBatch *geom = drw_cache_procedural_triangles_get(); - drw_shgroup_call_procedural_add_ex(shgroup, geom, tria_count * 3, obmat); -} - -/* These calls can be culled and are optimized for redraw */ -void DRW_shgroup_call_object_ex(DRWShadingGroup *shgroup, - GPUBatch *geom, - Object *ob, - bool bypass_culling) -{ - BLI_assert(geom != NULL); - - DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls); - BLI_LINKS_APPEND(&shgroup->calls, call); - - call->state = drw_call_state_object(shgroup, ob->obmat, ob); - call->batch = geom; - call->vert_first = 0; - call->vert_count = 0; /* Auto from batch. 
*/ - call->inst_count = 0; -#ifdef USE_GPU_SELECT - call->select_id = DST.select_id; - call->inst_selectid = NULL; -#endif - if (bypass_culling) { - /* NOTE this will disable culling for the whole object. */ - call->state->culling->bsphere.radius = -1.0f; - } -} - -void DRW_shgroup_call_object_with_callback(DRWShadingGroup *shgroup, - GPUBatch *geom, - Object *ob, - void *user_data) -{ - BLI_assert(geom != NULL); - - DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls); - BLI_LINKS_APPEND(&shgroup->calls, call); - - call->state = drw_call_state_object(shgroup, ob->obmat, ob); - call->state->culling->user_data = user_data; - call->batch = geom; - call->vert_first = 0; - call->vert_count = 0; /* Auto from batch. */ - call->inst_count = 0; -#ifdef USE_GPU_SELECT - call->select_id = DST.select_id; - call->inst_selectid = NULL; -#endif + drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, tria_count * 3); } void DRW_shgroup_call_instances(DRWShadingGroup *shgroup, - GPUBatch *geom, - float (*obmat)[4], + Object *ob, + struct GPUBatch *geom, uint count) { BLI_assert(geom != NULL); @@ -584,7 +535,7 @@ void DRW_shgroup_call_instances(DRWShadingGroup *shgroup, DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls); BLI_LINKS_APPEND(&shgroup->calls, call); - call->state = drw_call_state_create(shgroup, obmat, NULL); + call->state = drw_call_state_object(shgroup, ob ? ob->obmat : NULL, ob); call->batch = geom; call->vert_first = 0; call->vert_count = 0; /* Auto from batch. 
*/ @@ -596,8 +547,8 @@ void DRW_shgroup_call_instances(DRWShadingGroup *shgroup, } void DRW_shgroup_call_instances_with_attribs(DRWShadingGroup *shgroup, + Object *ob, struct GPUBatch *geom, - float (*obmat)[4], struct GPUBatch *inst_attributes) { BLI_assert(geom != NULL); @@ -608,7 +559,7 @@ void DRW_shgroup_call_instances_with_attribs(DRWShadingGroup *shgroup, DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls); BLI_LINKS_APPEND(&shgroup->calls, call); - call->state = drw_call_state_create(shgroup, obmat, NULL); + call->state = drw_call_state_object(shgroup, ob ? ob->obmat : NULL, ob); call->batch = DRW_temp_batch_instance_request(DST.idatalist, buf_inst, geom); call->vert_first = 0; call->vert_count = 0; /* Auto from batch. */ @@ -669,9 +620,9 @@ static void sculpt_draw_cb(DRWSculptCallbackData *scd, GPU_PBVH_Buffers *buffers shgrp = DRW_shgroup_create_sub(shgrp); DRW_shgroup_uniform_vec3(shgrp, "materialDiffuseColor", SCULPT_DEBUG_COLOR(scd->node_nr++), 1); #endif - /* DRW_shgroup_call_object_ex reuses matrices calculations for all the drawcalls of this + /* DRW_shgroup_call_no_cull reuses matrices calculations for all the drawcalls of this * object. 
*/ - DRW_shgroup_call_object_ex(shgrp, geom, scd->ob, true); + DRW_shgroup_call_no_cull(shgrp, geom, scd->ob); } } @@ -769,7 +720,7 @@ DRWCallBuffer *DRW_shgroup_call_buffer(DRWShadingGroup *shgroup, DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls); BLI_LINKS_APPEND(&shgroup->calls, call); - call->state = drw_call_state_create(shgroup, NULL, NULL); + call->state = drw_call_state_object(shgroup, NULL, NULL); GPUVertBuf *buf = DRW_temp_buffer_request(DST.idatalist, format, &call->vert_count); call->batch = DRW_temp_batch_request(DST.idatalist, buf, prim_type); call->vert_first = 0; @@ -799,7 +750,7 @@ DRWCallBuffer *DRW_shgroup_call_buffer_instance(DRWShadingGroup *shgroup, DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls); BLI_LINKS_APPEND(&shgroup->calls, call); - call->state = drw_call_state_create(shgroup, NULL, NULL); + call->state = drw_call_state_object(shgroup, NULL, NULL); GPUVertBuf *buf = DRW_temp_buffer_request(DST.idatalist, format, &call->inst_count); call->batch = DRW_temp_batch_instance_request(DST.idatalist, buf, geom); call->vert_first = 0; |