git.blender.org/blender.git
Diffstat (limited to 'source/blender/draw/intern/draw_cache_impl_mesh.c')
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_mesh.c | 181
1 file changed, 84 insertions(+), 97 deletions(-)
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.c b/source/blender/draw/intern/draw_cache_impl_mesh.c
index e69fb795948..d6faeb16583 100644
--- a/source/blender/draw/intern/draw_cache_impl_mesh.c
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.c
@@ -95,6 +95,11 @@ BLI_INLINE void mesh_cd_layers_type_clear(DRW_MeshCDMask *a)
*((uint64_t *)a) = 0;
}
+BLI_INLINE const Mesh *editmesh_final_or_this(const Mesh *me)
+{
+  return (me->edit_mesh && me->edit_mesh->mesh_eval_final) ? me->edit_mesh->mesh_eval_final : me;
+}
+
static void mesh_cd_calc_edit_uv_layer(const Mesh *UNUSED(me), DRW_MeshCDMask *cd_used)
{
cd_used->edit_uv = 1;
@@ -132,7 +137,7 @@ BLI_INLINE const CustomData *mesh_cd_vdata_get_from_mesh(const Mesh *me)
static void mesh_cd_calc_active_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
{
- const Mesh *me_final = (me->edit_mesh) ? me->edit_mesh->mesh_eval_final : me;
+ const Mesh *me_final = editmesh_final_or_this(me);
const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
if (layer != -1) {
@@ -142,7 +147,7 @@ static void mesh_cd_calc_active_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used
static void mesh_cd_calc_active_mask_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
{
- const Mesh *me_final = (me->edit_mesh) ? me->edit_mesh->mesh_eval_final : me;
+ const Mesh *me_final = editmesh_final_or_this(me);
const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
int layer = CustomData_get_stencil_layer(cd_ldata, CD_MLOOPUV);
if (layer != -1) {
@@ -152,7 +157,7 @@ static void mesh_cd_calc_active_mask_uv_layer(const Mesh *me, DRW_MeshCDMask *cd
static void mesh_cd_calc_active_vcol_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
{
- const Mesh *me_final = (me->edit_mesh) ? me->edit_mesh->mesh_eval_final : me;
+ const Mesh *me_final = editmesh_final_or_this(me);
const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
int layer = CustomData_get_active_layer(cd_vdata, CD_PROP_COLOR);
@@ -163,7 +168,7 @@ static void mesh_cd_calc_active_vcol_layer(const Mesh *me, DRW_MeshCDMask *cd_us
static void mesh_cd_calc_active_mloopcol_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
{
- const Mesh *me_final = (me->edit_mesh) ? me->edit_mesh->mesh_eval_final : me;
+ const Mesh *me_final = editmesh_final_or_this(me);
const CustomData *cd_ldata = &me_final->ldata;
int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPCOL);
@@ -176,7 +181,7 @@ static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Mesh *me,
struct GPUMaterial **gpumat_array,
int gpumat_array_len)
{
- const Mesh *me_final = (me->edit_mesh) ? me->edit_mesh->mesh_eval_final : me;
+ const Mesh *me_final = editmesh_final_or_this(me);
const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
@@ -527,14 +532,26 @@ static void mesh_batch_cache_check_vertex_group(MeshBatchCache *cache,
}
}
-static void mesh_batch_cache_discard_shaded_batches(MeshBatchCache *cache)
+static void mesh_batch_cache_request_surface_batches(MeshBatchCache *cache)
{
+  mesh_batch_cache_add_request(cache, MBC_SURFACE);
+  DRW_batch_request(&cache->batch.surface);
+  if (cache->surface_per_mat) {
+    for (int i = 0; i < cache->mat_len; i++) {
+      DRW_batch_request(&cache->surface_per_mat[i]);
+    }
+  }
+}
+
+static void mesh_batch_cache_discard_surface_batches(MeshBatchCache *cache)
+{
+  GPU_BATCH_DISCARD_SAFE(cache->batch.surface);
  if (cache->surface_per_mat) {
    for (int i = 0; i < cache->mat_len; i++) {
      GPU_BATCH_DISCARD_SAFE(cache->surface_per_mat[i]);
    }
  }
-  cache->batch_ready &= ~MBC_SURF_PER_MAT;
+  cache->batch_ready &= ~MBC_SURFACE;
}
static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache *cache)
@@ -546,7 +563,7 @@ static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache *cache)
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.vcol);
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.orco);
}
- mesh_batch_cache_discard_shaded_batches(cache);
+ mesh_batch_cache_discard_surface_batches(cache);
mesh_cd_layers_type_clear(&cache->cd_used);
MEM_SAFE_FREE(cache->surface_per_mat);
@@ -586,10 +603,7 @@ static void mesh_batch_cache_discard_uvedit(MeshBatchCache *cache)
cache->cd_used.edit_uv = 0;
  /* Discard other batches that use vbo.uv */
- mesh_batch_cache_discard_shaded_batches(cache);
-
- GPU_BATCH_DISCARD_SAFE(cache->batch.surface);
- cache->batch_ready &= ~MBC_SURFACE;
+ mesh_batch_cache_discard_surface_batches(cache);
}
static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache *cache)
@@ -651,12 +665,8 @@ void DRW_mesh_batch_cache_dirty_tag(Mesh *me, int mode)
GPU_BATCH_DISCARD_SAFE(cache->batch.surface);
GPU_BATCH_DISCARD_SAFE(cache->batch.wire_loops);
GPU_BATCH_DISCARD_SAFE(cache->batch.wire_edges);
- if (cache->surface_per_mat) {
- for (int i = 0; i < cache->mat_len; i++) {
- GPU_BATCH_DISCARD_SAFE(cache->surface_per_mat[i]);
- }
- }
- cache->batch_ready &= ~(MBC_SURFACE | MBC_WIRE_EDGES | MBC_WIRE_LOOPS | MBC_SURF_PER_MAT);
+ mesh_batch_cache_discard_surface_batches(cache);
+ cache->batch_ready &= ~(MBC_SURFACE | MBC_WIRE_EDGES | MBC_WIRE_LOOPS);
break;
case BKE_MESH_BATCH_DIRTY_ALL:
cache->is_dirty = true;
@@ -782,8 +792,8 @@ GPUBatch *DRW_mesh_batch_cache_get_all_edges(Mesh *me)
GPUBatch *DRW_mesh_batch_cache_get_surface(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- mesh_batch_cache_add_request(cache, MBC_SURFACE);
- return DRW_batch_request(&cache->batch.surface);
+ mesh_batch_cache_request_surface_batches(cache);
+ return cache->batch.surface;
}
GPUBatch *DRW_mesh_batch_cache_get_loose_edges(Mesh *me)
@@ -793,9 +803,8 @@ GPUBatch *DRW_mesh_batch_cache_get_loose_edges(Mesh *me)
if (cache->no_loose_wire) {
return NULL;
}
- else {
- return DRW_batch_request(&cache->batch.loose_edges);
- }
+
+ return DRW_batch_request(&cache->batch.loose_edges);
}
GPUBatch *DRW_mesh_batch_cache_get_surface_weights(Mesh *me)
@@ -841,23 +850,15 @@ GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Mesh *me,
BLI_assert(gpumat_array_len == cache->mat_len);
mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
-
- mesh_batch_cache_add_request(cache, MBC_SURF_PER_MAT);
-
- for (int i = 0; i < cache->mat_len; i++) {
- DRW_batch_request(&cache->surface_per_mat[i]);
- }
+ mesh_batch_cache_request_surface_batches(cache);
return cache->surface_per_mat;
}
GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- mesh_batch_cache_add_request(cache, MBC_SURF_PER_MAT);
texpaint_request_active_uv(cache, me);
- for (int i = 0; i < cache->mat_len; i++) {
- DRW_batch_request(&cache->surface_per_mat[i]);
- }
+ mesh_batch_cache_request_surface_batches(cache);
return cache->surface_per_mat;
}
@@ -865,24 +866,24 @@ GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
texpaint_request_active_uv(cache, me);
- mesh_batch_cache_add_request(cache, MBC_SURFACE);
- return DRW_batch_request(&cache->batch.surface);
+ mesh_batch_cache_request_surface_batches(cache);
+ return cache->batch.surface;
}
GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
texpaint_request_active_vcol(cache, me);
- mesh_batch_cache_add_request(cache, MBC_SURFACE);
- return DRW_batch_request(&cache->batch.surface);
+ mesh_batch_cache_request_surface_batches(cache);
+ return cache->batch.surface;
}
GPUBatch *DRW_mesh_batch_cache_get_surface_sculpt(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
sculpt_request_active_vcol(cache, me);
- mesh_batch_cache_add_request(cache, MBC_SURFACE);
- return DRW_batch_request(&cache->batch.surface);
+ mesh_batch_cache_request_surface_batches(cache);
+ return cache->batch.surface;
}
int DRW_mesh_material_count_get(Mesh *me)
@@ -900,8 +901,7 @@ GPUVertBuf *DRW_mesh_batch_cache_pos_vertbuf_get(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
/* Request surface to trigger the vbo filling. Otherwise it may do nothing. */
- mesh_batch_cache_add_request(cache, MBC_SURFACE);
- DRW_batch_request(&cache->batch.surface);
+ mesh_batch_cache_request_surface_batches(cache);
DRW_vbo_request(NULL, &cache->final.vbo.pos_nor);
return cache->final.vbo.pos_nor;
@@ -1122,6 +1122,40 @@ void DRW_mesh_batch_cache_free_old(Mesh *me, int ctime)
mesh_cd_layers_type_clear(&cache->cd_used_over_time);
}
+#ifdef DEBUG
+/* Sanity check function to test if all requested batches are available. */
+static void drw_mesh_batch_cache_check_available(struct TaskGraph *task_graph, Mesh *me)
+{
+  MeshBatchCache *cache = mesh_batch_cache_get(me);
+  /* Make sure all requested batches have been set up. */
+  /* Note: the next line creates a different scheduling than in release builds, which can lead to
+   * some issues (see T77867, where we needed to disable this function in order to debug what was
+   * happening in release builds). */
+  BLI_task_graph_work_and_wait(task_graph);
+  for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
+    BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], 0));
+  }
+  for (int i = 0; i < sizeof(cache->final.vbo) / sizeof(void *); i++) {
+    BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->final.vbo)[i]));
+  }
+  for (int i = 0; i < sizeof(cache->final.ibo) / sizeof(void *); i++) {
+    BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->final.ibo)[i]));
+  }
+  for (int i = 0; i < sizeof(cache->cage.vbo) / sizeof(void *); i++) {
+    BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->cage.vbo)[i]));
+  }
+  for (int i = 0; i < sizeof(cache->cage.ibo) / sizeof(void *); i++) {
+    BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->cage.ibo)[i]));
+  }
+  for (int i = 0; i < sizeof(cache->uv_cage.vbo) / sizeof(void *); i++) {
+    BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->uv_cage.vbo)[i]));
+  }
+  for (int i = 0; i < sizeof(cache->uv_cage.ibo) / sizeof(void *); i++) {
+    BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->uv_cage.ibo)[i]));
+  }
+}
+#endif
+
/* Can be called for any surface type. Mesh *me is the final mesh. */
void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
Object *ob,
@@ -1142,10 +1176,9 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
/* Early out */
if (cache->batch_requested == 0) {
#ifdef DEBUG
-    goto check;
-#else
-    return;
+    drw_mesh_batch_cache_check_available(task_graph, me);
#endif
+    return;
}
/* Sanity check. */
@@ -1170,30 +1203,8 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
}
}
- /* HACK: if MBC_SURF_PER_MAT is requested and ibo.tris is already available, it won't have it's
- * index ranges initialized. So discard ibo.tris in order to recreate it.
- * This needs to happen before saved_elem_ranges is populated. */
- if ((batch_requested & MBC_SURF_PER_MAT) != 0 && (cache->batch_ready & MBC_SURF_PER_MAT) == 0) {
- FOREACH_MESH_BUFFER_CACHE (cache, mbuffercache) {
- GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.tris);
- }
- /* Clear all batches that reference ibo.tris. */
- GPU_BATCH_CLEAR_SAFE(cache->batch.surface);
- GPU_BATCH_CLEAR_SAFE(cache->batch.surface_weights);
- GPU_BATCH_CLEAR_SAFE(cache->batch.edit_mesh_analysis);
- GPU_BATCH_CLEAR_SAFE(cache->batch.edit_triangles);
- GPU_BATCH_CLEAR_SAFE(cache->batch.edit_lnor);
- GPU_BATCH_CLEAR_SAFE(cache->batch.edit_selection_faces);
- for (int i = 0; i < cache->mat_len; i++) {
- GPU_BATCH_CLEAR_SAFE(cache->surface_per_mat[i]);
- }
-
- cache->batch_ready &= ~(MBC_SURFACE | MBC_SURFACE_WEIGHTS | MBC_EDIT_MESH_ANALYSIS |
- MBC_EDIT_TRIANGLES | MBC_EDIT_LNOR | MBC_EDIT_SELECTION_FACES);
- }
-
if (batch_requested &
- (MBC_SURFACE | MBC_SURF_PER_MAT | MBC_WIRE_LOOPS_UVS | MBC_EDITUV_FACES_STRETCH_AREA |
+ (MBC_SURFACE | MBC_WIRE_LOOPS_UVS | MBC_EDITUV_FACES_STRETCH_AREA |
MBC_EDITUV_FACES_STRETCH_ANGLE | MBC_EDITUV_FACES | MBC_EDITUV_EDGES | MBC_EDITUV_VERTS)) {
/* Modifiers will only generate an orco layer if the mesh is deformed. */
if (cache->cd_needed.orco != 0) {
@@ -1246,7 +1257,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
GPU_BATCH_CLEAR_SAFE(cache->surface_per_mat[i]);
}
GPU_BATCH_CLEAR_SAFE(cache->batch.surface);
- cache->batch_ready &= ~(MBC_SURFACE | MBC_SURF_PER_MAT);
+ cache->batch_ready &= ~(MBC_SURFACE);
mesh_cd_layers_type_merge(&cache->cd_used, cache->cd_needed);
}
@@ -1262,9 +1273,11 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
FOREACH_MESH_BUFFER_CACHE (cache, mbuffercache) {
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.edituv_data);
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.fdots_uv);
+ GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.fdots_edituv_data);
GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_tris);
GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_lines);
GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_points);
+ GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.edituv_fdots);
}
/* We only clear the batches as they may already have been
* referenced. */
@@ -1282,10 +1295,9 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
/* Second chance to early out */
if ((batch_requested & ~cache->batch_ready) == 0) {
#ifdef DEBUG
-    goto check;
-#else
-    return;
+    drw_mesh_batch_cache_check_available(task_graph, me);
#endif
+    return;
}
cache->batch_ready |= batch_requested;
@@ -1537,32 +1549,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
ts,
use_hide);
#ifdef DEBUG
-check:
- /* Make sure all requested batches have been setup. */
- /* TODO(jbakker): we should move this to the draw_manager but that needs refactoring and
- * additional looping.*/
- BLI_task_graph_work_and_wait(task_graph);
- for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
- BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], 0));
- }
- for (int i = 0; i < sizeof(cache->final.vbo) / sizeof(void *); i++) {
- BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->final.vbo)[i]));
- }
- for (int i = 0; i < sizeof(cache->final.ibo) / sizeof(void *); i++) {
- BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->final.ibo)[i]));
- }
- for (int i = 0; i < sizeof(cache->cage.vbo) / sizeof(void *); i++) {
- BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->cage.vbo)[i]));
- }
- for (int i = 0; i < sizeof(cache->cage.ibo) / sizeof(void *); i++) {
- BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->cage.ibo)[i]));
- }
- for (int i = 0; i < sizeof(cache->uv_cage.vbo) / sizeof(void *); i++) {
- BLI_assert(!DRW_vbo_requested(((GPUVertBuf **)&cache->uv_cage.vbo)[i]));
- }
- for (int i = 0; i < sizeof(cache->uv_cage.ibo) / sizeof(void *); i++) {
- BLI_assert(!DRW_ibo_requested(((GPUIndexBuf **)&cache->uv_cage.ibo)[i]));
- }
+  drw_mesh_batch_cache_check_available(task_graph, me);
#endif
}
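
Illustrative sketch (not part of the patch above): the refactor folds the old MBC_SURF_PER_MAT flag into MBC_SURFACE and routes every caller through a single mesh_batch_cache_request_surface_batches() helper that requests both the main surface batch and each per-material batch. The standalone C program below mimics that pattern with simplified stand-in types (Batch, Cache, batch_request and cache_request_surface_batches are hypothetical placeholders, not the Blender draw-cache API).

/* Stand-in types; the real MeshBatchCache and DRW_batch_request() are far richer. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum { MBC_SURFACE = (1 << 0) }; /* one flag now covers the per-material batches too */

typedef struct Batch { bool requested; } Batch;

typedef struct Cache {
  int batch_requested;
  Batch surface;          /* main surface batch */
  Batch *surface_per_mat; /* one batch per material slot */
  int mat_len;
} Cache;

static void batch_request(Batch *batch) { batch->requested = true; }

/* Mirrors mesh_batch_cache_request_surface_batches(): flag the request once,
 * then request the surface batch and every per-material batch together. */
static void cache_request_surface_batches(Cache *cache)
{
  cache->batch_requested |= MBC_SURFACE;
  batch_request(&cache->surface);
  if (cache->surface_per_mat) {
    for (int i = 0; i < cache->mat_len; i++) {
      batch_request(&cache->surface_per_mat[i]);
    }
  }
}

int main(void)
{
  Cache cache = {0};
  cache.mat_len = 3;
  cache.surface_per_mat = calloc(cache.mat_len, sizeof(Batch));

  /* Every caller that needs surface geometry goes through the same helper,
   * so the per-material batches can never be requested separately again. */
  cache_request_surface_batches(&cache);

  printf("surface: %d, per-mat[2]: %d\n",
         cache.surface.requested, cache.surface_per_mat[2].requested);

  free(cache.surface_per_mat);
  return 0;
}

Centralizing the request this way is what allows the patch to drop the separate MBC_SURF_PER_MAT bookkeeping and the removed HACK that discarded ibo.tris to rebuild its per-material index ranges.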