
git.blender.org/blender.git
author     Jeroen Bakker <jbakker>             2020-07-17 14:47:10 +0300
committer  Jeroen Bakker <jeroen@blender.org>  2020-07-17 14:47:53 +0300
commit     9582797d4b50a18040e96ae07aa8c7643cbcc25a (patch)
tree       d7dc8cf7bd4b6e82cdd059699d86057d7101b330 /source/blender/draw/intern/draw_cache_impl_mesh.c
parent     bf2bb6db26113d4c097d3f9201d40d295da223f5 (diff)
Fix T77867: Link Duplicate Object crashes during batch creation
When using linked duplicate objects, it could happen that one object was calculating the GPU buffers while a second object was marking those buffers invalid, introducing threading issues.

This patch fixes the problem by combining the surface and surface-per-material batches. Most of the time the per-material batches are used, and when they are requested you will most likely also need the surface batch for the depth tests and overlays. In tests this slightly improves performance, as batches are no longer thrown away unused. After this patch we can add a quick path for meshes with one or two materials.

Alternative approaches that were evaluated:
- Synchronous extraction per object: reduced performance too much (-15%) ({D8292}).
- Post checks: reduced the threading issues, but didn't solve them.
- Separating preparation and execution of the extraction ({D8312}).

Reviewed By: Clément Foucault

Differential Revision: https://developer.blender.org/D8329
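
For readers following along, here is a minimal standalone sketch of the pattern the patch introduces (plain C, not Blender's actual API; the Batch/MeshCache types and the REQ_SURFACE flag are illustrative stand-ins for GPUBatch, MeshBatchCache and MBC_SURFACE): a single helper requests the surface batch and all per-material batches together, and a symmetric helper discards them together, so the two batch families can no longer be created and invalidated independently.

#include <stdio.h>
#include <stdlib.h>

typedef struct Batch {
  int ready;
} Batch;

typedef struct MeshCache {
  Batch surface;            /* single surface batch (depth pass, overlays) */
  Batch *per_mat;           /* one batch per material slot */
  int mat_len;
  unsigned batch_requested; /* request bitflags, cf. MBC_SURFACE */
} MeshCache;

enum { REQ_SURFACE = 1 << 0 };

static void request_surface_batches(MeshCache *cache)
{
  /* One entry point requests the surface batch AND all per-material
   * batches, replacing the two independent request flags (MBC_SURFACE
   * vs. the removed MBC_SURF_PER_MAT) whose separate lifetimes allowed
   * one linked-duplicate object to invalidate buffers another object
   * was still filling. */
  cache->batch_requested |= REQ_SURFACE;
  cache->surface.ready = 1;
  if (cache->per_mat) {
    for (int i = 0; i < cache->mat_len; i++) {
      cache->per_mat[i].ready = 1;
    }
  }
}

static void discard_surface_batches(MeshCache *cache)
{
  /* Discard is symmetric: surface and per-material batches are always
   * thrown away as one unit, under one flag. */
  cache->surface.ready = 0;
  if (cache->per_mat) {
    for (int i = 0; i < cache->mat_len; i++) {
      cache->per_mat[i].ready = 0;
    }
  }
  cache->batch_requested &= ~REQ_SURFACE;
}

int main(void)
{
  MeshCache cache = {0};
  cache.mat_len = 2;
  cache.per_mat = calloc((size_t)cache.mat_len, sizeof(Batch));

  request_surface_batches(&cache);
  printf("surface=%d per_mat={%d,%d}\n",
         cache.surface.ready, cache.per_mat[0].ready, cache.per_mat[1].ready);

  discard_surface_batches(&cache);
  printf("after discard: surface=%d\n", cache.surface.ready);

  free(cache.per_mat);
  return 0;
}

In the real patch, every getter that previously requested only one of the two families (DRW_mesh_batch_cache_get_surface, DRW_mesh_batch_cache_get_surface_shaded, the texpaint/vertpaint/sculpt variants) now funnels through mesh_batch_cache_request_surface_batches(), as the diff below shows.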
Diffstat (limited to 'source/blender/draw/intern/draw_cache_impl_mesh.c')
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_mesh.c  88
1 file changed, 31 insertions(+), 57 deletions(-)
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.c b/source/blender/draw/intern/draw_cache_impl_mesh.c
index 9596db5e78b..410d59b557b 100644
--- a/source/blender/draw/intern/draw_cache_impl_mesh.c
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.c
@@ -527,14 +527,26 @@ static void mesh_batch_cache_check_vertex_group(MeshBatchCache *cache,
}
}
-static void mesh_batch_cache_discard_shaded_batches(MeshBatchCache *cache)
+static void mesh_batch_cache_request_surface_batches(MeshBatchCache *cache)
{
+ mesh_batch_cache_add_request(cache, MBC_SURFACE);
+ DRW_batch_request(&cache->batch.surface);
+ if (cache->surface_per_mat) {
+ for (int i = 0; i < cache->mat_len; i++) {
+ DRW_batch_request(&cache->surface_per_mat[i]);
+ }
+ }
+}
+
+static void mesh_batch_cache_discard_surface_batches(MeshBatchCache *cache)
+{
+ GPU_BATCH_DISCARD_SAFE(cache->batch.surface);
if (cache->surface_per_mat) {
for (int i = 0; i < cache->mat_len; i++) {
GPU_BATCH_DISCARD_SAFE(cache->surface_per_mat[i]);
}
}
- cache->batch_ready &= ~MBC_SURF_PER_MAT;
+ cache->batch_ready &= ~MBC_SURFACE;
}
static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache *cache)
@@ -546,7 +558,7 @@ static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache *cache)
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.vcol);
GPU_VERTBUF_DISCARD_SAFE(mbufcache->vbo.orco);
}
- mesh_batch_cache_discard_shaded_batches(cache);
+ mesh_batch_cache_discard_surface_batches(cache);
mesh_cd_layers_type_clear(&cache->cd_used);
MEM_SAFE_FREE(cache->surface_per_mat);
@@ -586,10 +598,7 @@ static void mesh_batch_cache_discard_uvedit(MeshBatchCache *cache)
cache->cd_used.edit_uv = 0;
/* Discard other batches that uses vbo.uv */
- mesh_batch_cache_discard_shaded_batches(cache);
-
- GPU_BATCH_DISCARD_SAFE(cache->batch.surface);
- cache->batch_ready &= ~MBC_SURFACE;
+ mesh_batch_cache_discard_surface_batches(cache);
}
static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache *cache)
@@ -651,12 +660,8 @@ void DRW_mesh_batch_cache_dirty_tag(Mesh *me, int mode)
GPU_BATCH_DISCARD_SAFE(cache->batch.surface);
GPU_BATCH_DISCARD_SAFE(cache->batch.wire_loops);
GPU_BATCH_DISCARD_SAFE(cache->batch.wire_edges);
- if (cache->surface_per_mat) {
- for (int i = 0; i < cache->mat_len; i++) {
- GPU_BATCH_DISCARD_SAFE(cache->surface_per_mat[i]);
- }
- }
- cache->batch_ready &= ~(MBC_SURFACE | MBC_WIRE_EDGES | MBC_WIRE_LOOPS | MBC_SURF_PER_MAT);
+ mesh_batch_cache_discard_surface_batches(cache);
+ cache->batch_ready &= ~(MBC_SURFACE | MBC_WIRE_EDGES | MBC_WIRE_LOOPS);
break;
case BKE_MESH_BATCH_DIRTY_ALL:
cache->is_dirty = true;
@@ -782,8 +787,8 @@ GPUBatch *DRW_mesh_batch_cache_get_all_edges(Mesh *me)
GPUBatch *DRW_mesh_batch_cache_get_surface(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- mesh_batch_cache_add_request(cache, MBC_SURFACE);
- return DRW_batch_request(&cache->batch.surface);
+ mesh_batch_cache_request_surface_batches(cache);
+ return cache->batch.surface;
}
GPUBatch *DRW_mesh_batch_cache_get_loose_edges(Mesh *me)
@@ -841,23 +846,15 @@ GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Mesh *me,
BLI_assert(gpumat_array_len == cache->mat_len);
mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
-
- mesh_batch_cache_add_request(cache, MBC_SURF_PER_MAT);
-
- for (int i = 0; i < cache->mat_len; i++) {
- DRW_batch_request(&cache->surface_per_mat[i]);
- }
+ mesh_batch_cache_request_surface_batches(cache);
return cache->surface_per_mat;
}
GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- mesh_batch_cache_add_request(cache, MBC_SURF_PER_MAT);
texpaint_request_active_uv(cache, me);
- for (int i = 0; i < cache->mat_len; i++) {
- DRW_batch_request(&cache->surface_per_mat[i]);
- }
+ mesh_batch_cache_request_surface_batches(cache);
return cache->surface_per_mat;
}
@@ -865,24 +862,24 @@ GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
texpaint_request_active_uv(cache, me);
- mesh_batch_cache_add_request(cache, MBC_SURFACE);
- return DRW_batch_request(&cache->batch.surface);
+ mesh_batch_cache_request_surface_batches(cache);
+ return cache->batch.surface;
}
GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
texpaint_request_active_vcol(cache, me);
- mesh_batch_cache_add_request(cache, MBC_SURFACE);
- return DRW_batch_request(&cache->batch.surface);
+ mesh_batch_cache_request_surface_batches(cache);
+ return cache->batch.surface;
}
GPUBatch *DRW_mesh_batch_cache_get_surface_sculpt(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
sculpt_request_active_vcol(cache, me);
- mesh_batch_cache_add_request(cache, MBC_SURFACE);
- return DRW_batch_request(&cache->batch.surface);
+ mesh_batch_cache_request_surface_batches(cache);
+ return cache->batch.surface;
}
int DRW_mesh_material_count_get(Mesh *me)
@@ -900,8 +897,7 @@ GPUVertBuf *DRW_mesh_batch_cache_pos_vertbuf_get(Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
/* Request surface to trigger the vbo filling. Otherwise it may do nothing. */
- mesh_batch_cache_add_request(cache, MBC_SURFACE);
- DRW_batch_request(&cache->batch.surface);
+ mesh_batch_cache_request_surface_batches(cache);
DRW_vbo_request(NULL, &cache->final.vbo.pos_nor);
return cache->final.vbo.pos_nor;
@@ -1203,30 +1199,8 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
}
}
- /* HACK: if MBC_SURF_PER_MAT is requested and ibo.tris is already available, it won't have it's
- * index ranges initialized. So discard ibo.tris in order to recreate it.
- * This needs to happen before saved_elem_ranges is populated. */
- if ((batch_requested & MBC_SURF_PER_MAT) != 0 && (cache->batch_ready & MBC_SURF_PER_MAT) == 0) {
- FOREACH_MESH_BUFFER_CACHE (cache, mbuffercache) {
- GPU_INDEXBUF_DISCARD_SAFE(mbuffercache->ibo.tris);
- }
- /* Clear all batches that reference ibo.tris. */
- GPU_BATCH_CLEAR_SAFE(cache->batch.surface);
- GPU_BATCH_CLEAR_SAFE(cache->batch.surface_weights);
- GPU_BATCH_CLEAR_SAFE(cache->batch.edit_mesh_analysis);
- GPU_BATCH_CLEAR_SAFE(cache->batch.edit_triangles);
- GPU_BATCH_CLEAR_SAFE(cache->batch.edit_lnor);
- GPU_BATCH_CLEAR_SAFE(cache->batch.edit_selection_faces);
- for (int i = 0; i < cache->mat_len; i++) {
- GPU_BATCH_CLEAR_SAFE(cache->surface_per_mat[i]);
- }
-
- cache->batch_ready &= ~(MBC_SURFACE | MBC_SURFACE_WEIGHTS | MBC_EDIT_MESH_ANALYSIS |
- MBC_EDIT_TRIANGLES | MBC_EDIT_LNOR | MBC_EDIT_SELECTION_FACES);
- }
-
if (batch_requested &
- (MBC_SURFACE | MBC_SURF_PER_MAT | MBC_WIRE_LOOPS_UVS | MBC_EDITUV_FACES_STRETCH_AREA |
+ (MBC_SURFACE | MBC_WIRE_LOOPS_UVS | MBC_EDITUV_FACES_STRETCH_AREA |
MBC_EDITUV_FACES_STRETCH_ANGLE | MBC_EDITUV_FACES | MBC_EDITUV_EDGES | MBC_EDITUV_VERTS)) {
/* Modifiers will only generate an orco layer if the mesh is deformed. */
if (cache->cd_needed.orco != 0) {
@@ -1279,7 +1253,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
GPU_BATCH_CLEAR_SAFE(cache->surface_per_mat[i]);
}
GPU_BATCH_CLEAR_SAFE(cache->batch.surface);
- cache->batch_ready &= ~(MBC_SURFACE | MBC_SURF_PER_MAT);
+ cache->batch_ready &= ~(MBC_SURFACE);
mesh_cd_layers_type_merge(&cache->cd_used, cache->cd_needed);
}