git.blender.org/blender.git
Diffstat (limited to 'source/blender/draw/intern/draw_cache_impl_mesh.c')
 source/blender/draw/intern/draw_cache_impl_mesh.c | 73 ++++++++++-----------
 1 file changed, 36 insertions(+), 37 deletions(-)
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.c b/source/blender/draw/intern/draw_cache_impl_mesh.c
index d090832dc4b..7217be106ae 100644
--- a/source/blender/draw/intern/draw_cache_impl_mesh.c
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.c
@@ -395,7 +395,7 @@ static void drw_mesh_weight_state_extract(Object *ob,
wstate->flags |= DRW_MESH_WEIGHT_STATE_MULTIPAINT |
(ts->auto_normalize ? DRW_MESH_WEIGHT_STATE_AUTO_NORMALIZE : 0);
- if (me->editflag & ME_EDIT_MIRROR_X) {
+ if (me->symmetry & ME_SYMMETRY_X) {
BKE_object_defgroup_mirror_selection(ob,
wstate->defgroup_len,
wstate->defgroup_sel,
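Note: this hunk tracks the move of the X-mirror option from the edit-mode flag (me->editflag & ME_EDIT_MIRROR_X) to the mesh-level symmetry bitfield (me->symmetry & ME_SYMMETRY_X), so weight-paint mirroring keys off the same symmetry state as the rest of the mesh tools. A minimal standalone sketch of the same bitfield test; the enum values below are illustrative stand-ins, not copied from DNA_mesh_types.h:

    /* Sketch: testing an axis-symmetry bitfield the way the new code does.
     * Values are illustrative, not Blender's actual definitions. */
    #include <stdio.h>

    typedef enum eMeshSymmetryType {
      ME_SYMMETRY_X = (1 << 0),
      ME_SYMMETRY_Y = (1 << 1),
      ME_SYMMETRY_Z = (1 << 2),
    } eMeshSymmetryType;

    int main(void)
    {
      char symmetry = ME_SYMMETRY_X; /* mesh with X symmetry enabled */
      if (symmetry & ME_SYMMETRY_X) {
        printf("mirror vertex-group selection across X\n");
      }
      return 0;
    }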
@@ -496,6 +496,8 @@ static void mesh_batch_cache_init(Mesh *me)
cache->mat_len = mesh_render_mat_len_get(me);
cache->surface_per_mat = MEM_callocN(sizeof(*cache->surface_per_mat) * cache->mat_len, __func__);
+ cache->final.tris_per_mat = MEM_callocN(sizeof(*cache->final.tris_per_mat) * cache->mat_len,
+ __func__);
cache->is_dirty = false;
cache->batch_ready = 0;
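Note: mesh_batch_cache_init now also allocates one index-buffer slot per material (cache->final.tris_per_mat), sized exactly like surface_per_mat. Because MEM_callocN zero-initializes, every slot starts as NULL and later code can index the array without guarding it. A simplified plain-C model of the pattern (calloc standing in for MEM_callocN, CacheModel/GPUIndexBufStub invented for the sketch):

    /* Model: one zero-initialized pointer slot per material, allocated
     * up front so per-material loops never need a NULL-array guard. */
    #include <stdlib.h>

    typedef struct GPUIndexBufStub GPUIndexBufStub; /* opaque stand-in */

    typedef struct CacheModel {
      int mat_len;
      GPUIndexBufStub **tris_per_mat;
    } CacheModel;

    static void cache_model_init(CacheModel *cache, int mat_len)
    {
      cache->mat_len = mat_len;
      cache->tris_per_mat = calloc((size_t)mat_len, sizeof(*cache->tris_per_mat));
    }

    int main(void)
    {
      CacheModel cache;
      cache_model_init(&cache, 4);
      free(cache.tris_per_mat);
      return 0;
    }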
@@ -536,20 +538,16 @@ static void mesh_batch_cache_request_surface_batches(MeshBatchCache *cache)
{
mesh_batch_cache_add_request(cache, MBC_SURFACE);
DRW_batch_request(&cache->batch.surface);
- if (cache->surface_per_mat) {
- for (int i = 0; i < cache->mat_len; i++) {
- DRW_batch_request(&cache->surface_per_mat[i]);
- }
+ for (int i = 0; i < cache->mat_len; i++) {
+ DRW_batch_request(&cache->surface_per_mat[i]);
}
}
static void mesh_batch_cache_discard_surface_batches(MeshBatchCache *cache)
{
GPU_BATCH_DISCARD_SAFE(cache->batch.surface);
- if (cache->surface_per_mat) {
- for (int i = 0; i < cache->mat_len; i++) {
- GPU_BATCH_DISCARD_SAFE(cache->surface_per_mat[i]);
- }
+ for (int i = 0; i < cache->mat_len; i++) {
+ GPU_BATCH_DISCARD_SAFE(cache->surface_per_mat[i]);
}
cache->batch_ready &= ~MBC_SURFACE;
}
@@ -565,10 +563,6 @@ static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache *cache)
}
mesh_batch_cache_discard_surface_batches(cache);
mesh_cd_layers_type_clear(&cache->cd_used);
-
- MEM_SAFE_FREE(cache->surface_per_mat);
-
- cache->mat_len = 0;
}
static void mesh_batch_cache_discard_uvedit(MeshBatchCache *cache)
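Note: two related lifetime changes above. The request/discard loops drop their `if (cache->surface_per_mat)` guards, and discard_shaded_tri stops freeing the array and resetting mat_len. Both are valid because init now always allocates the array and only mesh_batch_cache_clear (further down) frees it, so the array's lifetime matches the cache itself. A standalone sketch of that contract, assuming "discard" clears contents while "clear" tears everything down (names invented for the model):

    /* Lifetime contract: allocate in init, survive "discard", free in clear. */
    #include <stdlib.h>

    typedef struct CacheModel {
      int mat_len;
      void **surface_per_mat;
    } CacheModel;

    static void cache_discard_shaded(CacheModel *c)
    {
      for (int i = 0; i < c->mat_len; i++) {
        free(c->surface_per_mat[i]);  /* drop contents... */
        c->surface_per_mat[i] = NULL; /* ...but keep the array alive */
      }
    }

    static void cache_clear(CacheModel *c)
    {
      cache_discard_shaded(c);
      free(c->surface_per_mat); /* array itself dies with the cache */
      c->surface_per_mat = NULL;
      c->mat_len = 0;
    }

    int main(void)
    {
      CacheModel c;
      c.mat_len = 2;
      c.surface_per_mat = calloc(2, sizeof(*c.surface_per_mat));
      cache_clear(&c);
      return 0;
    }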
@@ -626,7 +620,7 @@ static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache *cache)
cache->batch_ready &= ~MBC_EDITUV;
}
-void DRW_mesh_batch_cache_dirty_tag(Mesh *me, int mode)
+void DRW_mesh_batch_cache_dirty_tag(Mesh *me, eMeshBatchDirtyMode mode)
{
MeshBatchCache *cache = me->runtime.batch_cache;
if (cache == NULL) {
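Note: the `mode` parameter changes from a bare int to the eMeshBatchDirtyMode enum, which lets the compiler type-check call sites and lets -Wswitch warn when a switch over the mode misses an enumerator. A sketch of the benefit; the enumerator names below are invented for illustration, not Blender's:

    /* With a typed mode, an unhandled enumerator in the switch is a
     * compile-time warning instead of a silent fallthrough. */
    typedef enum eDirtyModeModel {
      DIRTY_ALL_MODEL,
      DIRTY_SELECT_MODEL,
    } eDirtyModeModel;

    static void dirty_tag_model(eDirtyModeModel mode)
    {
      switch (mode) {
        case DIRTY_ALL_MODEL:
          /* rebuild everything */
          break;
        case DIRTY_SELECT_MODEL:
          /* refresh selection buffers only */
          break;
      }
    }

    int main(void)
    {
      dirty_tag_model(DIRTY_SELECT_MODEL);
      return 0;
    }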
@@ -712,17 +706,23 @@ static void mesh_batch_cache_clear(Mesh *me)
GPU_INDEXBUF_DISCARD_SAFE(ibos[i]);
}
}
+
+ for (int i = 0; i < cache->mat_len; i++) {
+ GPU_INDEXBUF_DISCARD_SAFE(cache->final.tris_per_mat[i]);
+ }
+ MEM_SAFE_FREE(cache->final.tris_per_mat);
+
for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
GPUBatch **batch = (GPUBatch **)&cache->batch;
GPU_BATCH_DISCARD_SAFE(batch[i]);
}
mesh_batch_cache_discard_shaded_tri(cache);
-
mesh_batch_cache_discard_uvedit(cache);
+ MEM_SAFE_FREE(cache->surface_per_mat);
+ cache->mat_len = 0;
cache->batch_ready = 0;
-
drw_mesh_weight_state_clear(&cache->weight_state);
}
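Note: ordering matters in the new clear path: each per-material index buffer is discarded before tris_per_mat (the array holding the pointers) is freed; freeing the array first would leak every GPU buffer it referenced. A plain-C model of the discard-then-free order (free standing in for the GPU/MEM macros):

    /* Discard elements before freeing the holder array; reversing the
     * two steps leaks every per-material buffer. */
    #include <stdlib.h>

    static void clear_per_mat(void **tris_per_mat, int mat_len)
    {
      for (int i = 0; i < mat_len; i++) {
        free(tris_per_mat[i]); /* stand-in for GPU_INDEXBUF_DISCARD_SAFE */
      }
      free(tris_per_mat); /* stand-in for MEM_SAFE_FREE */
    }

    int main(void)
    {
      void **arr = calloc(3, sizeof(*arr));
      clear_per_mat(arr, 3);
      return 0;
    }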
@@ -891,6 +891,17 @@ int DRW_mesh_material_count_get(Mesh *me)
return mesh_render_mat_len_get(me);
}
+GPUBatch *DRW_mesh_batch_cache_get_sculpt_overlays(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ cache->cd_needed.sculpt_overlays = 1;
+ mesh_batch_cache_add_request(cache, MBC_SCULPT_OVERLAYS);
+ DRW_batch_request(&cache->batch.sculpt_overlays);
+
+ return cache->batch.sculpt_overlays;
+}
+
/** \} */
/* ---------------------------------------------------------------------- */
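Note: the new getter follows the draw cache's request-then-fill pattern: it flags the sculpt custom-data layer as needed, registers the MBC_SCULPT_OVERLAYS request, and returns a batch handle immediately; the handle's buffers are only filled later by DRW_mesh_batch_cache_create_requested. A standalone model of that deferred pattern (types and names simplified for the sketch, not Blender's):

    /* Model: a getter hands out a stable handle; a later pass fills it. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct BatchModel { int filled; } BatchModel;

    typedef struct CacheModel {
      BatchModel *sculpt_overlays;
      unsigned requested;
    } CacheModel;

    enum { REQ_SCULPT_OVERLAYS = 1u << 0 };

    static BatchModel *get_sculpt_overlays(CacheModel *c)
    {
      c->requested |= REQ_SCULPT_OVERLAYS;
      if (c->sculpt_overlays == NULL) {
        c->sculpt_overlays = calloc(1, sizeof(*c->sculpt_overlays));
      }
      return c->sculpt_overlays; /* valid to hold before it is filled */
    }

    static void create_requested(CacheModel *c)
    {
      if (c->requested & REQ_SCULPT_OVERLAYS) {
        c->sculpt_overlays->filled = 1; /* Blender uploads VBO/IBO here */
      }
    }

    int main(void)
    {
      CacheModel c = {0};
      BatchModel *b = get_sculpt_overlays(&c);
      create_requested(&c);
      printf("filled: %d\n", b->filled);
      free(b);
      return 0;
    }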
@@ -1165,7 +1176,6 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
const bool use_hide)
{
BLI_assert(task_graph);
- GPUIndexBuf **saved_elem_ranges = NULL;
const ToolSettings *ts = NULL;
if (scene) {
ts = scene->toolsettings;
@@ -1242,23 +1252,15 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
if (cache->cd_used.orco != cache->cd_needed.orco) {
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.orco);
}
+ if (cache->cd_used.sculpt_overlays != cache->cd_needed.sculpt_overlays) {
+ GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.sculpt_data);
+ }
if (((cache->cd_used.vcol & cache->cd_needed.vcol) != cache->cd_needed.vcol) ||
((cache->cd_used.sculpt_vcol & cache->cd_needed.sculpt_vcol) !=
cache->cd_needed.sculpt_vcol)) {
GPU_VERTBUF_DISCARD_SAFE(mbuffercache->vbo.vcol);
}
}
- /* XXX save element buffer to avoid recreating them.
- * This is only if the cd_needed changes so it is ok to keep them.*/
- if (cache->surface_per_mat[0] && cache->surface_per_mat[0]->elem) {
- saved_elem_ranges = MEM_callocN(sizeof(saved_elem_ranges) * cache->mat_len, __func__);
- for (int i = 0; i < cache->mat_len; i++) {
- saved_elem_ranges[i] = cache->surface_per_mat[i]->elem;
- /* Avoid deletion as the batch is owner. */
- cache->surface_per_mat[i]->elem = NULL;
- cache->surface_per_mat[i]->flag &= ~GPU_BATCH_OWNS_INDEX;
- }
- }
/* We can't discard batches at this point as they have been
* referenced for drawing. Just clear them in place. */
for (int i = 0; i < cache->mat_len; i++) {
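Note: this hunk deletes the saved_elem_ranges workaround. Previously every per-material batch shared views into one triangle IBO, so before clearing the batches the code stashed each element buffer, stripped GPU_BATCH_OWNS_INDEX so the clear would not delete it, and re-attached it afterwards. With a persistent cache->final.tris_per_mat[i] slot per material, the index buffers outlive the batches and the stash/restore disappears. (Incidentally, the removed MEM_callocN sized the array with sizeof(saved_elem_ranges), a pointer-to-pointer, where sizeof(*saved_elem_ranges) was presumably meant; both are pointer-sized, so it was harmless.) A compact model of why batch-owned buffers forced the dance (names invented for the sketch):

    /* Old scheme: the batch owns its element buffer, so clearing the
     * batch destroys the IBO unless ownership is stripped first. */
    #include <stdlib.h>

    typedef struct IboModel { int dummy; } IboModel;

    typedef struct BatchModel {
      IboModel *elem;
      int owns_elem;
    } BatchModel;

    static void batch_clear(BatchModel *b)
    {
      if (b->owns_elem) {
        free(b->elem); /* ownership means the IBO dies here */
      }
      b->elem = NULL;
      b->owns_elem = 0;
    }

    int main(void)
    {
      BatchModel b = {0};
      b.elem = malloc(sizeof(*b.elem));
      b.owns_elem = 1;
      batch_clear(&b); /* IBO destroyed with the batch */
      return 0;
    }

With cache-owned buffers, clearing a batch just drops the reference, and mesh_batch_cache_clear frees the IBOs once, as shown in the earlier hunk.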
@@ -1333,6 +1335,11 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
if (DRW_batch_requested(cache->batch.all_verts, GPU_PRIM_POINTS)) {
DRW_vbo_request(cache->batch.all_verts, &mbufcache->vbo.pos_nor);
}
+ if (DRW_batch_requested(cache->batch.sculpt_overlays, GPU_PRIM_TRIS)) {
+ DRW_ibo_request(cache->batch.sculpt_overlays, &mbufcache->ibo.tris);
+ DRW_vbo_request(cache->batch.sculpt_overlays, &mbufcache->vbo.pos_nor);
+ DRW_vbo_request(cache->batch.sculpt_overlays, &mbufcache->vbo.sculpt_data);
+ }
if (DRW_batch_requested(cache->batch.all_edges, GPU_PRIM_LINES)) {
DRW_ibo_request(cache->batch.all_edges, &mbufcache->ibo.lines);
DRW_vbo_request(cache->batch.all_edges, &mbufcache->vbo.pos_nor);
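Note: the sculpt overlay batch is assembled from the shared triangle index buffer plus two vertex streams: pos_nor (positions and normals) and the new sculpt_data layer tracked by the discard logic above. The DRW_*_request calls only record dependencies; extraction fills the buffers afterwards. A small model of composing a batch from ordered vertex streams (names illustrative, not Blender's types):

    /* Model: a batch is one index buffer plus an ordered list of
     * vertex streams; order matters when attributes overlap. */
    #include <stdio.h>

    typedef struct StreamModel { const char *name; } StreamModel;

    typedef struct BatchModel {
      const StreamModel *streams[4];
      int streams_len;
    } BatchModel;

    static void batch_add_stream(BatchModel *b, const StreamModel *s)
    {
      b->streams[b->streams_len++] = s;
    }

    int main(void)
    {
      StreamModel pos_nor = {"pos_nor"};
      StreamModel sculpt_data = {"sculpt_data"};
      BatchModel b = {0};
      batch_add_stream(&b, &pos_nor);     /* geometry stream */
      batch_add_stream(&b, &sculpt_data); /* face-set / mask stream */
      printf("streams: %d\n", b.streams_len);
      return 0;
    }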
@@ -1378,13 +1385,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
/* Per Material */
for (int i = 0; i < cache->mat_len; i++) {
if (DRW_batch_requested(cache->surface_per_mat[i], GPU_PRIM_TRIS)) {
- if (saved_elem_ranges && saved_elem_ranges[i]) {
- /* XXX assign old element buffer range (it did not change).*/
- GPU_batch_elembuf_set(cache->surface_per_mat[i], saved_elem_ranges[i], true);
- }
- else {
- DRW_ibo_request(cache->surface_per_mat[i], &mbufcache->ibo.tris);
- }
+ DRW_ibo_request(cache->surface_per_mat[i], &mbufcache->tris_per_mat[i]);
/* Order matters. First ones override latest VBO's attributes. */
DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.lnor);
DRW_vbo_request(cache->surface_per_mat[i], &mbufcache->vbo.pos_nor);
@@ -1403,8 +1404,6 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
}
}
- MEM_SAFE_FREE(saved_elem_ranges);
-
mbufcache = (do_cage) ? &cache->cage : &cache->final;
/* Edit Mesh */
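Note: the cache keeps two parallel buffer sets, cage and final; the line above selects which one the edit-mesh batches read from, so a deformed edit cage can draw from its own buffers while the evaluated surface uses the final set. A one-pointer model of the selection (condition and names illustrative):

    /* Model: select between two buffer sets with one pointer. */
    #include <stdio.h>

    typedef struct BufSetModel { const char *name; } BufSetModel;

    int main(void)
    {
      BufSetModel cage = {"cage"}, final_set = {"final"};
      int do_cage = 1; /* illustrative: cage differs from final result */
      BufSetModel *mbufcache = do_cage ? &cage : &final_set;
      printf("edit-mesh batches read from: %s\n", mbufcache->name);
      return 0;
    }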