Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJeroen Bakker <j.bakker@atmind.nl>2022-03-02 18:03:01 +0300
committerJeroen Bakker <j.bakker@atmind.nl>2022-03-02 18:03:01 +0300
commita41c2a513761e8884e92526b069ff6eed8168676 (patch)
treee624093127815a09d2807dccddaabea35510e154 /source/blender/draw
parenta23b4429915ca8597510b57353c4df331487c620 (diff)
parentc23ec04b4e30f300a670f1cb1dc882e0608d09ad (diff)
Merge branch 'master' into temp-image-buffer-rasterizertemp-image-buffer-rasterizer
Diffstat (limited to 'source/blender/draw')
-rw-r--r--source/blender/draw/CMakeLists.txt2
-rw-r--r--source/blender/draw/engines/eevee/eevee_data.c124
-rw-r--r--source/blender/draw/engines/eevee/eevee_depth_of_field.c5
-rw-r--r--source/blender/draw/engines/eevee/eevee_motion_blur.c254
-rw-r--r--source/blender/draw/engines/eevee/eevee_private.h48
-rw-r--r--source/blender/draw/engines/eevee/eevee_shadows.c6
-rw-r--r--source/blender/draw/engines/image/image_buffer_cache.hh131
-rw-r--r--source/blender/draw/engines/image/image_drawing_mode.hh74
-rw-r--r--source/blender/draw/engines/image/image_engine.cc1
-rw-r--r--source/blender/draw/engines/image/image_instance_data.hh34
-rw-r--r--source/blender/draw/engines/image/image_usage.hh49
-rw-r--r--source/blender/draw/engines/overlay/overlay_engine.c12
-rw-r--r--source/blender/draw/engines/overlay/overlay_extra.c4
-rw-r--r--source/blender/draw/engines/overlay/overlay_motion_path.c4
-rw-r--r--source/blender/draw/engines/overlay/overlay_wireframe.c4
-rw-r--r--source/blender/draw/engines/overlay/shaders/paint_texture_frag.glsl2
-rw-r--r--source/blender/draw/engines/select/select_draw_utils.c2
-rw-r--r--source/blender/draw/engines/workbench/workbench_opaque.c4
-rw-r--r--source/blender/draw/intern/DRW_gpu_wrapper.hh5
-rw-r--r--source/blender/draw/intern/draw_cache.c26
-rw-r--r--source/blender/draw/intern/draw_cache_extract_mesh.cc2
-rw-r--r--source/blender/draw/intern/draw_cache_impl_curve.cc2
-rw-r--r--source/blender/draw/intern/draw_cache_impl_curves.cc18
-rw-r--r--source/blender/draw/intern/draw_cache_impl_mesh.c8
-rw-r--r--source/blender/draw/intern/draw_cache_impl_subdivision.cc157
-rw-r--r--source/blender/draw/intern/draw_common.c2
-rw-r--r--source/blender/draw/intern/draw_manager.c2
-rw-r--r--source/blender/draw/intern/draw_manager_data.c2
-rw-r--r--source/blender/draw/intern/draw_subdivision.h9
-rw-r--r--source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc2
-rw-r--r--source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc10
-rw-r--r--source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_sculpt_data.cc2
-rw-r--r--source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_tan.cc166
-rw-r--r--source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_vcol.cc2
-rw-r--r--source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_weights.cc40
-rw-r--r--source/blender/draw/intern/shaders/common_subdiv_custom_data_interp_comp.glsl2
-rw-r--r--source/blender/draw/intern/shaders/common_subdiv_ibo_lines_comp.glsl2
-rw-r--r--source/blender/draw/intern/shaders/common_subdiv_ibo_tris_comp.glsl2
-rw-r--r--source/blender/draw/intern/shaders/common_subdiv_lib.glsl7
-rw-r--r--source/blender/draw/intern/shaders/common_subdiv_normals_accumulate_comp.glsl61
-rw-r--r--source/blender/draw/intern/shaders/common_subdiv_normals_finalize_comp.glsl2
-rw-r--r--source/blender/draw/intern/shaders/common_subdiv_patch_evaluation_comp.glsl6
-rw-r--r--source/blender/draw/intern/shaders/common_subdiv_vbo_edge_fac_comp.glsl2
-rw-r--r--source/blender/draw/intern/shaders/common_subdiv_vbo_edituv_strech_angle_comp.glsl2
-rw-r--r--source/blender/draw/intern/shaders/common_subdiv_vbo_edituv_strech_area_comp.glsl2
-rw-r--r--source/blender/draw/intern/shaders/common_subdiv_vbo_lnor_comp.glsl21
-rw-r--r--source/blender/draw/intern/shaders/common_subdiv_vbo_sculpt_data_comp.glsl2
47 files changed, 851 insertions, 475 deletions
diff --git a/source/blender/draw/CMakeLists.txt b/source/blender/draw/CMakeLists.txt
index 4103d9a7087..98f75ad6106 100644
--- a/source/blender/draw/CMakeLists.txt
+++ b/source/blender/draw/CMakeLists.txt
@@ -221,6 +221,8 @@ set(SRC
engines/image/image_space_image.hh
engines/image/image_space_node.hh
engines/image/image_space.hh
+ engines/image/image_texture_info.hh
+ engines/image/image_usage.hh
engines/image/image_wrappers.hh
engines/workbench/workbench_engine.h
engines/workbench/workbench_private.h
diff --git a/source/blender/draw/engines/eevee/eevee_data.c b/source/blender/draw/engines/eevee/eevee_data.c
index 58676caa6f9..253981d321b 100644
--- a/source/blender/draw/engines/eevee/eevee_data.c
+++ b/source/blender/draw/engines/eevee/eevee_data.c
@@ -27,25 +27,12 @@
static void eevee_motion_blur_mesh_data_free(void *val)
{
- EEVEE_GeometryMotionData *geom_mb = (EEVEE_GeometryMotionData *)val;
- EEVEE_HairMotionData *hair_mb = (EEVEE_HairMotionData *)val;
- switch (geom_mb->type) {
- case EEVEE_MOTION_DATA_HAIR:
- for (int j = 0; j < hair_mb->psys_len; j++) {
- for (int i = 0; i < ARRAY_SIZE(hair_mb->psys[0].hair_pos); i++) {
- GPU_VERTBUF_DISCARD_SAFE(hair_mb->psys[j].hair_pos[i]);
- }
- for (int i = 0; i < ARRAY_SIZE(hair_mb->psys[0].hair_pos); i++) {
- DRW_TEXTURE_FREE_SAFE(hair_mb->psys[j].hair_pos_tx[i]);
- }
- }
- break;
-
- case EEVEE_MOTION_DATA_MESH:
- for (int i = 0; i < ARRAY_SIZE(geom_mb->vbo); i++) {
- GPU_VERTBUF_DISCARD_SAFE(geom_mb->vbo[i]);
- }
- break;
+ EEVEE_ObjectMotionData *mb_data = (EEVEE_ObjectMotionData *)val;
+ if (mb_data->hair_data != NULL) {
+ MEM_freeN(mb_data->hair_data);
+ }
+ if (mb_data->geometry_data != NULL) {
+ MEM_freeN(mb_data->geometry_data);
}
MEM_freeN(val);
}
@@ -84,39 +71,57 @@ static bool eevee_object_key_cmp(const void *a, const void *b)
return false;
}
+void EEVEE_motion_hair_step_free(EEVEE_HairMotionStepData *step_data)
+{
+ GPU_vertbuf_discard(step_data->hair_pos);
+ DRW_texture_free(step_data->hair_pos_tx);
+ MEM_freeN(step_data);
+}
+
void EEVEE_motion_blur_data_init(EEVEE_MotionBlurData *mb)
{
if (mb->object == NULL) {
mb->object = BLI_ghash_new(eevee_object_key_hash, eevee_object_key_cmp, "EEVEE Object Motion");
}
- if (mb->geom == NULL) {
- mb->geom = BLI_ghash_new(BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, "EEVEE Mesh Motion");
+ for (int i = 0; i < 2; i++) {
+ if (mb->position_vbo_cache[i] == NULL) {
+ mb->position_vbo_cache[i] = BLI_ghash_new(
+ BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, "EEVEE duplicate vbo cache");
+ }
+ if (mb->hair_motion_step_cache[i] == NULL) {
+ mb->hair_motion_step_cache[i] = BLI_ghash_new(
+ BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, "EEVEE hair motion step cache");
+ }
}
}
void EEVEE_motion_blur_data_free(EEVEE_MotionBlurData *mb)
{
if (mb->object) {
- BLI_ghash_free(mb->object, MEM_freeN, MEM_freeN);
+ BLI_ghash_free(mb->object, MEM_freeN, eevee_motion_blur_mesh_data_free);
mb->object = NULL;
}
- if (mb->geom) {
- BLI_ghash_free(mb->geom, NULL, eevee_motion_blur_mesh_data_free);
- mb->geom = NULL;
+ for (int i = 0; i < 2; i++) {
+ if (mb->position_vbo_cache[i]) {
+ BLI_ghash_free(mb->position_vbo_cache[i], NULL, (GHashValFreeFP)GPU_vertbuf_discard);
+ }
+ if (mb->hair_motion_step_cache[i]) {
+ BLI_ghash_free(
+ mb->hair_motion_step_cache[i], NULL, (GHashValFreeFP)EEVEE_motion_hair_step_free);
+ }
}
}
-EEVEE_ObjectMotionData *EEVEE_motion_blur_object_data_get(EEVEE_MotionBlurData *mb,
- Object *ob,
- bool hair)
+EEVEE_ObjectMotionData *EEVEE_motion_blur_object_data_get(EEVEE_MotionBlurData *mb, Object *ob)
{
if (mb->object == NULL) {
return NULL;
}
EEVEE_ObjectKey key, *key_p;
- /* Small hack to avoid another comparison. */
- key.ob = (Object *)((char *)ob + hair);
+ /* Assumes that all instances have the same object pointer. This is currently the case because
+ * instance objects are temporary objects on the stack. */
+ key.ob = ob;
DupliObject *dup = DRW_object_get_dupli(ob);
if (dup) {
key.parent = DRW_object_get_dupli_parent(ob);
@@ -139,53 +144,28 @@ EEVEE_ObjectMotionData *EEVEE_motion_blur_object_data_get(EEVEE_MotionBlurData *
return ob_step;
}
-static void *motion_blur_deform_data_get(EEVEE_MotionBlurData *mb, Object *ob, bool hair)
+EEVEE_GeometryMotionData *EEVEE_motion_blur_geometry_data_get(EEVEE_ObjectMotionData *mb_data)
{
- if (mb->geom == NULL) {
- return NULL;
+ if (mb_data->geometry_data == NULL) {
+ EEVEE_GeometryMotionData *geom_step = MEM_callocN(sizeof(EEVEE_GeometryMotionData), __func__);
+ geom_step->type = EEVEE_MOTION_DATA_MESH;
+ mb_data->geometry_data = geom_step;
}
- DupliObject *dup = DRW_object_get_dupli(ob);
- void *key;
- if (dup) {
- key = dup->ob;
- }
- else {
- key = ob;
- }
- /* Only use data for object that have no modifiers. */
- if (!BKE_object_is_modified(DRW_context_state_get()->scene, ob)) {
- key = ob->data;
- }
- key = (char *)key + (int)hair;
- EEVEE_GeometryMotionData *geom_step = BLI_ghash_lookup(mb->geom, key);
- if (geom_step == NULL) {
- if (hair) {
- EEVEE_HairMotionData *hair_step;
- /* Ugly, we allocate for each modifiers and just fill based on modifier index in the list. */
- int psys_len = (ob->type != OB_CURVES) ? BLI_listbase_count(&ob->modifiers) : 1;
- hair_step = MEM_callocN(sizeof(EEVEE_HairMotionData) + sizeof(hair_step->psys[0]) * psys_len,
- __func__);
- hair_step->psys_len = psys_len;
- geom_step = (EEVEE_GeometryMotionData *)hair_step;
- geom_step->type = EEVEE_MOTION_DATA_HAIR;
- }
- else {
- geom_step = MEM_callocN(sizeof(EEVEE_GeometryMotionData), __func__);
- geom_step->type = EEVEE_MOTION_DATA_MESH;
- }
- BLI_ghash_insert(mb->geom, key, geom_step);
- }
- return geom_step;
+ return mb_data->geometry_data;
}
-EEVEE_GeometryMotionData *EEVEE_motion_blur_geometry_data_get(EEVEE_MotionBlurData *mb, Object *ob)
+EEVEE_HairMotionData *EEVEE_motion_blur_hair_data_get(EEVEE_ObjectMotionData *mb_data, Object *ob)
{
- return motion_blur_deform_data_get(mb, ob, false);
-}
-
-EEVEE_HairMotionData *EEVEE_motion_blur_hair_data_get(EEVEE_MotionBlurData *mb, Object *ob)
-{
- return motion_blur_deform_data_get(mb, ob, true);
+ if (mb_data->hair_data == NULL) {
+ /* Ugly, we allocate for each modifiers and just fill based on modifier index in the list. */
+ int psys_len = (ob->type != OB_CURVES) ? BLI_listbase_count(&ob->modifiers) : 1;
+ EEVEE_HairMotionData *hair_step = MEM_callocN(
+ sizeof(EEVEE_HairMotionData) + sizeof(hair_step->psys[0]) * psys_len, __func__);
+ hair_step->psys_len = psys_len;
+ hair_step->type = EEVEE_MOTION_DATA_HAIR;
+ mb_data->hair_data = hair_step;
+ }
+ return mb_data->hair_data;
}
/* View Layer data. */
diff --git a/source/blender/draw/engines/eevee/eevee_depth_of_field.c b/source/blender/draw/engines/eevee/eevee_depth_of_field.c
index 39cfbb40318..ef4d88bd521 100644
--- a/source/blender/draw/engines/eevee/eevee_depth_of_field.c
+++ b/source/blender/draw/engines/eevee/eevee_depth_of_field.c
@@ -626,11 +626,6 @@ static void dof_reduce_pass_init(EEVEE_FramebufferList *fbl,
"dof_reduced_color", UNPACK2(res), mip_count, GPU_RGBA16F, NULL);
txl->dof_reduced_coc = GPU_texture_create_2d(
"dof_reduced_coc", UNPACK2(res), mip_count, GPU_R16F, NULL);
-
- /* TODO(@fclem): Remove once we have immutable storage or when mips are generated on creation.
- */
- GPU_texture_generate_mipmap(txl->dof_reduced_color);
- GPU_texture_generate_mipmap(txl->dof_reduced_coc);
}
GPU_framebuffer_ensure_config(&fbl->dof_reduce_fb,
diff --git a/source/blender/draw/engines/eevee/eevee_motion_blur.c b/source/blender/draw/engines/eevee/eevee_motion_blur.c
index 93ffa2be9f3..fbc19a01a8b 100644
--- a/source/blender/draw/engines/eevee/eevee_motion_blur.c
+++ b/source/blender/draw/engines/eevee/eevee_motion_blur.c
@@ -226,15 +226,14 @@ void EEVEE_motion_blur_hair_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
}
/* For now we assume hair objects are always moving. */
- EEVEE_ObjectMotionData *mb_data = EEVEE_motion_blur_object_data_get(
- &effects->motion_blur, ob, true);
+ EEVEE_ObjectMotionData *mb_data = EEVEE_motion_blur_object_data_get(&effects->motion_blur, ob);
if (mb_data) {
int mb_step = effects->motion_blur_step;
/* Store transform. */
DRW_hair_duplimat_get(ob, psys, md, mb_data->obmat[mb_step]);
- EEVEE_HairMotionData *mb_hair = EEVEE_motion_blur_hair_data_get(&effects->motion_blur, ob);
+ EEVEE_HairMotionData *mb_hair = EEVEE_motion_blur_hair_data_get(mb_data, ob);
int psys_id = (md != NULL) ? BLI_findindex(&ob->modifiers, md) : 0;
if (psys_id >= mb_hair->psys_len) {
@@ -252,8 +251,8 @@ void EEVEE_motion_blur_hair_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
copy_m4_m4(mb_data->obmat[MB_NEXT], mb_data->obmat[MB_CURR]);
}
- GPUTexture *tex_prev = mb_hair->psys[psys_id].hair_pos_tx[MB_PREV];
- GPUTexture *tex_next = mb_hair->psys[psys_id].hair_pos_tx[MB_NEXT];
+ GPUTexture *tex_prev = mb_hair->psys[psys_id].step_data[MB_PREV].hair_pos_tx;
+ GPUTexture *tex_next = mb_hair->psys[psys_id].step_data[MB_NEXT].hair_pos_tx;
grp = DRW_shgroup_hair_create_sub(ob, psys, md, effects->motion_blur.hair_grp, NULL);
DRW_shgroup_uniform_mat4(grp, "prevModelMatrix", mb_data->obmat[MB_PREV]);
@@ -265,7 +264,7 @@ void EEVEE_motion_blur_hair_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
}
else {
/* Store vertex position buffer. */
- mb_hair->psys[psys_id].hair_pos[mb_step] = DRW_hair_pos_buffer_get(ob, psys, md);
+ mb_hair->psys[psys_id].step_data[mb_step].hair_pos = DRW_hair_pos_buffer_get(ob, psys, md);
mb_hair->use_deform = true;
}
}
@@ -304,24 +303,14 @@ void EEVEE_motion_blur_cache_populate(EEVEE_ViewLayerData *UNUSED(sldata),
return;
}
- const DupliObject *dup = DRW_object_get_dupli(ob);
- if (dup != NULL && dup->ob->data != dup->ob_data) {
- /* Geometry instances do not support motion blur correctly yet. The #key used in
- * #motion_blur_deform_data_get has to take ids of instances (#DupliObject.persistent_id) into
- * account. Otherwise it can't find matching geometry instances at different points in time. */
- return;
- }
-
- EEVEE_ObjectMotionData *mb_data = EEVEE_motion_blur_object_data_get(
- &effects->motion_blur, ob, false);
+ EEVEE_ObjectMotionData *mb_data = EEVEE_motion_blur_object_data_get(&effects->motion_blur, ob);
if (mb_data) {
int mb_step = effects->motion_blur_step;
/* Store transform. */
copy_m4_m4(mb_data->obmat[mb_step], ob->obmat);
- EEVEE_GeometryMotionData *mb_geom = EEVEE_motion_blur_geometry_data_get(&effects->motion_blur,
- ob);
+ EEVEE_GeometryMotionData *mb_geom = EEVEE_motion_blur_geometry_data_get(mb_data);
if (mb_step == MB_CURR) {
GPUBatch *batch = DRW_cache_object_surface_get(ob);
@@ -407,86 +396,93 @@ void EEVEE_motion_blur_cache_finish(EEVEE_Data *vedata)
DRW_cache_restart();
}
- for (BLI_ghashIterator_init(&ghi, effects->motion_blur.geom);
+ for (BLI_ghashIterator_init(&ghi, effects->motion_blur.object);
BLI_ghashIterator_done(&ghi) == false;
BLI_ghashIterator_step(&ghi)) {
- EEVEE_GeometryMotionData *mb_geom = BLI_ghashIterator_getValue(&ghi);
- EEVEE_HairMotionData *mb_hair = (EEVEE_HairMotionData *)mb_geom;
-
- if (!mb_geom->use_deform) {
- continue;
- }
-
- switch (mb_geom->type) {
- case EEVEE_MOTION_DATA_HAIR:
- if (mb_step == MB_CURR) {
- /* TODO(fclem): Check if vertex count mismatch. */
- mb_hair->use_deform = true;
- }
- else {
- for (int i = 0; i < mb_hair->psys_len; i++) {
- if (mb_hair->psys[i].hair_pos[mb_step] == NULL) {
- continue;
- }
- mb_hair->psys[i].hair_pos[mb_step] = GPU_vertbuf_duplicate(
- mb_hair->psys[i].hair_pos[mb_step]);
-
+ EEVEE_ObjectMotionData *mb_data = BLI_ghashIterator_getValue(&ghi);
+ EEVEE_HairMotionData *mb_hair = mb_data->hair_data;
+ EEVEE_GeometryMotionData *mb_geom = mb_data->geometry_data;
+ if (mb_hair != NULL && mb_hair->use_deform) {
+ if (mb_step == MB_CURR) {
+ /* TODO(fclem): Check if vertex count mismatch. */
+ mb_hair->use_deform = true;
+ }
+ else {
+ for (int i = 0; i < mb_hair->psys_len; i++) {
+ GPUVertBuf *vbo = mb_hair->psys[i].step_data[mb_step].hair_pos;
+ if (vbo == NULL) {
+ continue;
+ }
+ EEVEE_HairMotionStepData **step_data_cache_ptr;
+ if (!BLI_ghash_ensure_p(effects->motion_blur.hair_motion_step_cache[mb_step],
+ vbo,
+ (void ***)&step_data_cache_ptr)) {
+ EEVEE_HairMotionStepData *new_step_data = MEM_callocN(sizeof(EEVEE_HairMotionStepData),
+ __func__);
+ /* Duplicate the vbo, otherwise it would be lost when evaluating another frame. */
+ new_step_data->hair_pos = GPU_vertbuf_duplicate(vbo);
/* Create vbo immediately to bind to texture buffer. */
- GPU_vertbuf_use(mb_hair->psys[i].hair_pos[mb_step]);
-
- mb_hair->psys[i].hair_pos_tx[mb_step] = GPU_texture_create_from_vertbuf(
- "hair_pos_motion_blur", mb_hair->psys[i].hair_pos[mb_step]);
+ GPU_vertbuf_use(new_step_data->hair_pos);
+ new_step_data->hair_pos_tx = GPU_texture_create_from_vertbuf("hair_pos_motion_blur",
+ new_step_data->hair_pos);
+ *step_data_cache_ptr = new_step_data;
}
+ mb_hair->psys[i].step_data[mb_step] = **step_data_cache_ptr;
}
- break;
-
- case EEVEE_MOTION_DATA_MESH:
- if (mb_step == MB_CURR) {
- /* Modify batch to have data from adjacent frames. */
- GPUBatch *batch = mb_geom->batch;
- for (int i = 0; i < MB_CURR; i++) {
- GPUVertBuf *vbo = mb_geom->vbo[i];
- if (vbo && batch) {
- if (GPU_vertbuf_get_vertex_len(vbo) != GPU_vertbuf_get_vertex_len(batch->verts[0])) {
- /* Vertex count mismatch, disable deform motion blur. */
- mb_geom->use_deform = false;
- }
-
- if (mb_geom->use_deform == false) {
- motion_blur_remove_vbo_reference_from_batch(
- batch, mb_geom->vbo[MB_PREV], mb_geom->vbo[MB_NEXT]);
-
- GPU_VERTBUF_DISCARD_SAFE(mb_geom->vbo[MB_PREV]);
- GPU_VERTBUF_DISCARD_SAFE(mb_geom->vbo[MB_NEXT]);
- break;
- }
+ }
+ }
+ if (mb_geom != NULL && mb_geom->use_deform) {
+ if (mb_step == MB_CURR) {
+ /* Modify batch to have data from adjacent frames. */
+ GPUBatch *batch = mb_geom->batch;
+ for (int i = 0; i < MB_CURR; i++) {
+ GPUVertBuf *vbo = mb_geom->vbo[i];
+ if (vbo && batch) {
+ if (GPU_vertbuf_get_vertex_len(vbo) != GPU_vertbuf_get_vertex_len(batch->verts[0])) {
+ /* Vertex count mismatch, disable deform motion blur. */
+ mb_geom->use_deform = false;
+ }
+ if (mb_geom->use_deform == false) {
+ motion_blur_remove_vbo_reference_from_batch(
+ batch, mb_geom->vbo[MB_PREV], mb_geom->vbo[MB_NEXT]);
+ break;
+ }
+ /* Avoid adding the same vbo more than once when the batch is used by multiple
+ * instances. */
+ if (!GPU_batch_vertbuf_has(batch, vbo)) {
+ /* Currently, the code assumes that all objects that share the same mesh in the
+ * current frame also share the same mesh on other frames. */
GPU_batch_vertbuf_add_ex(batch, vbo, false);
}
}
}
- else {
- GPUVertBuf *vbo = mb_geom->vbo[mb_step];
- if (vbo) {
- /* Use the vbo to perform the copy on the GPU. */
- GPU_vertbuf_use(vbo);
- /* Perform a copy to avoid losing it after RE_engine_frame_set(). */
- mb_geom->vbo[mb_step] = vbo = GPU_vertbuf_duplicate(vbo);
+ }
+ else {
+ GPUVertBuf *vbo = mb_geom->vbo[mb_step];
+ if (vbo) {
+ /* Use the vbo to perform the copy on the GPU. */
+ GPU_vertbuf_use(vbo);
+ /* Perform a copy to avoid losing it after RE_engine_frame_set(). */
+ GPUVertBuf **vbo_cache_ptr;
+ if (!BLI_ghash_ensure_p(effects->motion_blur.position_vbo_cache[mb_step],
+ vbo,
+ (void ***)&vbo_cache_ptr)) {
+ /* Duplicate the vbo, otherwise it would be lost when evaluating another frame. */
+ GPUVertBuf *duplicated_vbo = GPU_vertbuf_duplicate(vbo);
+ *vbo_cache_ptr = duplicated_vbo;
/* Find and replace "pos" attrib name. */
- GPUVertFormat *format = (GPUVertFormat *)GPU_vertbuf_get_format(vbo);
+ GPUVertFormat *format = (GPUVertFormat *)GPU_vertbuf_get_format(duplicated_vbo);
int attrib_id = GPU_vertformat_attr_id_get(format, "pos");
GPU_vertformat_attr_rename(format, attrib_id, (mb_step == MB_PREV) ? "prv" : "nxt");
}
- else {
- /* This might happen if the object visibility has been animated. */
- mb_geom->use_deform = false;
- }
+ mb_geom->vbo[mb_step] = vbo = *vbo_cache_ptr;
}
- break;
-
- default:
- BLI_assert(0);
- break;
+ else {
+ /* This might happen if the object visibility has been animated. */
+ mb_geom->use_deform = false;
+ }
+ }
}
}
}
@@ -503,54 +499,62 @@ void EEVEE_motion_blur_swap_data(EEVEE_Data *vedata)
/* Camera Data. */
effects->motion_blur.camera[MB_PREV] = effects->motion_blur.camera[MB_NEXT];
- /* Object Data. */
- for (BLI_ghashIterator_init(&ghi, effects->motion_blur.object);
- BLI_ghashIterator_done(&ghi) == false;
- BLI_ghashIterator_step(&ghi)) {
- EEVEE_ObjectMotionData *mb_data = BLI_ghashIterator_getValue(&ghi);
-
- copy_m4_m4(mb_data->obmat[MB_PREV], mb_data->obmat[MB_NEXT]);
+ /* Swap #position_vbo_cache pointers. */
+ if (effects->motion_blur.position_vbo_cache[MB_PREV]) {
+ BLI_ghash_free(effects->motion_blur.position_vbo_cache[MB_PREV],
+ NULL,
+ (GHashValFreeFP)GPU_vertbuf_discard);
+ }
+ effects->motion_blur.position_vbo_cache[MB_PREV] =
+ effects->motion_blur.position_vbo_cache[MB_NEXT];
+ effects->motion_blur.position_vbo_cache[MB_NEXT] = NULL;
+
+ /* Swap #hair_motion_step_cache pointers. */
+ if (effects->motion_blur.hair_motion_step_cache[MB_PREV]) {
+ BLI_ghash_free(effects->motion_blur.hair_motion_step_cache[MB_PREV],
+ NULL,
+ (GHashValFreeFP)EEVEE_motion_hair_step_free);
}
+ effects->motion_blur.hair_motion_step_cache[MB_PREV] =
+ effects->motion_blur.hair_motion_step_cache[MB_NEXT];
+ effects->motion_blur.hair_motion_step_cache[MB_NEXT] = NULL;
- /* Deformation Data. */
- for (BLI_ghashIterator_init(&ghi, effects->motion_blur.geom);
- BLI_ghashIterator_done(&ghi) == false;
+ /* Rename attributes in #position_vbo_cache. */
+ for (BLI_ghashIterator_init(&ghi, effects->motion_blur.position_vbo_cache[MB_PREV]);
+ !BLI_ghashIterator_done(&ghi);
BLI_ghashIterator_step(&ghi)) {
- EEVEE_GeometryMotionData *mb_geom = BLI_ghashIterator_getValue(&ghi);
- EEVEE_HairMotionData *mb_hair = (EEVEE_HairMotionData *)mb_geom;
+ GPUVertBuf *vbo = BLI_ghashIterator_getValue(&ghi);
+ GPUVertFormat *format = (GPUVertFormat *)GPU_vertbuf_get_format(vbo);
+ int attrib_id = GPU_vertformat_attr_id_get(format, "nxt");
+ GPU_vertformat_attr_rename(format, attrib_id, "prv");
+ }
- switch (mb_geom->type) {
- case EEVEE_MOTION_DATA_HAIR:
- for (int i = 0; i < mb_hair->psys_len; i++) {
- GPU_VERTBUF_DISCARD_SAFE(mb_hair->psys[i].hair_pos[MB_PREV]);
- DRW_TEXTURE_FREE_SAFE(mb_hair->psys[i].hair_pos_tx[MB_PREV]);
- mb_hair->psys[i].hair_pos[MB_PREV] = mb_hair->psys[i].hair_pos[MB_NEXT];
- mb_hair->psys[i].hair_pos_tx[MB_PREV] = mb_hair->psys[i].hair_pos_tx[MB_NEXT];
- mb_hair->psys[i].hair_pos[MB_NEXT] = NULL;
- mb_hair->psys[i].hair_pos_tx[MB_NEXT] = NULL;
- }
- break;
+ /* Object Data. */
+ for (BLI_ghashIterator_init(&ghi, effects->motion_blur.object); !BLI_ghashIterator_done(&ghi);
+ BLI_ghashIterator_step(&ghi)) {
+ EEVEE_ObjectMotionData *mb_data = BLI_ghashIterator_getValue(&ghi);
+ EEVEE_GeometryMotionData *mb_geom = mb_data->geometry_data;
+ EEVEE_HairMotionData *mb_hair = mb_data->hair_data;
- case EEVEE_MOTION_DATA_MESH:
- if (mb_geom->batch != NULL) {
- motion_blur_remove_vbo_reference_from_batch(
- mb_geom->batch, mb_geom->vbo[MB_PREV], mb_geom->vbo[MB_NEXT]);
- }
- GPU_VERTBUF_DISCARD_SAFE(mb_geom->vbo[MB_PREV]);
- mb_geom->vbo[MB_PREV] = mb_geom->vbo[MB_NEXT];
- mb_geom->vbo[MB_NEXT] = NULL;
-
- if (mb_geom->vbo[MB_PREV]) {
- GPUVertBuf *vbo = mb_geom->vbo[MB_PREV];
- GPUVertFormat *format = (GPUVertFormat *)GPU_vertbuf_get_format(vbo);
- int attrib_id = GPU_vertformat_attr_id_get(format, "nxt");
- GPU_vertformat_attr_rename(format, attrib_id, "prv");
- }
- break;
+ copy_m4_m4(mb_data->obmat[MB_PREV], mb_data->obmat[MB_NEXT]);
- default:
- BLI_assert(0);
- break;
+ if (mb_hair != NULL) {
+ for (int i = 0; i < mb_hair->psys_len; i++) {
+ mb_hair->psys[i].step_data[MB_PREV].hair_pos =
+ mb_hair->psys[i].step_data[MB_NEXT].hair_pos;
+ mb_hair->psys[i].step_data[MB_PREV].hair_pos_tx =
+ mb_hair->psys[i].step_data[MB_NEXT].hair_pos_tx;
+ mb_hair->psys[i].step_data[MB_NEXT].hair_pos = NULL;
+ mb_hair->psys[i].step_data[MB_NEXT].hair_pos_tx = NULL;
+ }
+ }
+ if (mb_geom != NULL) {
+ if (mb_geom->batch != NULL) {
+ motion_blur_remove_vbo_reference_from_batch(
+ mb_geom->batch, mb_geom->vbo[MB_PREV], mb_geom->vbo[MB_NEXT]);
+ }
+ mb_geom->vbo[MB_PREV] = mb_geom->vbo[MB_NEXT];
+ mb_geom->vbo[MB_NEXT] = NULL;
}
}
}
diff --git a/source/blender/draw/engines/eevee/eevee_private.h b/source/blender/draw/engines/eevee/eevee_private.h
index e8828cc7494..2518ee53da3 100644
--- a/source/blender/draw/engines/eevee/eevee_private.h
+++ b/source/blender/draw/engines/eevee/eevee_private.h
@@ -618,8 +618,23 @@ enum {
#define MB_CURR 2
typedef struct EEVEE_MotionBlurData {
+ /**
+ * Maps #EEVEE_ObjectKey to #EEVEE_ObjectMotionData.
+ */
struct GHash *object;
- struct GHash *geom;
+ /**
+ * Maps original #GPUVertBuf to duplicated #GPUVertBuf.
+ * There are two maps for #MB_PREV and #MB_NEXT.
+ * Only the values are owned.
+ */
+ struct GHash *position_vbo_cache[2];
+ /**
+ * Maps original #GPUVertBuf to #EEVEE_HairMotionStepData.
+ * There are two maps for #MB_PREV and #MB_NEXT.
+ * Only the values are owned.
+ */
+ struct GHash *hair_motion_step_cache[2];
+
struct {
float viewmat[4][4];
float persmat[4][4];
@@ -637,15 +652,16 @@ typedef struct EEVEE_ObjectKey {
int id[8]; /* MAX_DUPLI_RECUR */
} EEVEE_ObjectKey;
-typedef struct EEVEE_ObjectMotionData {
- float obmat[3][4][4];
-} EEVEE_ObjectMotionData;
-
typedef enum eEEVEEMotionData {
EEVEE_MOTION_DATA_MESH = 0,
EEVEE_MOTION_DATA_HAIR,
} eEEVEEMotionData;
+typedef struct EEVEE_HairMotionStepData {
+ struct GPUVertBuf *hair_pos;
+ struct GPUTexture *hair_pos_tx;
+} EEVEE_HairMotionStepData;
+
typedef struct EEVEE_HairMotionData {
/** Needs to be first to ensure casting. */
eEEVEEMotionData type;
@@ -653,8 +669,8 @@ typedef struct EEVEE_HairMotionData {
/** Allocator will alloc enough slot for all particle systems. Or 1 if it's a hair object. */
int psys_len;
struct {
- struct GPUVertBuf *hair_pos[2]; /* Position buffer for time = t +/- step. */
- struct GPUTexture *hair_pos_tx[2]; /* Buffer Texture of the corresponding VBO. */
+ /* The vbos and textures are not owned. */
+ EEVEE_HairMotionStepData step_data[2]; /* Data for time = t +/- step. */
} psys[0];
} EEVEE_HairMotionData;
@@ -664,10 +680,18 @@ typedef struct EEVEE_GeometryMotionData {
/** To disable deform mb if vertcount mismatch. */
int use_deform;
+ /* The batch and vbos are not owned. */
struct GPUBatch *batch; /* Batch for time = t. */
struct GPUVertBuf *vbo[2]; /* VBO for time = t +/- step. */
} EEVEE_GeometryMotionData;
+typedef struct EEVEE_ObjectMotionData {
+ float obmat[3][4][4];
+
+ EEVEE_GeometryMotionData *geometry_data;
+ EEVEE_HairMotionData *hair_data;
+} EEVEE_ObjectMotionData;
+
/* ************ EFFECTS DATA ************* */
typedef enum EEVEE_EffectsFlag {
@@ -1062,17 +1086,15 @@ typedef struct EEVEE_PrivateData {
void EEVEE_motion_blur_data_init(EEVEE_MotionBlurData *mb);
void EEVEE_motion_blur_data_free(EEVEE_MotionBlurData *mb);
void EEVEE_view_layer_data_free(void *storage);
+void EEVEE_motion_hair_step_free(EEVEE_HairMotionStepData *step_data);
EEVEE_ViewLayerData *EEVEE_view_layer_data_get(void);
EEVEE_ViewLayerData *EEVEE_view_layer_data_ensure_ex(struct ViewLayer *view_layer);
EEVEE_ViewLayerData *EEVEE_view_layer_data_ensure(void);
EEVEE_ObjectEngineData *EEVEE_object_data_get(Object *ob);
EEVEE_ObjectEngineData *EEVEE_object_data_ensure(Object *ob);
-EEVEE_ObjectMotionData *EEVEE_motion_blur_object_data_get(EEVEE_MotionBlurData *mb,
- Object *ob,
- bool hair);
-EEVEE_GeometryMotionData *EEVEE_motion_blur_geometry_data_get(EEVEE_MotionBlurData *mb,
- Object *ob);
-EEVEE_HairMotionData *EEVEE_motion_blur_hair_data_get(EEVEE_MotionBlurData *mb, Object *ob);
+EEVEE_ObjectMotionData *EEVEE_motion_blur_object_data_get(EEVEE_MotionBlurData *mb, Object *ob);
+EEVEE_GeometryMotionData *EEVEE_motion_blur_geometry_data_get(EEVEE_ObjectMotionData *mb_data);
+EEVEE_HairMotionData *EEVEE_motion_blur_hair_data_get(EEVEE_ObjectMotionData *mb_data, Object *ob);
EEVEE_LightProbeEngineData *EEVEE_lightprobe_data_get(Object *ob);
EEVEE_LightProbeEngineData *EEVEE_lightprobe_data_ensure(Object *ob);
EEVEE_LightEngineData *EEVEE_light_data_get(Object *ob);
diff --git a/source/blender/draw/engines/eevee/eevee_shadows.c b/source/blender/draw/engines/eevee/eevee_shadows.c
index b5e9a296c16..29d98f6795d 100644
--- a/source/blender/draw/engines/eevee/eevee_shadows.c
+++ b/source/blender/draw/engines/eevee/eevee_shadows.c
@@ -273,11 +273,9 @@ void EEVEE_shadows_update(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
/* Resize shcasters buffers if too big. */
if (frontbuffer->alloc_count - frontbuffer->count > SH_CASTER_ALLOC_CHUNK) {
- frontbuffer->alloc_count = (frontbuffer->count / SH_CASTER_ALLOC_CHUNK) *
+ frontbuffer->alloc_count = divide_ceil_u(max_ii(1, frontbuffer->count),
+ SH_CASTER_ALLOC_CHUNK) *
SH_CASTER_ALLOC_CHUNK;
- frontbuffer->alloc_count += (frontbuffer->count % SH_CASTER_ALLOC_CHUNK != 0) ?
- SH_CASTER_ALLOC_CHUNK :
- 0;
frontbuffer->bbox = MEM_reallocN(frontbuffer->bbox,
sizeof(EEVEE_BoundBox) * frontbuffer->alloc_count);
BLI_BITMAP_RESIZE(frontbuffer->update, frontbuffer->alloc_count);
diff --git a/source/blender/draw/engines/image/image_buffer_cache.hh b/source/blender/draw/engines/image/image_buffer_cache.hh
new file mode 100644
index 00000000000..ef11551c879
--- /dev/null
+++ b/source/blender/draw/engines/image/image_buffer_cache.hh
@@ -0,0 +1,131 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright 2022, Blender Foundation.
+ */
+
+/** \file
+ * \ingroup draw_engine
+ */
+
+#pragma once
+
+#include "BLI_vector.hh"
+
+#include "IMB_imbuf.h"
+#include "IMB_imbuf_types.h"
+
+struct FloatImageBuffer {
+ ImBuf *source_buffer = nullptr;
+ ImBuf *float_buffer = nullptr;
+ bool is_used = true;
+
+ FloatImageBuffer(ImBuf *source_buffer, ImBuf *float_buffer)
+ : source_buffer(source_buffer), float_buffer(float_buffer)
+ {
+ }
+
+ FloatImageBuffer(FloatImageBuffer &&other) noexcept
+ {
+ source_buffer = other.source_buffer;
+ float_buffer = other.float_buffer;
+ is_used = other.is_used;
+ other.source_buffer = nullptr;
+ other.float_buffer = nullptr;
+ }
+
+ virtual ~FloatImageBuffer()
+ {
+ IMB_freeImBuf(float_buffer);
+ float_buffer = nullptr;
+ source_buffer = nullptr;
+ }
+
+ FloatImageBuffer &operator=(FloatImageBuffer &&other) noexcept
+ {
+ this->source_buffer = other.source_buffer;
+ this->float_buffer = other.float_buffer;
+ is_used = other.is_used;
+ other.source_buffer = nullptr;
+ other.float_buffer = nullptr;
+ return *this;
+ }
+};
+
+struct FloatBufferCache {
+ private:
+ blender::Vector<FloatImageBuffer> cache_;
+
+ public:
+ ImBuf *ensure_float_buffer(ImBuf *image_buffer)
+ {
+ /* Check if we can use the float buffer of the given image_buffer. */
+ if (image_buffer->rect_float != nullptr) {
+ return image_buffer;
+ }
+
+ /* Do we have a cached float buffer. */
+ for (FloatImageBuffer &item : cache_) {
+ if (item.source_buffer == image_buffer) {
+ item.is_used = true;
+ return item.float_buffer;
+ }
+ }
+
+ /* Generate a new float buffer. */
+ IMB_float_from_rect(image_buffer);
+ ImBuf *new_imbuf = IMB_allocImBuf(image_buffer->x, image_buffer->y, image_buffer->planes, 0);
+ new_imbuf->rect_float = image_buffer->rect_float;
+ new_imbuf->flags |= IB_rectfloat;
+ new_imbuf->mall |= IB_rectfloat;
+ image_buffer->rect_float = nullptr;
+ image_buffer->flags &= ~IB_rectfloat;
+ image_buffer->mall &= ~IB_rectfloat;
+
+ cache_.append(FloatImageBuffer(image_buffer, new_imbuf));
+ return new_imbuf;
+ }
+
+ void reset_usage_flags()
+ {
+ for (FloatImageBuffer &buffer : cache_) {
+ buffer.is_used = false;
+ }
+ }
+
+ void mark_used(const ImBuf *image_buffer)
+ {
+ for (FloatImageBuffer &item : cache_) {
+ if (item.source_buffer == image_buffer) {
+ item.is_used = true;
+ return;
+ }
+ }
+ }
+
+ void remove_unused_buffers()
+ {
+ for (int64_t i = cache_.size() - 1; i >= 0; i--) {
+ if (!cache_[i].is_used) {
+ cache_.remove_and_reorder(i);
+ }
+ }
+ }
+
+ void clear()
+ {
+ cache_.clear();
+ }
+};
diff --git a/source/blender/draw/engines/image/image_drawing_mode.hh b/source/blender/draw/engines/image/image_drawing_mode.hh
index b3d6c3abd18..c091f800d95 100644
--- a/source/blender/draw/engines/image/image_drawing_mode.hh
+++ b/source/blender/draw/engines/image/image_drawing_mode.hh
@@ -157,6 +157,7 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
if (tile_buffer == nullptr) {
continue;
}
+ instance_data.float_buffers.mark_used(tile_buffer);
BKE_image_release_ibuf(image, tile_buffer, lock);
DRWShadingGroup *shsub = DRW_shgroup_create_sub(shgrp);
@@ -184,12 +185,14 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
switch (changes.get_result_code()) {
case ePartialUpdateCollectResult::FullUpdateNeeded:
instance_data.mark_all_texture_slots_dirty();
+ instance_data.float_buffers.clear();
break;
case ePartialUpdateCollectResult::NoChangesDetected:
break;
case ePartialUpdateCollectResult::PartialChangesDetected:
/* Partial update when wrap repeat is enabled is not supported. */
if (instance_data.flags.do_tile_drawing) {
+ instance_data.float_buffers.clear();
instance_data.mark_all_texture_slots_dirty();
}
else {
@@ -200,6 +203,34 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
do_full_update_for_dirty_textures(instance_data, image_user);
}
+ /**
+ * Update the float buffer in the region given by the partial update checker.
+ */
+ void do_partial_update_float_buffer(
+ ImBuf *float_buffer, PartialUpdateChecker<ImageTileData>::CollectResult &iterator) const
+ {
+ ImBuf *src = iterator.tile_data.tile_buffer;
+ BLI_assert(float_buffer->rect_float != nullptr);
+ BLI_assert(float_buffer->rect == nullptr);
+ BLI_assert(src->rect_float == nullptr);
+ BLI_assert(src->rect != nullptr);
+
+    /* Calculate the overlap between the updated region and the buffer size. The partial update
+     * checker always returns a tile (256x256), which could lie partially outside the buffer when
+     * using different resolutions.
+     */
+ rcti buffer_rect;
+ BLI_rcti_init(&buffer_rect, 0, float_buffer->x, 0, float_buffer->y);
+ rcti clipped_update_region;
+ const bool has_overlap = BLI_rcti_isect(
+ &buffer_rect, &iterator.changed_region.region, &clipped_update_region);
+ if (!has_overlap) {
+ return;
+ }
+
+ IMB_float_from_rect_ex(float_buffer, src, &clipped_update_region);
+ }
+
void do_partial_update(PartialUpdateChecker<ImageTileData>::CollectResult &iterator,
IMAGE_InstanceData &instance_data) const
{
@@ -208,7 +239,11 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
if (iterator.tile_data.tile_buffer == nullptr) {
continue;
}
- ensure_float_buffer(*iterator.tile_data.tile_buffer);
+ ImBuf *tile_buffer = ensure_float_buffer(instance_data, iterator.tile_data.tile_buffer);
+ if (tile_buffer != iterator.tile_data.tile_buffer) {
+ do_partial_update_float_buffer(tile_buffer, iterator);
+ }
+
const float tile_width = static_cast<float>(iterator.tile_data.tile_buffer->x);
const float tile_height = static_cast<float>(iterator.tile_data.tile_buffer->y);
@@ -283,7 +318,6 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
&extracted_buffer, texture_region_width, texture_region_height, 32, IB_rectfloat);
int offset = 0;
- ImBuf *tile_buffer = iterator.tile_data.tile_buffer;
for (int y = gpu_texture_region_to_update.ymin; y < gpu_texture_region_to_update.ymax;
y++) {
float yf = y / (float)texture_height;
@@ -372,16 +406,12 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
* rect_float as the reference-counter isn't 0. To work around this we destruct any created local
* buffers ourself.
*/
- bool ensure_float_buffer(ImBuf &image_buffer) const
+ ImBuf *ensure_float_buffer(IMAGE_InstanceData &instance_data, ImBuf *image_buffer) const
{
- if (image_buffer.rect_float == nullptr) {
- IMB_float_from_rect(&image_buffer);
- return true;
- }
- return false;
+ return instance_data.float_buffers.ensure_float_buffer(image_buffer);
}
- void do_full_update_texture_slot(const IMAGE_InstanceData &instance_data,
+ void do_full_update_texture_slot(IMAGE_InstanceData &instance_data,
const TextureInfo &texture_info,
ImBuf &texture_buffer,
ImBuf &tile_buffer,
@@ -389,10 +419,7 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
{
const int texture_width = texture_buffer.x;
const int texture_height = texture_buffer.y;
- const bool float_buffer_created = ensure_float_buffer(tile_buffer);
- /* TODO(jbakker): Find leak when rendering VSE and don't free here. */
- const bool do_free_float_buffer = float_buffer_created &&
- instance_data.image->type == IMA_TYPE_R_RESULT;
+ ImBuf *float_tile_buffer = ensure_float_buffer(instance_data, &tile_buffer);
/* IMB_transform works in a non-consistent space. This should be documented or fixed!.
* Construct a variant of the info_uv_to_texture that adds the texel space
@@ -423,16 +450,12 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
transform_mode = IMB_TRANSFORM_MODE_CROP_SRC;
}
- IMB_transform(&tile_buffer,
+ IMB_transform(float_tile_buffer,
&texture_buffer,
transform_mode,
IMB_FILTER_NEAREST,
uv_to_texel,
crop_rect_ptr);
-
- if (do_free_float_buffer) {
- imb_freerectfloatImBuf(&tile_buffer);
- }
}
public:
@@ -451,6 +474,7 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
instance_data->partial_update.ensure_image(image);
instance_data->clear_dirty_flag();
+ instance_data->float_buffers.reset_usage_flags();
/* Step: Find out which screen space textures are needed to draw on the screen. Remove the
* screen space textures that aren't needed. */
@@ -459,7 +483,7 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
method.update_screen_uv_bounds();
/* Check for changes in the image user compared to the last time. */
- instance_data->update_image_user(iuser);
+ instance_data->update_image_usage(iuser);
/* Step: Update the GPU textures based on the changes in the image. */
instance_data->update_gpu_texture_allocations();
@@ -467,12 +491,16 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
/* Step: Add the GPU textures to the shgroup. */
instance_data->update_batches();
- add_depth_shgroups(*instance_data, image, iuser);
+ if (!instance_data->flags.do_tile_drawing) {
+ add_depth_shgroups(*instance_data, image, iuser);
+ }
add_shgroups(instance_data);
}
- void draw_finish(IMAGE_Data *UNUSED(vedata)) const override
+ void draw_finish(IMAGE_Data *vedata) const override
{
+ IMAGE_InstanceData *instance_data = vedata->instance_data;
+ instance_data->float_buffers.remove_unused_buffers();
}
void draw_scene(IMAGE_Data *vedata) const override
@@ -481,8 +509,10 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
GPU_framebuffer_bind(dfbl->default_fb);
+
static float clear_col[4] = {0.0f, 0.0f, 0.0f, 0.0f};
- GPU_framebuffer_clear_color_depth(dfbl->default_fb, clear_col, 1.0);
+ float clear_depth = instance_data->flags.do_tile_drawing ? 0.75 : 1.0f;
+ GPU_framebuffer_clear_color_depth(dfbl->default_fb, clear_col, clear_depth);
DRW_view_set_active(instance_data->view);
DRW_draw_pass(instance_data->passes.depth_pass);
diff --git a/source/blender/draw/engines/image/image_engine.cc b/source/blender/draw/engines/image/image_engine.cc
index 180e9601cbd..e972d21cda4 100644
--- a/source/blender/draw/engines/image/image_engine.cc
+++ b/source/blender/draw/engines/image/image_engine.cc
@@ -107,6 +107,7 @@ class ImageEngine {
space->release_buffer(instance_data->image, image_buffer, lock);
ImageUser *iuser = space->get_image_user();
+ BKE_image_multiview_index(instance_data->image, iuser);
drawing_mode.cache_image(vedata, instance_data->image, iuser);
}
diff --git a/source/blender/draw/engines/image/image_instance_data.hh b/source/blender/draw/engines/image/image_instance_data.hh
index dcc3b7d15cb..682b93a80b3 100644
--- a/source/blender/draw/engines/image/image_instance_data.hh
+++ b/source/blender/draw/engines/image/image_instance_data.hh
@@ -8,10 +8,12 @@
#pragma once
#include "image_batches.hh"
+#include "image_buffer_cache.hh"
#include "image_partial_updater.hh"
#include "image_private.hh"
#include "image_shader_params.hh"
#include "image_texture_info.hh"
+#include "image_usage.hh"
#include "image_wrappers.hh"
#include "DRW_render.h"
@@ -25,8 +27,8 @@ constexpr int SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN = 1;
struct IMAGE_InstanceData {
struct Image *image;
- /** Copy of the last image user to detect iuser differences that require a full update. */
- struct ImageUser last_image_user;
+ /** Usage data of the previous time, to identify changes that require a full update. */
+ ImageUsage last_usage;
PartialImageUpdater partial_update;
@@ -47,11 +49,18 @@ struct IMAGE_InstanceData {
DRWPass *depth_pass;
} passes;
+ /**
+ * Cache containing the float buffers when drawing byte images.
+ */
+ FloatBufferCache float_buffers;
+
/** \brief Transform matrix to convert a normalized screen space coordinates to texture space. */
float ss_to_texture[4][4];
TextureInfo texture_infos[SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN];
public:
+ virtual ~IMAGE_InstanceData() = default;
+
void clear_dirty_flag()
{
reset_dirty_flag(false);
@@ -95,24 +104,13 @@ struct IMAGE_InstanceData {
}
}
- void update_image_user(const ImageUser *image_user)
+ void update_image_usage(const ImageUser *image_user)
{
- short requested_pass = image_user ? image_user->pass : 0;
- short requested_layer = image_user ? image_user->layer : 0;
- short requested_view = image_user ? image_user->multi_index : 0;
- /* There is room for 2 multiview textures. When a higher number is requested we should always
- * target the first view slot. This is fine as multi view images aren't used together. */
- if (requested_view > 1) {
- requested_view = 0;
- }
-
- if (last_image_user.pass != requested_pass || last_image_user.layer != requested_layer ||
- last_image_user.multi_index != requested_view) {
-
- last_image_user.pass = requested_pass;
- last_image_user.layer = requested_layer;
- last_image_user.multi_index = requested_view;
+ ImageUsage usage(image, image_user, flags.do_tile_drawing);
+ if (last_usage != usage) {
+ last_usage = usage;
reset_dirty_flag(true);
+ float_buffers.clear();
}
}
diff --git a/source/blender/draw/engines/image/image_usage.hh b/source/blender/draw/engines/image/image_usage.hh
new file mode 100644
index 00000000000..bea5c3853b0
--- /dev/null
+++ b/source/blender/draw/engines/image/image_usage.hh
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2022 Blender Foundation. */
+
+/** \file
+ * \ingroup draw_engine
+ */
+
+#pragma once
+
+/**
+ * ImageUsage contains data of the image and image user to identify changes that require a rebuild
+ * of the texture slots.
+ */
+struct ImageUsage {
+ /** Render pass of the image that is used. */
+ short pass = 0;
+  /** Layer of the image that is used. */
+ short layer = 0;
+ /** View of the image that is used. */
+ short view = 0;
+
+ ColorManagedColorspaceSettings colorspace_settings;
+ /** IMA_ALPHA_* */
+ char alpha_mode;
+ bool last_tile_drawing;
+
+ const void *last_image = nullptr;
+
+ ImageUsage() = default;
+ ImageUsage(const struct Image *image, const struct ImageUser *image_user, bool do_tile_drawing)
+ {
+ pass = image_user ? image_user->pass : 0;
+ layer = image_user ? image_user->layer : 0;
+ view = image_user ? image_user->multi_index : 0;
+ colorspace_settings = image->colorspace_settings;
+ alpha_mode = image->alpha_mode;
+ last_image = static_cast<const void *>(image);
+ last_tile_drawing = do_tile_drawing;
+ }
+
+ bool operator==(const ImageUsage &other) const
+ {
+ return memcmp(this, &other, sizeof(ImageUsage)) == 0;
+ }
+ bool operator!=(const ImageUsage &other) const
+ {
+ return !(*this == other);
+ }
+};
diff --git a/source/blender/draw/engines/overlay/overlay_engine.c b/source/blender/draw/engines/overlay/overlay_engine.c
index b41d9ce69ef..ad0d939e99a 100644
--- a/source/blender/draw/engines/overlay/overlay_engine.c
+++ b/source/blender/draw/engines/overlay/overlay_engine.c
@@ -182,7 +182,9 @@ static void OVERLAY_cache_init(void *vedata)
case CTX_MODE_WEIGHT_GPENCIL:
OVERLAY_edit_gpencil_cache_init(vedata);
break;
+ case CTX_MODE_SCULPT_CURVES:
case CTX_MODE_OBJECT:
+ case CTX_MODE_EDIT_CURVES:
break;
default:
BLI_assert_msg(0, "Draw mode invalid");
@@ -210,7 +212,7 @@ BLI_INLINE OVERLAY_DupliData *OVERLAY_duplidata_get(Object *ob, void *vedata, bo
{
OVERLAY_DupliData **dupli_data = (OVERLAY_DupliData **)DRW_duplidata_get(vedata);
*do_init = false;
- if (!ELEM(ob->type, OB_MESH, OB_SURF, OB_LATTICE, OB_CURVE, OB_FONT)) {
+ if (!ELEM(ob->type, OB_MESH, OB_SURF, OB_LATTICE, OB_CURVES_LEGACY, OB_FONT)) {
return NULL;
}
@@ -237,7 +239,7 @@ static bool overlay_object_is_edit_mode(const OVERLAY_PrivateData *pd, const Obj
return pd->ctx_mode == CTX_MODE_EDIT_MESH;
case OB_ARMATURE:
return pd->ctx_mode == CTX_MODE_EDIT_ARMATURE;
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
return pd->ctx_mode == CTX_MODE_EDIT_CURVE;
case OB_SURF:
return pd->ctx_mode == CTX_MODE_EDIT_SURFACE;
@@ -296,7 +298,7 @@ static void OVERLAY_cache_populate(void *vedata, Object *ob)
(ob->sculpt->mode_type == OB_MODE_SCULPT);
const bool has_surface = ELEM(ob->type,
OB_MESH,
- OB_CURVE,
+ OB_CURVES_LEGACY,
OB_SURF,
OB_MBALL,
OB_FONT,
@@ -366,7 +368,7 @@ static void OVERLAY_cache_populate(void *vedata, Object *ob)
OVERLAY_edit_armature_cache_populate(vedata, ob);
}
break;
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
OVERLAY_edit_curve_cache_populate(vedata, ob);
break;
case OB_SURF:
@@ -661,6 +663,8 @@ static void OVERLAY_draw_scene(void *vedata)
case CTX_MODE_WEIGHT_GPENCIL:
OVERLAY_edit_gpencil_draw(vedata);
break;
+ case CTX_MODE_SCULPT_CURVES:
+ break;
default:
break;
}
diff --git a/source/blender/draw/engines/overlay/overlay_extra.c b/source/blender/draw/engines/overlay/overlay_extra.c
index e370873c234..aae12e5513e 100644
--- a/source/blender/draw/engines/overlay/overlay_extra.c
+++ b/source/blender/draw/engines/overlay/overlay_extra.c
@@ -456,7 +456,7 @@ static void OVERLAY_texture_space(OVERLAY_ExtraCallBuffers *cb, Object *ob, cons
case ID_ME:
BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, &texcosize);
break;
- case ID_CU: {
+ case ID_CU_LEGACY: {
Curve *cu = (Curve *)ob_data;
BKE_curve_texspace_ensure(cu);
texcoloc = cu->loc;
@@ -499,7 +499,7 @@ static void OVERLAY_forcefield(OVERLAY_ExtraCallBuffers *cb, Object *ob, ViewLay
int theme_id = DRW_object_wire_theme_get(ob, view_layer, NULL);
float *color = DRW_color_background_blend_get(theme_id);
PartDeflect *pd = ob->pd;
- Curve *cu = (ob->type == OB_CURVE) ? ob->data : NULL;
+ Curve *cu = (ob->type == OB_CURVES_LEGACY) ? ob->data : NULL;
union {
float mat[4][4];
diff --git a/source/blender/draw/engines/overlay/overlay_motion_path.c b/source/blender/draw/engines/overlay/overlay_motion_path.c
index 58825923f37..aeba721e7ac 100644
--- a/source/blender/draw/engines/overlay/overlay_motion_path.c
+++ b/source/blender/draw/engines/overlay/overlay_motion_path.c
@@ -90,8 +90,8 @@ static void motion_path_get_frame_range_to_draw(bAnimVizSettings *avs,
end = current_frame + avs->path_ac + 1;
}
else {
- start = avs->path_sf;
- end = avs->path_ef;
+ start = mpath->start_frame;
+ end = mpath->end_frame;
}
if (start > end) {
diff --git a/source/blender/draw/engines/overlay/overlay_wireframe.c b/source/blender/draw/engines/overlay/overlay_wireframe.c
index 24eceb30441..2636d7876d5 100644
--- a/source/blender/draw/engines/overlay/overlay_wireframe.c
+++ b/source/blender/draw/engines/overlay/overlay_wireframe.c
@@ -196,14 +196,14 @@ void OVERLAY_wireframe_cache_populate(OVERLAY_Data *vedata,
}
}
- if (ELEM(ob->type, OB_CURVE, OB_FONT, OB_SURF)) {
+ if (ELEM(ob->type, OB_CURVES_LEGACY, OB_FONT, OB_SURF)) {
OVERLAY_ExtraCallBuffers *cb = OVERLAY_extra_call_buffer_get(vedata, ob);
float *color;
DRW_object_wire_theme_get(ob, draw_ctx->view_layer, &color);
struct GPUBatch *geom = NULL;
switch (ob->type) {
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
geom = DRW_cache_curve_edge_wire_get(ob);
break;
case OB_FONT:
diff --git a/source/blender/draw/engines/overlay/shaders/paint_texture_frag.glsl b/source/blender/draw/engines/overlay/shaders/paint_texture_frag.glsl
index 4d0692039a4..ebaa898429d 100644
--- a/source/blender/draw/engines/overlay/shaders/paint_texture_frag.glsl
+++ b/source/blender/draw/engines/overlay/shaders/paint_texture_frag.glsl
@@ -15,7 +15,7 @@ void main()
if (maskInvertStencil) {
mask.rgb = 1.0 - mask.rgb;
}
- float mask_step = smoothstep(0, 3.0, mask.r + mask.g + mask.b);
+ float mask_step = smoothstep(0.0, 3.0, mask.r + mask.g + mask.b);
mask.rgb *= maskColor;
mask.a = mask_step * opacity;
diff --git a/source/blender/draw/engines/select/select_draw_utils.c b/source/blender/draw/engines/select/select_draw_utils.c
index 82812ef98a5..7615b5bb39c 100644
--- a/source/blender/draw/engines/select/select_draw_utils.c
+++ b/source/blender/draw/engines/select/select_draw_utils.c
@@ -225,7 +225,7 @@ void select_id_draw_object(void *vedata,
stl, ob, select_mode, initial_offset, r_vert_offset, r_edge_offset, r_face_offset);
}
break;
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
case OB_SURF:
break;
}
diff --git a/source/blender/draw/engines/workbench/workbench_opaque.c b/source/blender/draw/engines/workbench/workbench_opaque.c
index 5e12d6a736c..191a2e6d1cc 100644
--- a/source/blender/draw/engines/workbench/workbench_opaque.c
+++ b/source/blender/draw/engines/workbench/workbench_opaque.c
@@ -73,11 +73,13 @@ void workbench_opaque_cache_init(WORKBENCH_Data *vedata)
sh = workbench_shader_opaque_get(wpd, data);
wpd->prepass[opaque][infront][data].common_shgrp = grp = DRW_shgroup_create(sh, pass);
+ DRW_shgroup_uniform_block(grp, "world_data", wpd->world_ubo);
DRW_shgroup_uniform_block(grp, "materials_data", wpd->material_ubo_curr);
DRW_shgroup_uniform_int_copy(grp, "materialIndex", -1);
DRW_shgroup_uniform_bool_copy(grp, "useMatcap", use_matcap);
wpd->prepass[opaque][infront][data].vcol_shgrp = grp = DRW_shgroup_create(sh, pass);
+ DRW_shgroup_uniform_block(grp, "world_data", wpd->world_ubo);
DRW_shgroup_uniform_block(grp, "materials_data", wpd->material_ubo_curr);
DRW_shgroup_uniform_int_copy(grp, "materialIndex", 0); /* Default material. (uses vcol) */
DRW_shgroup_uniform_bool_copy(grp, "useMatcap", use_matcap);
@@ -85,6 +87,7 @@ void workbench_opaque_cache_init(WORKBENCH_Data *vedata)
sh = workbench_shader_opaque_image_get(wpd, data, false);
wpd->prepass[opaque][infront][data].image_shgrp = grp = DRW_shgroup_create(sh, pass);
+ DRW_shgroup_uniform_block(grp, "world_data", wpd->world_ubo);
DRW_shgroup_uniform_block(grp, "materials_data", wpd->material_ubo_curr);
DRW_shgroup_uniform_int_copy(grp, "materialIndex", 0); /* Default material. */
DRW_shgroup_uniform_bool_copy(grp, "useMatcap", use_matcap);
@@ -92,6 +95,7 @@ void workbench_opaque_cache_init(WORKBENCH_Data *vedata)
sh = workbench_shader_opaque_image_get(wpd, data, true);
wpd->prepass[opaque][infront][data].image_tiled_shgrp = grp = DRW_shgroup_create(sh, pass);
+ DRW_shgroup_uniform_block(grp, "world_data", wpd->world_ubo);
DRW_shgroup_uniform_block(grp, "materials_data", wpd->material_ubo_curr);
DRW_shgroup_uniform_int_copy(grp, "materialIndex", 0); /* Default material. */
DRW_shgroup_uniform_bool_copy(grp, "useMatcap", use_matcap);
diff --git a/source/blender/draw/intern/DRW_gpu_wrapper.hh b/source/blender/draw/intern/DRW_gpu_wrapper.hh
index f387d5371b5..bce001659b2 100644
--- a/source/blender/draw/intern/DRW_gpu_wrapper.hh
+++ b/source/blender/draw/intern/DRW_gpu_wrapper.hh
@@ -641,11 +641,6 @@ class Texture : NonCopyable {
}
if (tx_ == nullptr) {
tx_ = create(w, h, d, mips, format, data, layered, cubemap);
- if (mips > 1) {
- /* TODO(@fclem): Remove once we have immutable storage or when mips are
- * generated on creation. */
- GPU_texture_generate_mipmap(tx_);
- }
return true;
}
return false;
diff --git a/source/blender/draw/intern/draw_cache.c b/source/blender/draw/intern/draw_cache.c
index ce8d3136432..8fc97ddcfc2 100644
--- a/source/blender/draw/intern/draw_cache.c
+++ b/source/blender/draw/intern/draw_cache.c
@@ -813,7 +813,7 @@ GPUBatch *DRW_cache_object_edge_detection_get(Object *ob, bool *r_is_manifold)
switch (ob->type) {
case OB_MESH:
return DRW_cache_mesh_edge_detection_get(ob, r_is_manifold);
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
return NULL;
case OB_SURF:
return DRW_cache_surf_edge_detection_get(ob, r_is_manifold);
@@ -837,7 +837,7 @@ GPUBatch *DRW_cache_object_face_wireframe_get(Object *ob)
switch (ob->type) {
case OB_MESH:
return DRW_cache_mesh_face_wireframe_get(ob);
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
return NULL;
case OB_SURF:
return DRW_cache_surf_face_wireframe_get(ob);
@@ -864,7 +864,7 @@ GPUBatch *DRW_cache_object_loose_edges_get(struct Object *ob)
switch (ob->type) {
case OB_MESH:
return DRW_cache_mesh_loose_edges_get(ob);
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
return NULL;
case OB_SURF:
return DRW_cache_surf_loose_edges_get(ob);
@@ -888,7 +888,7 @@ GPUBatch *DRW_cache_object_surface_get(Object *ob)
switch (ob->type) {
case OB_MESH:
return DRW_cache_mesh_surface_get(ob);
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
return NULL;
case OB_SURF:
return DRW_cache_surf_surface_get(ob);
@@ -915,7 +915,7 @@ GPUVertBuf *DRW_cache_object_pos_vertbuf_get(Object *ob)
switch (type) {
case OB_MESH:
return DRW_mesh_batch_cache_pos_vertbuf_get((me != NULL) ? me : ob->data);
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
case OB_SURF:
case OB_FONT:
return DRW_curve_batch_cache_pos_vertbuf_get(ob->data);
@@ -947,7 +947,7 @@ int DRW_cache_object_material_count_get(struct Object *ob)
switch (type) {
case OB_MESH:
return DRW_mesh_material_count_get(ob, (me != NULL) ? me : ob->data);
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
case OB_SURF:
case OB_FONT:
return DRW_curve_material_count_get(ob->data);
@@ -972,7 +972,7 @@ GPUBatch **DRW_cache_object_surface_material_get(struct Object *ob,
switch (ob->type) {
case OB_MESH:
return DRW_cache_mesh_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
return NULL;
case OB_SURF:
return DRW_cache_surf_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
@@ -2922,21 +2922,21 @@ GPUBatch *DRW_cache_mesh_surface_mesh_analysis_get(Object *ob)
GPUBatch *DRW_cache_curve_edge_wire_get(Object *ob)
{
- BLI_assert(ob->type == OB_CURVE);
+ BLI_assert(ob->type == OB_CURVES_LEGACY);
struct Curve *cu = ob->data;
return DRW_curve_batch_cache_get_wire_edge(cu);
}
GPUBatch *DRW_cache_curve_edge_normal_get(Object *ob)
{
- BLI_assert(ob->type == OB_CURVE);
+ BLI_assert(ob->type == OB_CURVES_LEGACY);
struct Curve *cu = ob->data;
return DRW_curve_batch_cache_get_normal_edge(cu);
}
GPUBatch *DRW_cache_curve_edge_overlay_get(Object *ob)
{
- BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF));
+ BLI_assert(ELEM(ob->type, OB_CURVES_LEGACY, OB_SURF));
struct Curve *cu = ob->data;
return DRW_curve_batch_cache_get_edit_edges(cu);
@@ -2944,7 +2944,7 @@ GPUBatch *DRW_cache_curve_edge_overlay_get(Object *ob)
GPUBatch *DRW_cache_curve_vert_overlay_get(Object *ob)
{
- BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF));
+ BLI_assert(ELEM(ob->type, OB_CURVES_LEGACY, OB_SURF));
struct Curve *cu = ob->data;
return DRW_curve_batch_cache_get_edit_verts(cu);
@@ -3373,7 +3373,7 @@ void drw_batch_cache_validate(Object *ob)
case OB_MESH:
DRW_mesh_batch_cache_validate(ob, (Mesh *)ob->data);
break;
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
case OB_FONT:
DRW_curve_batch_cache_validate((Curve *)ob->data);
break;
@@ -3423,7 +3423,7 @@ void drw_batch_cache_generate_requested(Object *ob)
DRW_mesh_batch_cache_create_requested(
DST.task_graph, ob, (Mesh *)ob->data, scene, is_paint_mode, use_hide);
break;
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
case OB_FONT:
DRW_curve_batch_cache_create_requested(ob, scene);
break;
diff --git a/source/blender/draw/intern/draw_cache_extract_mesh.cc b/source/blender/draw/intern/draw_cache_extract_mesh.cc
index 738a9029167..49e51d77f7b 100644
--- a/source/blender/draw/intern/draw_cache_extract_mesh.cc
+++ b/source/blender/draw/intern/draw_cache_extract_mesh.cc
@@ -819,6 +819,7 @@ static void mesh_buffer_cache_create_requested_subdiv(MeshBatchCache *cache,
EXTRACT_ADD_REQUESTED(vbo, edituv_data);
/* Make sure UVs are computed before edituv stuffs. */
EXTRACT_ADD_REQUESTED(vbo, uv);
+ EXTRACT_ADD_REQUESTED(vbo, tan);
EXTRACT_ADD_REQUESTED(vbo, edituv_stretch_area);
EXTRACT_ADD_REQUESTED(vbo, edituv_stretch_angle);
EXTRACT_ADD_REQUESTED(ibo, lines_adjacency);
@@ -832,6 +833,7 @@ static void mesh_buffer_cache_create_requested_subdiv(MeshBatchCache *cache,
return;
}
+ mesh_render_data_update_looptris(mr, MR_ITER_LOOPTRI, MR_DATA_LOOPTRI);
mesh_render_data_update_loose_geom(mr, mbc, MR_ITER_LEDGE | MR_ITER_LVERT, MR_DATA_LOOSE_GEOM);
void *data_stack = MEM_mallocN(extractors.data_size_total(), __func__);
diff --git a/source/blender/draw/intern/draw_cache_impl_curve.cc b/source/blender/draw/intern/draw_cache_impl_curve.cc
index abba3beb893..6a3d3fa5e9e 100644
--- a/source/blender/draw/intern/draw_cache_impl_curve.cc
+++ b/source/blender/draw/intern/draw_cache_impl_curve.cc
@@ -945,7 +945,7 @@ int DRW_curve_material_count_get(Curve *cu)
void DRW_curve_batch_cache_create_requested(Object *ob, const struct Scene *scene)
{
- BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF, OB_FONT));
+ BLI_assert(ELEM(ob->type, OB_CURVES_LEGACY, OB_SURF, OB_FONT));
Curve *cu = (Curve *)ob->data;
CurveBatchCache *cache = curve_batch_cache_get(cu);
diff --git a/source/blender/draw/intern/draw_cache_impl_curves.cc b/source/blender/draw/intern/draw_cache_impl_curves.cc
index a779c694cd2..df1ac12605a 100644
--- a/source/blender/draw/intern/draw_cache_impl_curves.cc
+++ b/source/blender/draw/intern/draw_cache_impl_curves.cc
@@ -22,7 +22,7 @@
#include "DNA_curves_types.h"
#include "DNA_object_types.h"
-#include "BKE_curves.h"
+#include "BKE_curves.hh"
#include "GPU_batch.h"
#include "GPU_material.h"
@@ -133,12 +133,12 @@ static void curves_batch_cache_fill_segments_proc_pos(Curves *curves,
{
/* TODO: use hair radius layer if available. */
const int curve_size = curves->geometry.curve_size;
- Span<int> offsets{curves->geometry.offsets, curves->geometry.curve_size + 1};
-
- Span<float3> positions{(float3 *)curves->geometry.position, curves->geometry.point_size};
+ const blender::bke::CurvesGeometry &geometry = blender::bke::CurvesGeometry::wrap(
+ curves->geometry);
+ Span<float3> positions = geometry.positions();
for (const int i : IndexRange(curve_size)) {
- const IndexRange curve_range(offsets[i], offsets[i + 1] - offsets[i]);
+ const IndexRange curve_range = geometry.range_for_curve(i);
Span<float3> spline_positions = positions.slice(curve_range);
float total_len = 0.0f;
@@ -215,11 +215,11 @@ static void curves_batch_cache_fill_strands_data(Curves *curves,
GPUVertBufRaw *data_step,
GPUVertBufRaw *seg_step)
{
- const int curve_size = curves->geometry.curve_size;
- Span<int> offsets{curves->geometry.offsets, curves->geometry.curve_size + 1};
+ const blender::bke::CurvesGeometry &geometry = blender::bke::CurvesGeometry::wrap(
+ curves->geometry);
- for (const int i : IndexRange(curve_size)) {
- const IndexRange curve_range(offsets[i], offsets[i + 1] - offsets[i]);
+ for (const int i : IndexRange(geometry.curves_size())) {
+ const IndexRange curve_range = geometry.range_for_curve(i);
*(uint *)GPU_vertbuf_raw_step(data_step) = curve_range.start();
*(ushort *)GPU_vertbuf_raw_step(seg_step) = curve_range.size() - 1;
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.c b/source/blender/draw/intern/draw_cache_impl_mesh.c
index 8833a354c21..79a080cfccd 100644
--- a/source/blender/draw/intern/draw_cache_impl_mesh.c
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.c
@@ -339,11 +339,7 @@ static void drw_mesh_attributes_merge(DRW_MeshAttributes *dst,
/* Return true if all requests in b are in a. */
static bool drw_mesh_attributes_overlap(DRW_MeshAttributes *a, DRW_MeshAttributes *b)
{
- if (a->num_requests != b->num_requests) {
- return false;
- }
-
- for (int i = 0; i < a->num_requests; i++) {
+ for (int i = 0; i < b->num_requests; i++) {
if (!has_request(a, b->requests[i])) {
return false;
}
@@ -1712,7 +1708,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
const int required_mode = BKE_subsurf_modifier_eval_required_mode(DRW_state_is_scene_render(),
is_editmode);
- const bool do_subdivision = BKE_subsurf_modifier_can_do_gpu_subdiv(scene, ob, required_mode);
+ const bool do_subdivision = BKE_subsurf_modifier_can_do_gpu_subdiv(scene, ob, me, required_mode);
MeshBufferList *mbuflist = &cache->final.buff;
diff --git a/source/blender/draw/intern/draw_cache_impl_subdivision.cc b/source/blender/draw/intern/draw_cache_impl_subdivision.cc
index ac2e5bbca2e..5d99478476c 100644
--- a/source/blender/draw/intern/draw_cache_impl_subdivision.cc
+++ b/source/blender/draw/intern/draw_cache_impl_subdivision.cc
@@ -67,7 +67,6 @@ enum {
SHADER_BUFFER_NORMALS_ACCUMULATE,
SHADER_BUFFER_NORMALS_FINALIZE,
SHADER_PATCH_EVALUATION,
- SHADER_PATCH_EVALUATION_LIMIT_NORMALS,
SHADER_PATCH_EVALUATION_FVAR,
SHADER_PATCH_EVALUATION_FACE_DOTS,
SHADER_COMP_CUSTOM_DATA_INTERP_1D,
@@ -107,7 +106,6 @@ static const char *get_shader_code(int shader_type)
return datatoc_common_subdiv_normals_finalize_comp_glsl;
}
case SHADER_PATCH_EVALUATION:
- case SHADER_PATCH_EVALUATION_LIMIT_NORMALS:
case SHADER_PATCH_EVALUATION_FVAR:
case SHADER_PATCH_EVALUATION_FACE_DOTS: {
return datatoc_common_subdiv_patch_evaluation_comp_glsl;
@@ -159,9 +157,6 @@ static const char *get_shader_name(int shader_type)
case SHADER_PATCH_EVALUATION: {
return "subdiv patch evaluation";
}
- case SHADER_PATCH_EVALUATION_LIMIT_NORMALS: {
- return "subdiv patch evaluation limit normals";
- }
case SHADER_PATCH_EVALUATION_FVAR: {
return "subdiv patch evaluation face-varying";
}
@@ -199,13 +194,7 @@ static GPUShader *get_patch_evaluation_shader(int shader_type)
const char *compute_code = get_shader_code(shader_type);
const char *defines = nullptr;
- if (shader_type == SHADER_PATCH_EVALUATION_LIMIT_NORMALS) {
- defines =
- "#define OSD_PATCH_BASIS_GLSL\n"
- "#define OPENSUBDIV_GLSL_COMPUTE_USE_1ST_DERIVATIVES\n"
- "#define LIMIT_NORMALS\n";
- }
- else if (shader_type == SHADER_PATCH_EVALUATION_FVAR) {
+ if (shader_type == SHADER_PATCH_EVALUATION_FVAR) {
defines =
"#define OSD_PATCH_BASIS_GLSL\n"
"#define OPENSUBDIV_GLSL_COMPUTE_USE_1ST_DERIVATIVES\n"
@@ -246,7 +235,6 @@ static GPUShader *get_subdiv_shader(int shader_type, const char *defines)
{
if (ELEM(shader_type,
SHADER_PATCH_EVALUATION,
- SHADER_PATCH_EVALUATION_LIMIT_NORMALS,
SHADER_PATCH_EVALUATION_FVAR,
SHADER_PATCH_EVALUATION_FACE_DOTS)) {
return get_patch_evaluation_shader(shader_type);
@@ -592,6 +580,67 @@ void draw_subdiv_cache_free(DRWSubdivCache *cache)
SUBDIV_COARSE_FACE_FLAG_ACTIVE) \
<< SUBDIV_COARSE_FACE_FLAG_OFFSET)
+static uint32_t compute_coarse_face_flag(BMFace *f, BMFace *efa_act)
+{
+ if (f == nullptr) {
+ /* May happen during mapped extraction. */
+ return 0;
+ }
+
+ uint32_t flag = 0;
+ if (BM_elem_flag_test(f, BM_ELEM_SMOOTH)) {
+ flag |= SUBDIV_COARSE_FACE_FLAG_SMOOTH;
+ }
+ if (BM_elem_flag_test(f, BM_ELEM_SELECT)) {
+ flag |= SUBDIV_COARSE_FACE_FLAG_SELECT;
+ }
+ if (f == efa_act) {
+ flag |= SUBDIV_COARSE_FACE_FLAG_ACTIVE;
+ }
+ const int loopstart = BM_elem_index_get(f->l_first);
+ return (uint)(loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
+}
+
+static void draw_subdiv_cache_extra_coarse_face_data_bm(BMesh *bm,
+ BMFace *efa_act,
+ uint32_t *flags_data)
+{
+ BMFace *f;
+ BMIter iter;
+
+ BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
+ const int index = BM_elem_index_get(f);
+ flags_data[index] = compute_coarse_face_flag(f, efa_act);
+ }
+}
+
+static void draw_subdiv_cache_extra_coarse_face_data_mesh(Mesh *mesh, uint32_t *flags_data)
+{
+ for (int i = 0; i < mesh->totpoly; i++) {
+ uint32_t flag = 0;
+ if ((mesh->mpoly[i].flag & ME_SMOOTH) != 0) {
+ flag = SUBDIV_COARSE_FACE_FLAG_SMOOTH;
+ }
+ flags_data[i] = (uint)(mesh->mpoly[i].loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
+ }
+}
+
+static void draw_subdiv_cache_extra_coarse_face_data_mapped(Mesh *mesh,
+ BMesh *bm,
+ MeshRenderData *mr,
+ uint32_t *flags_data)
+{
+ if (bm == nullptr) {
+ draw_subdiv_cache_extra_coarse_face_data_mesh(mesh, flags_data);
+ return;
+ }
+
+ for (int i = 0; i < mesh->totpoly; i++) {
+ BMFace *f = bm_original_face_get(mr, i);
+ flags_data[i] = compute_coarse_face_flag(f, mr->efa_act);
+ }
+}
+
static void draw_subdiv_cache_update_extra_coarse_face_data(DRWSubdivCache *cache,
Mesh *mesh,
MeshRenderData *mr)
@@ -611,56 +660,13 @@ static void draw_subdiv_cache_update_extra_coarse_face_data(DRWSubdivCache *cach
uint32_t *flags_data = (uint32_t *)(GPU_vertbuf_get_data(cache->extra_coarse_face_data));
if (mr->extract_type == MR_EXTRACT_BMESH) {
- BMesh *bm = cache->bm;
- BMFace *f;
- BMIter iter;
-
- /* Ensure all current elements follow new customdata layout. */
- BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
- const int index = BM_elem_index_get(f);
- uint32_t flag = 0;
- if (BM_elem_flag_test(f, BM_ELEM_SMOOTH)) {
- flag |= SUBDIV_COARSE_FACE_FLAG_SMOOTH;
- }
- if (BM_elem_flag_test(f, BM_ELEM_SELECT)) {
- flag |= SUBDIV_COARSE_FACE_FLAG_SELECT;
- }
- if (f == mr->efa_act) {
- flag |= SUBDIV_COARSE_FACE_FLAG_ACTIVE;
- }
- const int loopstart = BM_elem_index_get(f->l_first);
- flags_data[index] = (uint)(loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
- }
+ draw_subdiv_cache_extra_coarse_face_data_bm(cache->bm, mr->efa_act, flags_data);
}
else if (mr->extract_type == MR_EXTRACT_MAPPED) {
- for (int i = 0; i < mesh->totpoly; i++) {
- BMFace *f = bm_original_face_get(mr, i);
- uint32_t flag = 0;
-
- if (f) {
- if (BM_elem_flag_test(f, BM_ELEM_SMOOTH)) {
- flag |= SUBDIV_COARSE_FACE_FLAG_SMOOTH;
- }
- if (BM_elem_flag_test(f, BM_ELEM_SELECT)) {
- flag |= SUBDIV_COARSE_FACE_FLAG_SELECT;
- }
- if (f == mr->efa_act) {
- flag |= SUBDIV_COARSE_FACE_FLAG_ACTIVE;
- }
- const int loopstart = BM_elem_index_get(f->l_first);
- flag = (uint)(loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
- }
- flags_data[i] = flag;
- }
+ draw_subdiv_cache_extra_coarse_face_data_mapped(mesh, cache->bm, mr, flags_data);
}
else {
- for (int i = 0; i < mesh->totpoly; i++) {
- uint32_t flag = 0;
- if ((mesh->mpoly[i].flag & ME_SMOOTH) != 0) {
- flag = SUBDIV_COARSE_FACE_FLAG_SMOOTH;
- }
- flags_data[i] = (uint)(mesh->mpoly[i].loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
- }
+ draw_subdiv_cache_extra_coarse_face_data_mesh(mesh, flags_data);
}
/* Make sure updated data is re-uploaded. */
@@ -1176,9 +1182,7 @@ static void drw_subdiv_compute_dispatch(const DRWSubdivCache *cache,
GPU_compute_dispatch(shader, dispatch_rx, dispatch_ry, 1);
}
-void draw_subdiv_extract_pos_nor(const DRWSubdivCache *cache,
- GPUVertBuf *pos_nor,
- const bool do_limit_normals)
+void draw_subdiv_extract_pos_nor(const DRWSubdivCache *cache, GPUVertBuf *pos_nor)
{
Subdiv *subdiv = cache->subdiv;
OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
@@ -1203,8 +1207,7 @@ void draw_subdiv_extract_pos_nor(const DRWSubdivCache *cache,
get_patch_param_format());
evaluator->wrapPatchParamBuffer(evaluator, &patch_param_buffer_interface);
- GPUShader *shader = get_patch_evaluation_shader(
- do_limit_normals ? SHADER_PATCH_EVALUATION_LIMIT_NORMALS : SHADER_PATCH_EVALUATION);
+ GPUShader *shader = get_patch_evaluation_shader(SHADER_PATCH_EVALUATION);
GPU_shader_bind(shader);
GPU_vertbuf_bind_as_ssbo(src_buffer, 0);
@@ -1299,7 +1302,8 @@ void draw_subdiv_interp_custom_data(const DRWSubdivCache *cache,
GPUVertBuf *src_data,
GPUVertBuf *dst_data,
int dimensions,
- int dst_offset)
+ int dst_offset,
+ bool compress_to_u16)
{
GPUShader *shader = nullptr;
@@ -1319,10 +1323,17 @@ void draw_subdiv_interp_custom_data(const DRWSubdivCache *cache,
"#define DIMENSIONS 3\n");
}
else if (dimensions == 4) {
- shader = get_subdiv_shader(SHADER_COMP_CUSTOM_DATA_INTERP_4D,
- "#define SUBDIV_POLYGON_OFFSET\n"
- "#define DIMENSIONS 4\n"
- "#define GPU_FETCH_U16_TO_FLOAT\n");
+ if (compress_to_u16) {
+ shader = get_subdiv_shader(SHADER_COMP_CUSTOM_DATA_INTERP_4D,
+ "#define SUBDIV_POLYGON_OFFSET\n"
+ "#define DIMENSIONS 4\n"
+ "#define GPU_FETCH_U16_TO_FLOAT\n");
+ }
+ else {
+ shader = get_subdiv_shader(SHADER_COMP_CUSTOM_DATA_INTERP_4D,
+ "#define SUBDIV_POLYGON_OFFSET\n"
+ "#define DIMENSIONS 4\n");
+ }
}
else {
/* Crash if dimensions are not supported. */
@@ -1376,6 +1387,7 @@ void draw_subdiv_accumulate_normals(const DRWSubdivCache *cache,
GPUVertBuf *pos_nor,
GPUVertBuf *face_adjacency_offsets,
GPUVertBuf *face_adjacency_lists,
+ GPUVertBuf *vertex_loop_map,
GPUVertBuf *vertex_normals)
{
GPUShader *shader = get_subdiv_shader(SHADER_BUFFER_NORMALS_ACCUMULATE, nullptr);
@@ -1386,6 +1398,7 @@ void draw_subdiv_accumulate_normals(const DRWSubdivCache *cache,
GPU_vertbuf_bind_as_ssbo(pos_nor, binding_point++);
GPU_vertbuf_bind_as_ssbo(face_adjacency_offsets, binding_point++);
GPU_vertbuf_bind_as_ssbo(face_adjacency_lists, binding_point++);
+ GPU_vertbuf_bind_as_ssbo(vertex_loop_map, binding_point++);
GPU_vertbuf_bind_as_ssbo(vertex_normals, binding_point++);
drw_subdiv_compute_dispatch(cache, shader, 0, 0, cache->num_subdiv_verts);
@@ -1785,9 +1798,9 @@ static bool draw_subdiv_create_requested_buffers(const Scene *scene,
const float obmat[4][4],
const bool do_final,
const bool do_uvedit,
- const bool UNUSED(use_subsurf_fdots),
+ const bool /*use_subsurf_fdots*/,
const ToolSettings *ts,
- const bool UNUSED(use_hide),
+ const bool /*use_hide*/,
OpenSubdiv_EvaluatorCache *evaluator_cache)
{
SubsurfModifierData *smd = BKE_object_get_last_subsurf_modifier(ob);
@@ -1833,8 +1846,6 @@ static bool draw_subdiv_create_requested_buffers(const Scene *scene,
draw_cache->subdiv = subdiv;
draw_cache->optimal_display = optimal_display;
draw_cache->num_subdiv_triangles = tris_count_from_number_of_loops(draw_cache->num_subdiv_loops);
- /* We can only evaluate limit normals if the patches are adaptive. */
- draw_cache->do_limit_normals = settings.is_adaptive;
draw_cache->use_custom_loop_normals = (smd->flags & eSubsurfModifierFlag_UseCustomNormals) &&
(mesh_eval->flag & ME_AUTOSMOOTH) &&
diff --git a/source/blender/draw/intern/draw_common.c b/source/blender/draw/intern/draw_common.c
index fcfaf404fc2..2897234f4dc 100644
--- a/source/blender/draw/intern/draw_common.c
+++ b/source/blender/draw/intern/draw_common.c
@@ -412,7 +412,7 @@ bool DRW_object_is_flat(Object *ob, int *r_axis)
if (!ELEM(ob->type,
OB_MESH,
- OB_CURVE,
+ OB_CURVES_LEGACY,
OB_SURF,
OB_FONT,
OB_MBALL,
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
index 440f74af64b..2886fe53879 100644
--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c
@@ -2774,7 +2774,7 @@ void DRW_draw_depth_object(
GPU_uniformbuf_free(ubo);
} break;
- case OB_CURVE:
+ case OB_CURVES_LEGACY:
case OB_SURF:
break;
}
diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c
index 3b35b8c1f9d..95691a0df68 100644
--- a/source/blender/draw/intern/draw_manager_data.c
+++ b/source/blender/draw/intern/draw_manager_data.c
@@ -514,7 +514,7 @@ static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[4])
case ID_ME:
BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, &texcosize);
break;
- case ID_CU: {
+ case ID_CU_LEGACY: {
Curve *cu = (Curve *)ob_data;
BKE_curve_texspace_ensure(cu);
texcoloc = cu->loc;
diff --git a/source/blender/draw/intern/draw_subdivision.h b/source/blender/draw/intern/draw_subdivision.h
index 6714ba571e5..bd02df6d48b 100644
--- a/source/blender/draw/intern/draw_subdivision.h
+++ b/source/blender/draw/intern/draw_subdivision.h
@@ -51,7 +51,6 @@ typedef struct DRWSubdivCache {
struct BMesh *bm;
struct Subdiv *subdiv;
bool optimal_display;
- bool do_limit_normals;
bool use_custom_loop_normals;
/* Coordinates used to evaluate patches for UVs, positions, and normals. */
@@ -165,6 +164,7 @@ void draw_subdiv_accumulate_normals(const DRWSubdivCache *cache,
struct GPUVertBuf *pos_nor,
struct GPUVertBuf *face_adjacency_offsets,
struct GPUVertBuf *face_adjacency_lists,
+ struct GPUVertBuf *vertex_loop_map,
struct GPUVertBuf *vertex_normals);
void draw_subdiv_finalize_normals(const DRWSubdivCache *cache,
@@ -176,15 +176,14 @@ void draw_subdiv_finalize_custom_normals(const DRWSubdivCache *cache,
GPUVertBuf *src_custom_normals,
GPUVertBuf *pos_nor);
-void draw_subdiv_extract_pos_nor(const DRWSubdivCache *cache,
- struct GPUVertBuf *pos_nor,
- bool do_limit_normals);
+void draw_subdiv_extract_pos_nor(const DRWSubdivCache *cache, struct GPUVertBuf *pos_nor);
void draw_subdiv_interp_custom_data(const DRWSubdivCache *cache,
struct GPUVertBuf *src_data,
struct GPUVertBuf *dst_data,
int dimensions,
- int dst_offset);
+ int dst_offset,
+ bool compress_to_u16);
void draw_subdiv_extract_uvs(const DRWSubdivCache *cache,
struct GPUVertBuf *uvs,
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc
index d5e34bc082e..4f4aa764fbc 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc
@@ -402,7 +402,7 @@ static void extract_attr_init_subdiv(const DRWSubdivCache *subdiv_cache,
/* Ensure data is uploaded properly. */
GPU_vertbuf_tag_dirty(src_data);
draw_subdiv_interp_custom_data(
- subdiv_cache, src_data, dst_buffer, static_cast<int>(dimensions), 0);
+ subdiv_cache, src_data, dst_buffer, static_cast<int>(dimensions), 0, false);
GPU_vertbuf_discard(src_data);
}
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc
index bd7f1ba0128..22fda284a74 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc
@@ -217,14 +217,12 @@ static void extract_pos_nor_init_subdiv(const DRWSubdivCache *subdiv_cache,
void *UNUSED(data))
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buffer);
- const bool do_limit_normals = subdiv_cache->do_limit_normals &&
- !subdiv_cache->use_custom_loop_normals;
/* Initialize the vertex buffer, it was already allocated. */
GPU_vertbuf_init_build_on_device(
vbo, get_pos_nor_format(), subdiv_cache->num_subdiv_loops + mr->loop_loose_len);
- draw_subdiv_extract_pos_nor(subdiv_cache, vbo, do_limit_normals);
+ draw_subdiv_extract_pos_nor(subdiv_cache, vbo);
if (subdiv_cache->use_custom_loop_normals) {
Mesh *coarse_mesh = subdiv_cache->mesh;
@@ -243,14 +241,15 @@ static void extract_pos_nor_init_subdiv(const DRWSubdivCache *subdiv_cache,
GPU_vertbuf_init_build_on_device(
dst_custom_normals, get_custom_normals_format(), subdiv_cache->num_subdiv_loops);
- draw_subdiv_interp_custom_data(subdiv_cache, src_custom_normals, dst_custom_normals, 3, 0);
+ draw_subdiv_interp_custom_data(
+ subdiv_cache, src_custom_normals, dst_custom_normals, 3, 0, false);
draw_subdiv_finalize_custom_normals(subdiv_cache, dst_custom_normals, vbo);
GPU_vertbuf_discard(src_custom_normals);
GPU_vertbuf_discard(dst_custom_normals);
}
- else if (!do_limit_normals) {
+ else {
/* We cannot evaluate vertex normals using the limit surface, so compute them manually. */
GPUVertBuf *subdiv_loop_subdiv_vert_index = draw_subdiv_build_origindex_buffer(
subdiv_cache->subdiv_loop_subdiv_vert_index, subdiv_cache->num_subdiv_loops);
@@ -263,6 +262,7 @@ static void extract_pos_nor_init_subdiv(const DRWSubdivCache *subdiv_cache,
vbo,
subdiv_cache->subdiv_vertex_face_adjacency_offsets,
subdiv_cache->subdiv_vertex_face_adjacency,
+ subdiv_loop_subdiv_vert_index,
vertex_normals);
draw_subdiv_finalize_normals(subdiv_cache, vertex_normals, subdiv_loop_subdiv_vert_index, vbo);
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_sculpt_data.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_sculpt_data.cc
index 78c215845e0..96595df9276 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_sculpt_data.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_sculpt_data.cc
@@ -151,7 +151,7 @@ static void extract_sculpt_data_init_subdiv(const DRWSubdivCache *subdiv_cache,
GPU_vertbuf_init_build_on_device(
subdiv_mask_vbo, &mask_format, subdiv_cache->num_subdiv_loops);
- draw_subdiv_interp_custom_data(subdiv_cache, mask_vbo, subdiv_mask_vbo, 1, 0);
+ draw_subdiv_interp_custom_data(subdiv_cache, mask_vbo, subdiv_mask_vbo, 1, 0, false);
}
/* Then, gather face sets. */
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_tan.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_tan.cc
index 209168750e7..225d1676151 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_tan.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_tan.cc
@@ -16,22 +16,26 @@
#include "extract_mesh.h"
+#include "draw_subdivision.h"
+
namespace blender::draw {
/* ---------------------------------------------------------------------- */
/** \name Extract Tangent layers
* \{ */
-static void extract_tan_ex_init(const MeshRenderData *mr,
- struct MeshBatchCache *cache,
- GPUVertBuf *vbo,
- const bool do_hq)
+static void extract_tan_init_common(const MeshRenderData *mr,
+ struct MeshBatchCache *cache,
+ GPUVertFormat *format,
+ GPUVertCompType comp_type,
+ GPUVertFetchMode fetch_mode,
+ CustomData *r_loop_data,
+ int *r_v_len,
+ int *r_tan_len,
+ char r_tangent_names[MAX_MTFACE][MAX_CUSTOMDATA_LAYER_NAME],
+ bool *r_use_orco_tan)
{
- GPUVertCompType comp_type = do_hq ? GPU_COMP_I16 : GPU_COMP_I10;
- GPUVertFetchMode fetch_mode = GPU_FETCH_INT_TO_FLOAT_UNIT;
-
- GPUVertFormat format = {0};
- GPU_vertformat_deinterleave(&format);
+ GPU_vertformat_deinterleave(format);
CustomData *cd_ldata = (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->ldata : &mr->me->ldata;
CustomData *cd_vdata = (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->vdata : &mr->me->vdata;
@@ -41,7 +45,6 @@ static void extract_tan_ex_init(const MeshRenderData *mr,
bool use_orco_tan = cache->cd_used.tan_orco != 0;
int tan_len = 0;
- char tangent_names[MAX_MTFACE][MAX_CUSTOMDATA_LAYER_NAME];
/* FIXME(T91838): This is to avoid a crash when orco tangent was requested but there are valid
* uv layers. It would be better to fix the root cause. */
@@ -57,17 +60,17 @@ static void extract_tan_ex_init(const MeshRenderData *mr,
GPU_vertformat_safe_attr_name(layer_name, attr_safe_name, GPU_MAX_SAFE_ATTR_NAME);
/* Tangent layer name. */
BLI_snprintf(attr_name, sizeof(attr_name), "t%s", attr_safe_name);
- GPU_vertformat_attr_add(&format, attr_name, comp_type, 4, fetch_mode);
+ GPU_vertformat_attr_add(format, attr_name, comp_type, 4, fetch_mode);
/* Active render layer name. */
if (i == CustomData_get_render_layer(cd_ldata, CD_MLOOPUV)) {
- GPU_vertformat_alias_add(&format, "t");
+ GPU_vertformat_alias_add(format, "t");
}
/* Active display layer name. */
if (i == CustomData_get_active_layer(cd_ldata, CD_MLOOPUV)) {
- GPU_vertformat_alias_add(&format, "at");
+ GPU_vertformat_alias_add(format, "at");
}
- BLI_strncpy(tangent_names[tan_len++], layer_name, MAX_CUSTOMDATA_LAYER_NAME);
+ BLI_strncpy(r_tangent_names[tan_len++], layer_name, MAX_CUSTOMDATA_LAYER_NAME);
}
}
if (use_orco_tan && orco == nullptr) {
@@ -94,20 +97,19 @@ static void extract_tan_ex_init(const MeshRenderData *mr,
}
/* Start Fresh */
- CustomData loop_data;
- CustomData_reset(&loop_data);
+ CustomData_reset(r_loop_data);
if (tan_len != 0 || use_orco_tan) {
short tangent_mask = 0;
bool calc_active_tangent = false;
if (mr->extract_type == MR_EXTRACT_BMESH) {
BKE_editmesh_loop_tangent_calc(mr->edit_bmesh,
calc_active_tangent,
- tangent_names,
+ r_tangent_names,
tan_len,
mr->poly_normals,
mr->loop_normals,
orco,
- &loop_data,
+ r_loop_data,
mr->loop_len,
&tangent_mask);
}
@@ -120,13 +122,13 @@ static void extract_tan_ex_init(const MeshRenderData *mr,
mr->tri_len,
cd_ldata,
calc_active_tangent,
- tangent_names,
+ r_tangent_names,
tan_len,
mr->vert_normals,
mr->poly_normals,
mr->loop_normals,
orco,
- &loop_data,
+ r_loop_data,
mr->loop_len,
&tangent_mask);
}
@@ -134,12 +136,12 @@ static void extract_tan_ex_init(const MeshRenderData *mr,
if (use_orco_tan) {
char attr_name[32], attr_safe_name[GPU_MAX_SAFE_ATTR_NAME];
- const char *layer_name = CustomData_get_layer_name(&loop_data, CD_TANGENT, 0);
+ const char *layer_name = CustomData_get_layer_name(r_loop_data, CD_TANGENT, 0);
GPU_vertformat_safe_attr_name(layer_name, attr_safe_name, GPU_MAX_SAFE_ATTR_NAME);
BLI_snprintf(attr_name, sizeof(*attr_name), "t%s", attr_safe_name);
- GPU_vertformat_attr_add(&format, attr_name, comp_type, 4, fetch_mode);
- GPU_vertformat_alias_add(&format, "t");
- GPU_vertformat_alias_add(&format, "at");
+ GPU_vertformat_attr_add(format, attr_name, comp_type, 4, fetch_mode);
+ GPU_vertformat_alias_add(format, "t");
+ GPU_vertformat_alias_add(format, "at");
}
if (orco_allocated) {
@@ -147,12 +149,42 @@ static void extract_tan_ex_init(const MeshRenderData *mr,
}
int v_len = mr->loop_len;
- if (format.attr_len == 0) {
- GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ if (format->attr_len == 0) {
+ GPU_vertformat_attr_add(format, "dummy", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
/* VBO will not be used, only allocate minimum of memory. */
v_len = 1;
}
+ *r_use_orco_tan = use_orco_tan;
+ *r_v_len = v_len;
+ *r_tan_len = tan_len;
+}
+
+static void extract_tan_ex_init(const MeshRenderData *mr,
+ struct MeshBatchCache *cache,
+ GPUVertBuf *vbo,
+ const bool do_hq)
+{
+ GPUVertCompType comp_type = do_hq ? GPU_COMP_I16 : GPU_COMP_I10;
+ GPUVertFetchMode fetch_mode = GPU_FETCH_INT_TO_FLOAT_UNIT;
+
+ GPUVertFormat format = {0};
+ CustomData loop_data;
+ int v_len = 0;
+ int tan_len = 0;
+ bool use_orco_tan;
+ char tangent_names[MAX_MTFACE][MAX_CUSTOMDATA_LAYER_NAME];
+ extract_tan_init_common(mr,
+ cache,
+ &format,
+ comp_type,
+ fetch_mode,
+ &loop_data,
+ &v_len,
+ &tan_len,
+ tangent_names,
+ &use_orco_tan);
+
GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, v_len);
@@ -211,10 +243,92 @@ static void extract_tan_init(const MeshRenderData *mr,
extract_tan_ex_init(mr, cache, vbo, false);
}
+static GPUVertFormat *get_coarse_tan_format()
+{
+ static GPUVertFormat format = {0};
+ if (format.attr_len == 0) {
+ GPU_vertformat_attr_add(&format, "tan", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+ }
+ return &format;
+}
+
+static void extract_tan_init_subdiv(const DRWSubdivCache *subdiv_cache,
+ const MeshRenderData *mr,
+ struct MeshBatchCache *cache,
+ void *buffer,
+ void *UNUSED(data))
+{
+ GPUVertCompType comp_type = GPU_COMP_F32;
+ GPUVertFetchMode fetch_mode = GPU_FETCH_FLOAT;
+ GPUVertFormat format = {0};
+ CustomData loop_data;
+ int coarse_len = 0;
+ int tan_len = 0;
+ bool use_orco_tan;
+ char tangent_names[MAX_MTFACE][MAX_CUSTOMDATA_LAYER_NAME];
+ extract_tan_init_common(mr,
+ cache,
+ &format,
+ comp_type,
+ fetch_mode,
+ &loop_data,
+ &coarse_len,
+ &tan_len,
+ tangent_names,
+ &use_orco_tan);
+
+ GPUVertBuf *dst_buffer = static_cast<GPUVertBuf *>(buffer);
+ GPU_vertbuf_init_build_on_device(dst_buffer, &format, subdiv_cache->num_subdiv_loops);
+
+ GPUVertBuf *coarse_vbo = GPU_vertbuf_calloc();
+ /* Dynamic as we upload and interpolate layers one at a time. */
+ GPU_vertbuf_init_with_format_ex(coarse_vbo, get_coarse_tan_format(), GPU_USAGE_DYNAMIC);
+ GPU_vertbuf_data_alloc(coarse_vbo, coarse_len);
+
+ /* Index of the tangent layer in the compact buffer. Used layers are stored in a single buffer.
+ */
+ int pack_layer_index = 0;
+ for (int i = 0; i < tan_len; i++) {
+ float(*tan_data)[4] = (float(*)[4])GPU_vertbuf_get_data(coarse_vbo);
+ const char *name = tangent_names[i];
+ float(*layer_data)[4] = (float(*)[4])CustomData_get_layer_named(&loop_data, CD_TANGENT, name);
+ for (int ml_index = 0; ml_index < mr->loop_len; ml_index++) {
+ copy_v3_v3(*tan_data, layer_data[ml_index]);
+ (*tan_data)[3] = (layer_data[ml_index][3] > 0.0f) ? 1.0f : -1.0f;
+ tan_data++;
+ }
+
+ /* Ensure data is uploaded properly. */
+ GPU_vertbuf_tag_dirty(coarse_vbo);
+ /* Include stride in offset. */
+ const int dst_offset = (int)subdiv_cache->num_subdiv_loops * 4 * pack_layer_index++;
+ draw_subdiv_interp_custom_data(subdiv_cache, coarse_vbo, dst_buffer, 4, dst_offset, false);
+ }
+ if (use_orco_tan) {
+ float(*tan_data)[4] = (float(*)[4])GPU_vertbuf_get_data(coarse_vbo);
+ float(*layer_data)[4] = (float(*)[4])CustomData_get_layer_n(&loop_data, CD_TANGENT, 0);
+ for (int ml_index = 0; ml_index < mr->loop_len; ml_index++) {
+ copy_v3_v3(*tan_data, layer_data[ml_index]);
+ (*tan_data)[3] = (layer_data[ml_index][3] > 0.0f) ? 1.0f : -1.0f;
+ tan_data++;
+ }
+
+ /* Ensure data is uploaded properly. */
+ GPU_vertbuf_tag_dirty(coarse_vbo);
+ /* Include stride in offset. */
+ const int dst_offset = (int)subdiv_cache->num_subdiv_loops * 4 * pack_layer_index++;
+ draw_subdiv_interp_custom_data(subdiv_cache, coarse_vbo, dst_buffer, 4, dst_offset, true);
+ }
+
+ CustomData_free(&loop_data, mr->loop_len);
+ GPU_vertbuf_discard(coarse_vbo);
+}
+
constexpr MeshExtract create_extractor_tan()
{
MeshExtract extractor = {nullptr};
extractor.init = extract_tan_init;
+ extractor.init_subdiv = extract_tan_init_subdiv;
extractor.data_type = MR_DATA_POLY_NOR | MR_DATA_TAN_LOOP_NOR | MR_DATA_LOOPTRI;
extractor.data_size = 0;
extractor.use_threading = false;
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_vcol.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_vcol.cc
index 138ff9fd1ff..7a8f4a9a17e 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_vcol.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_vcol.cc
@@ -164,7 +164,7 @@ static void extract_vcol_init_subdiv(const DRWSubdivCache *subdiv_cache,
/* Ensure data is uploaded properly. */
GPU_vertbuf_tag_dirty(src_data);
- draw_subdiv_interp_custom_data(subdiv_cache, src_data, dst_buffer, 4, dst_offset);
+ draw_subdiv_interp_custom_data(subdiv_cache, src_data, dst_buffer, 4, dst_offset, true);
}
}
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_weights.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_weights.cc
index 2e30d6bdfcf..89aa16ca0c7 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_weights.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_weights.cc
@@ -153,10 +153,10 @@ static void extract_weights_iter_poly_mesh(const MeshRenderData *mr,
}
static void extract_weights_init_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
+ const MeshRenderData *mr,
struct MeshBatchCache *cache,
void *buffer,
- void *UNUSED(data))
+ void *_data)
{
Mesh *coarse_mesh = subdiv_cache->mesh;
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buffer);
@@ -168,32 +168,24 @@ static void extract_weights_init_subdiv(const DRWSubdivCache *subdiv_cache,
GPU_vertbuf_init_build_on_device(vbo, &format, subdiv_cache->num_subdiv_loops);
GPUVertBuf *coarse_weights = GPU_vertbuf_calloc();
- GPU_vertbuf_init_with_format(coarse_weights, &format);
- GPU_vertbuf_data_alloc(coarse_weights, coarse_mesh->totloop);
- float *coarse_weights_data = static_cast<float *>(GPU_vertbuf_get_data(coarse_weights));
+ extract_weights_init(mr, cache, coarse_weights, _data);
- const DRW_MeshWeightState *wstate = &cache->weight_state;
- const MDeformVert *dverts = static_cast<const MDeformVert *>(
- CustomData_get_layer(&coarse_mesh->vdata, CD_MDEFORMVERT));
-
- for (int i = 0; i < coarse_mesh->totpoly; i++) {
- const MPoly *mpoly = &coarse_mesh->mpoly[i];
-
- for (int loop_index = mpoly->loopstart; loop_index < mpoly->loopstart + mpoly->totloop;
- loop_index++) {
- const MLoop *ml = &coarse_mesh->mloop[loop_index];
-
- if (dverts != nullptr) {
- const MDeformVert *dvert = &dverts[ml->v];
- coarse_weights_data[loop_index] = evaluate_vertex_weight(dvert, wstate);
- }
- else {
- coarse_weights_data[loop_index] = evaluate_vertex_weight(nullptr, wstate);
- }
+ if (mr->extract_type != MR_EXTRACT_BMESH) {
+ for (int i = 0; i < coarse_mesh->totpoly; i++) {
+ const MPoly *mpoly = &coarse_mesh->mpoly[i];
+ extract_weights_iter_poly_mesh(mr, mpoly, i, _data);
+ }
+ }
+ else {
+ BMIter f_iter;
+ BMFace *efa;
+ int face_index = 0;
+ BM_ITER_MESH_INDEX (efa, &f_iter, mr->bm, BM_FACES_OF_MESH, face_index) {
+ extract_weights_iter_poly_bm(mr, efa, face_index, _data);
}
}
- draw_subdiv_interp_custom_data(subdiv_cache, coarse_weights, vbo, 1, 0);
+ draw_subdiv_interp_custom_data(subdiv_cache, coarse_weights, vbo, 1, 0, false);
GPU_vertbuf_discard(coarse_weights);
}
diff --git a/source/blender/draw/intern/shaders/common_subdiv_custom_data_interp_comp.glsl b/source/blender/draw/intern/shaders/common_subdiv_custom_data_interp_comp.glsl
index df0016761e2..097ae0b3913 100644
--- a/source/blender/draw/intern/shaders/common_subdiv_custom_data_interp_comp.glsl
+++ b/source/blender/draw/intern/shaders/common_subdiv_custom_data_interp_comp.glsl
@@ -1,5 +1,5 @@
-/* To be compile with common_subdiv_lib.glsl */
+/* To be compiled with common_subdiv_lib.glsl */
layout(std430, binding = 1) readonly restrict buffer sourceBuffer
{
diff --git a/source/blender/draw/intern/shaders/common_subdiv_ibo_lines_comp.glsl b/source/blender/draw/intern/shaders/common_subdiv_ibo_lines_comp.glsl
index f11c0f6427e..3cbb9f980f3 100644
--- a/source/blender/draw/intern/shaders/common_subdiv_ibo_lines_comp.glsl
+++ b/source/blender/draw/intern/shaders/common_subdiv_ibo_lines_comp.glsl
@@ -1,5 +1,5 @@
-/* To be compile with common_subdiv_lib.glsl */
+/* To be compiled with common_subdiv_lib.glsl */
layout(std430, binding = 0) readonly buffer inputEdgeOrigIndex
{
diff --git a/source/blender/draw/intern/shaders/common_subdiv_ibo_tris_comp.glsl b/source/blender/draw/intern/shaders/common_subdiv_ibo_tris_comp.glsl
index 3257ebdae17..3dccc82541e 100644
--- a/source/blender/draw/intern/shaders/common_subdiv_ibo_tris_comp.glsl
+++ b/source/blender/draw/intern/shaders/common_subdiv_ibo_tris_comp.glsl
@@ -1,5 +1,5 @@
-/* To be compile with common_subdiv_lib.glsl */
+/* To be compiled with common_subdiv_lib.glsl */
/* Generate triangles from subdivision quads indices. */
diff --git a/source/blender/draw/intern/shaders/common_subdiv_lib.glsl b/source/blender/draw/intern/shaders/common_subdiv_lib.glsl
index e6538d80111..5d71c5e4bb8 100644
--- a/source/blender/draw/intern/shaders/common_subdiv_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_subdiv_lib.glsl
@@ -140,6 +140,13 @@ void set_vertex_nor(inout PosNorLoop vertex_data, vec3 nor)
set_vertex_nor(vertex_data, nor, 0);
}
+void add_newell_cross_v3_v3v3(inout vec3 n, vec3 v_prev, vec3 v_curr)
+{
+ n[0] += (v_prev[1] - v_curr[1]) * (v_prev[2] + v_curr[2]);
+ n[1] += (v_prev[2] - v_curr[2]) * (v_prev[0] + v_curr[0]);
+ n[2] += (v_prev[0] - v_curr[0]) * (v_prev[1] + v_curr[1]);
+}
+
#define ORIGINDEX_NONE -1
#ifdef SUBDIV_POLYGON_OFFSET
diff --git a/source/blender/draw/intern/shaders/common_subdiv_normals_accumulate_comp.glsl b/source/blender/draw/intern/shaders/common_subdiv_normals_accumulate_comp.glsl
index 575090472b1..0665cadfd2d 100644
--- a/source/blender/draw/intern/shaders/common_subdiv_normals_accumulate_comp.glsl
+++ b/source/blender/draw/intern/shaders/common_subdiv_normals_accumulate_comp.glsl
@@ -1,5 +1,5 @@
-/* To be compile with common_subdiv_lib.glsl */
+/* To be compiled with common_subdiv_lib.glsl */
layout(std430, binding = 0) readonly buffer inputVertexData
{
@@ -16,11 +16,33 @@ layout(std430, binding = 2) readonly buffer faceAdjacencyLists
uint face_adjacency_lists[];
};
-layout(std430, binding = 3) writeonly buffer vertexNormals
+layout(std430, binding = 3) readonly buffer vertexLoopMap
+{
+ uint vert_loop_map[];
+};
+
+layout(std430, binding = 4) writeonly buffer vertexNormals
{
vec3 normals[];
};
+void find_prev_and_next_vertex_on_face(
+ uint face_index, uint vertex_index, out uint curr, out uint next, out uint prev)
+{
+ uint start_loop_index = face_index * 4;
+
+ for (uint i = 0; i < 4; i++) {
+ uint subdiv_vert_index = vert_loop_map[start_loop_index + i];
+
+ if (subdiv_vert_index == vertex_index) {
+ curr = i;
+ next = (i + 1) % 4;
+ prev = (i + 4 - 1) % 4;
+ break;
+ }
+ }
+}
+
void main()
{
uint vertex_index = get_global_invocation_index();
@@ -39,18 +61,37 @@ void main()
uint adjacent_face = face_adjacency_lists[first_adjacent_face_offset + i];
uint start_loop_index = adjacent_face * 4;
- /* Compute face normal. */
- vec3 adjacent_verts[3];
- for (uint j = 0; j < 3; j++) {
- adjacent_verts[j] = get_vertex_pos(pos_nor[start_loop_index + j]);
+ /* Compute the face normal using Newell's method. */
+ vec3 verts[4];
+ for (uint j = 0; j < 4; j++) {
+ verts[j] = get_vertex_pos(pos_nor[start_loop_index + j]);
}
- vec3 face_normal = normalize(
- cross(adjacent_verts[1] - adjacent_verts[0], adjacent_verts[2] - adjacent_verts[0]));
- accumulated_normal += face_normal;
+ vec3 face_normal = vec3(0.0);
+ add_newell_cross_v3_v3v3(face_normal, verts[0], verts[1]);
+ add_newell_cross_v3_v3v3(face_normal, verts[1], verts[2]);
+ add_newell_cross_v3_v3v3(face_normal, verts[2], verts[3]);
+ add_newell_cross_v3_v3v3(face_normal, verts[3], verts[0]);
+
+ /* Accumulate angle weighted normal. */
+ uint curr_vert = 0;
+ uint next_vert = 0;
+ uint prev_vert = 0;
+ find_prev_and_next_vertex_on_face(
+ adjacent_face, vertex_index, curr_vert, next_vert, prev_vert);
+
+ vec3 curr_co = verts[curr_vert];
+ vec3 prev_co = verts[next_vert];
+ vec3 next_co = verts[prev_vert];
+
+ vec3 edvec_prev = normalize(prev_co - curr_co);
+ vec3 edvec_next = normalize(curr_co - next_co);
+
+ float fac = acos(-dot(edvec_prev, edvec_next));
+
+ accumulated_normal += face_normal * fac;
}
- float weight = 1.0 / float(number_of_adjacent_faces);
vec3 normal = normalize(accumulated_normal);
normals[vertex_index] = normal;
}
diff --git a/source/blender/draw/intern/shaders/common_subdiv_normals_finalize_comp.glsl b/source/blender/draw/intern/shaders/common_subdiv_normals_finalize_comp.glsl
index c2e0e752783..e6a56ff02c7 100644
--- a/source/blender/draw/intern/shaders/common_subdiv_normals_finalize_comp.glsl
+++ b/source/blender/draw/intern/shaders/common_subdiv_normals_finalize_comp.glsl
@@ -1,5 +1,5 @@
-/* To be compile with common_subdiv_lib.glsl */
+/* To be compiled with common_subdiv_lib.glsl */
#ifdef CUSTOM_NORMALS
struct CustomNormal {
diff --git a/source/blender/draw/intern/shaders/common_subdiv_patch_evaluation_comp.glsl b/source/blender/draw/intern/shaders/common_subdiv_patch_evaluation_comp.glsl
index 5dd7decf663..65cf4ebb90f 100644
--- a/source/blender/draw/intern/shaders/common_subdiv_patch_evaluation_comp.glsl
+++ b/source/blender/draw/intern/shaders/common_subdiv_patch_evaluation_comp.glsl
@@ -1,5 +1,5 @@
-/* To be compile with common_subdiv_lib.glsl */
+/* To be compiled with common_subdiv_lib.glsl */
/* Source buffer. */
layout(std430, binding = 0) buffer src_buffer
@@ -394,12 +394,8 @@ void main()
evaluate_patches_limits(patch_co.patch_index, uv.x, uv.y, pos, du, dv);
-# if defined(LIMIT_NORMALS)
- vec3 nor = normalize(cross(du, dv));
-# else
/* This will be computed later. */
vec3 nor = vec3(0.0);
-# endif
int origindex = input_vert_origindex[loop_index];
uint flag = 0;
diff --git a/source/blender/draw/intern/shaders/common_subdiv_vbo_edge_fac_comp.glsl b/source/blender/draw/intern/shaders/common_subdiv_vbo_edge_fac_comp.glsl
index 6c76cd41ca4..2161f0b28a9 100644
--- a/source/blender/draw/intern/shaders/common_subdiv_vbo_edge_fac_comp.glsl
+++ b/source/blender/draw/intern/shaders/common_subdiv_vbo_edge_fac_comp.glsl
@@ -1,5 +1,5 @@
-/* To be compile with common_subdiv_lib.glsl */
+/* To be compiled with common_subdiv_lib.glsl */
layout(std430, binding = 0) readonly buffer inputVertexData
{
diff --git a/source/blender/draw/intern/shaders/common_subdiv_vbo_edituv_strech_angle_comp.glsl b/source/blender/draw/intern/shaders/common_subdiv_vbo_edituv_strech_angle_comp.glsl
index ea73b9482d3..a8c9b7183eb 100644
--- a/source/blender/draw/intern/shaders/common_subdiv_vbo_edituv_strech_angle_comp.glsl
+++ b/source/blender/draw/intern/shaders/common_subdiv_vbo_edituv_strech_angle_comp.glsl
@@ -1,5 +1,5 @@
-/* To be compile with common_subdiv_lib.glsl */
+/* To be compiled with common_subdiv_lib.glsl */
layout(std430, binding = 0) readonly buffer inputVerts
{
diff --git a/source/blender/draw/intern/shaders/common_subdiv_vbo_edituv_strech_area_comp.glsl b/source/blender/draw/intern/shaders/common_subdiv_vbo_edituv_strech_area_comp.glsl
index e897fb3f3c0..230484048b1 100644
--- a/source/blender/draw/intern/shaders/common_subdiv_vbo_edituv_strech_area_comp.glsl
+++ b/source/blender/draw/intern/shaders/common_subdiv_vbo_edituv_strech_area_comp.glsl
@@ -1,5 +1,5 @@
-/* To be compile with common_subdiv_lib.glsl */
+/* To be compiled with common_subdiv_lib.glsl */
layout(std430, binding = 1) readonly buffer inputCoarseData
{
diff --git a/source/blender/draw/intern/shaders/common_subdiv_vbo_lnor_comp.glsl b/source/blender/draw/intern/shaders/common_subdiv_vbo_lnor_comp.glsl
index 41a8df3cf82..b7e04e240fb 100644
--- a/source/blender/draw/intern/shaders/common_subdiv_vbo_lnor_comp.glsl
+++ b/source/blender/draw/intern/shaders/common_subdiv_vbo_lnor_comp.glsl
@@ -1,5 +1,5 @@
-/* To be compile with common_subdiv_lib.glsl */
+/* To be compiled with common_subdiv_lib.glsl */
layout(std430, binding = 1) readonly buffer inputVertexData
{
@@ -38,13 +38,18 @@ void main()
}
}
else {
- /* Face is flat shaded, compute flat face normal from an inscribed triangle. */
- vec3 verts[3];
- for (int i = 0; i < 3; i++) {
- verts[i] = get_vertex_pos(pos_nor[start_loop_index + i]);
- }
-
- vec3 face_normal = normalize(cross(verts[1] - verts[0], verts[2] - verts[0]));
+ vec3 v0 = get_vertex_pos(pos_nor[start_loop_index + 0]);
+ vec3 v1 = get_vertex_pos(pos_nor[start_loop_index + 1]);
+ vec3 v2 = get_vertex_pos(pos_nor[start_loop_index + 2]);
+ vec3 v3 = get_vertex_pos(pos_nor[start_loop_index + 3]);
+
+ vec3 face_normal = vec3(0.0);
+ add_newell_cross_v3_v3v3(face_normal, v0, v1);
+ add_newell_cross_v3_v3v3(face_normal, v1, v2);
+ add_newell_cross_v3_v3v3(face_normal, v2, v3);
+ add_newell_cross_v3_v3v3(face_normal, v3, v0);
+
+ face_normal = normalize(face_normal);
for (int i = 0; i < 4; i++) {
output_lnor[start_loop_index + i] = face_normal;
}
diff --git a/source/blender/draw/intern/shaders/common_subdiv_vbo_sculpt_data_comp.glsl b/source/blender/draw/intern/shaders/common_subdiv_vbo_sculpt_data_comp.glsl
index 7182ce57ad3..77b599f6252 100644
--- a/source/blender/draw/intern/shaders/common_subdiv_vbo_sculpt_data_comp.glsl
+++ b/source/blender/draw/intern/shaders/common_subdiv_vbo_sculpt_data_comp.glsl
@@ -1,5 +1,5 @@
-/* To be compile with common_subdiv_lib.glsl */
+/* To be compiled with common_subdiv_lib.glsl */
struct SculptData {
uint face_set_color;