git.blender.org/blender.git
author    Clément Foucault <foucault.clem@gmail.com>  2022-01-27 16:37:24 +0300
committer Clément Foucault <foucault.clem@gmail.com>  2022-01-27 16:37:24 +0300
commit    6016dbb1a75566e1fb3ca257a1f13b9bba4467fd (patch)
tree      30ca1741378f36e7c6af17beb1b12c46fb487d05 /source/blender/blenkernel/intern
parent    c453aaa0b2408b50af7d47722114f1903f27a6fc (diff)
parent    0379ddac7d68114798c8963821fd821c1b3d7d28 (diff)

Merge branch 'master' into temp-gpu-image-engine

# Conflicts:
#	source/blender/blenkernel/CMakeLists.txt
#	source/blender/draw/CMakeLists.txt
Diffstat (limited to 'source/blender/blenkernel/intern')
-rw-r--r--  source/blender/blenkernel/intern/DerivedMesh.cc | 334
-rw-r--r--  source/blender/blenkernel/intern/action.c | 22
-rw-r--r--  source/blender/blenkernel/intern/anim_data.c | 35
-rw-r--r--  source/blender/blenkernel/intern/armature.c | 82
-rw-r--r--  source/blender/blenkernel/intern/asset_catalog.cc | 18
-rw-r--r--  source/blender/blenkernel/intern/asset_catalog_test.cc | 40
-rw-r--r--  source/blender/blenkernel/intern/attribute_access.cc | 2
-rw-r--r--  source/blender/blenkernel/intern/attribute_access_intern.hh | 10
-rw-r--r--  source/blender/blenkernel/intern/blendfile_link_append.c | 56
-rw-r--r--  source/blender/blenkernel/intern/bpath.c | 6
-rw-r--r--  source/blender/blenkernel/intern/brush.c | 23
-rw-r--r--  source/blender/blenkernel/intern/bvhutils.cc | 4
-rw-r--r--  source/blender/blenkernel/intern/cachefile.c | 47
-rw-r--r--  source/blender/blenkernel/intern/cdderivedmesh.c | 4
-rw-r--r--  source/blender/blenkernel/intern/cloth.c | 3
-rw-r--r--  source/blender/blenkernel/intern/collection.c | 94
-rw-r--r--  source/blender/blenkernel/intern/constraint.c | 44
-rw-r--r--  source/blender/blenkernel/intern/crazyspace.c | 85
-rw-r--r--  source/blender/blenkernel/intern/cryptomatte.cc | 4
-rw-r--r--  source/blender/blenkernel/intern/curve.cc (renamed from source/blender/blenkernel/intern/curve.c) | 604
-rw-r--r--  source/blender/blenkernel/intern/curve_eval.cc | 10
-rw-r--r--  source/blender/blenkernel/intern/curve_to_mesh_convert.cc | 15
-rw-r--r--  source/blender/blenkernel/intern/curveprofile.cc | 2
-rw-r--r--  source/blender/blenkernel/intern/customdata.cc (renamed from source/blender/blenkernel/intern/customdata.c) | 1047
-rw-r--r--  source/blender/blenkernel/intern/data_transfer.c | 31
-rw-r--r--  source/blender/blenkernel/intern/data_transfer_intern.h | 51
-rw-r--r--  source/blender/blenkernel/intern/displist.cc | 32
-rw-r--r--  source/blender/blenkernel/intern/dynamicpaint.c | 26
-rw-r--r--  source/blender/blenkernel/intern/editmesh.c | 42
-rw-r--r--  source/blender/blenkernel/intern/effect.c | 4
-rw-r--r--  source/blender/blenkernel/intern/fcurve.c | 4
-rw-r--r--  source/blender/blenkernel/intern/fcurve_driver.c | 7
-rw-r--r--  source/blender/blenkernel/intern/fluid.c | 67
-rw-r--r--  source/blender/blenkernel/intern/geometry_component_curve.cc | 94
-rw-r--r--  source/blender/blenkernel/intern/geometry_component_instances.cc | 64
-rw-r--r--  source/blender/blenkernel/intern/geometry_component_mesh.cc | 411
-rw-r--r--  source/blender/blenkernel/intern/geometry_set.cc | 56
-rw-r--r--  source/blender/blenkernel/intern/geometry_set_instances.cc | 20
-rw-r--r--  source/blender/blenkernel/intern/gpencil_geom.cc | 28
-rw-r--r--  source/blender/blenkernel/intern/gpencil_modifier.c | 8
-rw-r--r--  source/blender/blenkernel/intern/hair.cc (renamed from source/blender/blenkernel/intern/hair.c) | 114
-rw-r--r--  source/blender/blenkernel/intern/idprop_create.cc | 140
-rw-r--r--  source/blender/blenkernel/intern/idprop_serialize.cc | 844
-rw-r--r--  source/blender/blenkernel/intern/idprop_serialize_test.cc | 448
-rw-r--r--  source/blender/blenkernel/intern/image.c | 392
-rw-r--r--  source/blender/blenkernel/intern/image_gpu.cc | 2
-rw-r--r--  source/blender/blenkernel/intern/image_save.c | 53
-rw-r--r--  source/blender/blenkernel/intern/key.c | 13
-rw-r--r--  source/blender/blenkernel/intern/lattice.c | 6
-rw-r--r--  source/blender/blenkernel/intern/layer.c | 68
-rw-r--r--  source/blender/blenkernel/intern/lib_id.c | 54
-rw-r--r--  source/blender/blenkernel/intern/lib_id_delete.c | 42
-rw-r--r--  source/blender/blenkernel/intern/lib_id_remapper.cc | 175
-rw-r--r--  source/blender/blenkernel/intern/lib_id_remapper_test.cc | 83
-rw-r--r--  source/blender/blenkernel/intern/lib_override.c | 331
-rw-r--r--  source/blender/blenkernel/intern/lib_query.c | 2
-rw-r--r--  source/blender/blenkernel/intern/lib_remap.c | 348
-rw-r--r--  source/blender/blenkernel/intern/lib_remap_test.cc | 369
-rw-r--r--  source/blender/blenkernel/intern/linestyle.c | 3
-rw-r--r--  source/blender/blenkernel/intern/mask.c | 3
-rw-r--r--  source/blender/blenkernel/intern/material.c | 5
-rw-r--r--  source/blender/blenkernel/intern/mesh.cc | 147
-rw-r--r--  source/blender/blenkernel/intern/mesh_boolean_convert.cc | 2
-rw-r--r--  source/blender/blenkernel/intern/mesh_convert.cc | 75
-rw-r--r--  source/blender/blenkernel/intern/mesh_debug.cc | 115
-rw-r--r--  source/blender/blenkernel/intern/mesh_iterators.c | 59
-rw-r--r--  source/blender/blenkernel/intern/mesh_merge.c | 16
-rw-r--r--  source/blender/blenkernel/intern/mesh_mirror.c | 16
-rw-r--r--  source/blender/blenkernel/intern/mesh_normals.cc | 243
-rw-r--r--  source/blender/blenkernel/intern/mesh_remap.c | 66
-rw-r--r--  source/blender/blenkernel/intern/mesh_remesh_voxel.cc | 2
-rw-r--r--  source/blender/blenkernel/intern/mesh_runtime.c | 128
-rw-r--r--  source/blender/blenkernel/intern/mesh_tangent.c | 11
-rw-r--r--  source/blender/blenkernel/intern/mesh_validate.c | 19
-rw-r--r--  source/blender/blenkernel/intern/mesh_wrapper.c | 91
-rw-r--r--  source/blender/blenkernel/intern/modifier.c | 18
-rw-r--r--  source/blender/blenkernel/intern/movieclip.c | 19
-rw-r--r--  source/blender/blenkernel/intern/multires_reshape.h | 13
-rw-r--r--  source/blender/blenkernel/intern/multires_reshape_smooth.c | 89
-rw-r--r--  source/blender/blenkernel/intern/multires_reshape_util.c | 6
-rw-r--r--  source/blender/blenkernel/intern/multires_reshape_vertcos.c | 3
-rw-r--r--  source/blender/blenkernel/intern/multires_versioning.c | 2
-rw-r--r--  source/blender/blenkernel/intern/node.cc | 1456
-rw-r--r--  source/blender/blenkernel/intern/node_tree_update.cc | 1670
-rw-r--r--  source/blender/blenkernel/intern/object.cc | 125
-rw-r--r--  source/blender/blenkernel/intern/object_dupli.cc | 86
-rw-r--r--  source/blender/blenkernel/intern/object_update.c | 18
-rw-r--r--  source/blender/blenkernel/intern/packedFile.c | 2
-rw-r--r--  source/blender/blenkernel/intern/paint.c | 1
-rw-r--r--  source/blender/blenkernel/intern/particle.c | 19
-rw-r--r--  source/blender/blenkernel/intern/particle_distribute.c | 3
-rw-r--r--  source/blender/blenkernel/intern/pbvh.c | 24
-rw-r--r--  source/blender/blenkernel/intern/pbvh_intern.h | 3
-rw-r--r--  source/blender/blenkernel/intern/pointcache.c | 57
-rw-r--r--  source/blender/blenkernel/intern/pointcloud.cc | 83
-rw-r--r--  source/blender/blenkernel/intern/screen.c | 26
-rw-r--r--  source/blender/blenkernel/intern/shrinkwrap.c | 48
-rw-r--r--  source/blender/blenkernel/intern/simulation.cc | 2
-rw-r--r--  source/blender/blenkernel/intern/softbody.c | 2
-rw-r--r--  source/blender/blenkernel/intern/spline_base.cc | 45
-rw-r--r--  source/blender/blenkernel/intern/spline_bezier.cc | 55
-rw-r--r--  source/blender/blenkernel/intern/spline_nurbs.cc | 89
-rw-r--r--  source/blender/blenkernel/intern/spline_poly.cc | 8
-rw-r--r--  source/blender/blenkernel/intern/subdiv.c | 12
-rw-r--r--  source/blender/blenkernel/intern/subdiv_ccg.c | 3
-rw-r--r--  source/blender/blenkernel/intern/subdiv_converter_mesh.c | 15
-rw-r--r--  source/blender/blenkernel/intern/subdiv_deform.c | 6
-rw-r--r--  source/blender/blenkernel/intern/subdiv_eval.c | 250
-rw-r--r--  source/blender/blenkernel/intern/subdiv_foreach.c | 3
-rw-r--r--  source/blender/blenkernel/intern/subdiv_inline.h | 6
-rw-r--r--  source/blender/blenkernel/intern/subdiv_mesh.c | 112
-rw-r--r--  source/blender/blenkernel/intern/subdiv_modifier.c | 160
-rw-r--r--  source/blender/blenkernel/intern/subsurf_ccg.c | 64
-rw-r--r--  source/blender/blenkernel/intern/texture.c | 2
-rw-r--r--  source/blender/blenkernel/intern/tracking_region_tracker.c | 7
-rw-r--r--  source/blender/blenkernel/intern/tracking_test.cc | 2
-rw-r--r--  source/blender/blenkernel/intern/type_conversions.cc | 3
-rw-r--r--  source/blender/blenkernel/intern/undo_system.c | 3
-rw-r--r--  source/blender/blenkernel/intern/volume.cc | 62
-rw-r--r--  source/blender/blenkernel/intern/volume_render.cc | 2
-rw-r--r--  source/blender/blenkernel/intern/volume_to_mesh.cc | 11
-rw-r--r--  source/blender/blenkernel/intern/writeffmpeg.c | 2
122 files changed, 8353 insertions, 4365 deletions
diff --git a/source/blender/blenkernel/intern/DerivedMesh.cc b/source/blender/blenkernel/intern/DerivedMesh.cc
index 6c9c5490ca0..d0d19ff199d 100644
--- a/source/blender/blenkernel/intern/DerivedMesh.cc
+++ b/source/blender/blenkernel/intern/DerivedMesh.cc
@@ -38,9 +38,9 @@
#include "BLI_array.h"
#include "BLI_bitmap.h"
#include "BLI_blenlib.h"
-#include "BLI_float2.hh"
#include "BLI_linklist.h"
#include "BLI_math.h"
+#include "BLI_math_vec_types.hh"
#include "BLI_task.h"
#include "BLI_task.hh"
#include "BLI_utildefines.h"
@@ -775,28 +775,6 @@ static void mesh_calc_modifier_final_normals(const Mesh *mesh_input,
/* Compute normals. */
const bool do_loop_normals = ((mesh_input->flag & ME_AUTOSMOOTH) != 0 ||
(final_datamask->lmask & CD_MASK_NORMAL) != 0);
- /* Some modifiers may need this info from their target (other) object,
- * simpler to generate it here as well.
- * Note that they will always be generated when no loop normals are computed,
- * since they are needed by drawing code. */
- const bool do_poly_normals = ((final_datamask->pmask & CD_MASK_NORMAL) != 0);
-
- /* In case we also need poly normals, add the layer and compute them here
- * (BKE_mesh_calc_normals_split() assumes that if that data exists, it is always valid). */
- if (do_poly_normals) {
- if (!CustomData_has_layer(&mesh_final->pdata, CD_NORMAL)) {
- float(*polynors)[3] = (float(*)[3])CustomData_add_layer(
- &mesh_final->pdata, CD_NORMAL, CD_CALLOC, nullptr, mesh_final->totpoly);
- BKE_mesh_calc_normals_poly_and_vertex(mesh_final->mvert,
- mesh_final->totvert,
- mesh_final->mloop,
- mesh_final->totloop,
- mesh_final->mpoly,
- mesh_final->totpoly,
- polynors,
- nullptr);
- }
- }
if (do_loop_normals) {
/* Compute loop normals (NOTE: will compute poly and vert normals as well, if needed!). */
@@ -814,11 +792,7 @@ static void mesh_calc_modifier_final_normals(const Mesh *mesh_input,
* normals and will also have to calculate normals on the fly, try avoid
* this where possible since calculating polygon normals isn't fast,
* note that this isn't a problem for subsurf (only quads) or editmode
- * which deals with drawing differently.
- *
- * Only calc vertex normals if they are flagged as dirty.
- * If using loop normals, poly nors have already been computed.
- */
+ * which deals with drawing differently. */
if (!do_loop_normals) {
BKE_mesh_ensure_normals_for_display(mesh_final);
}
@@ -927,6 +901,7 @@ static void mesh_calc_modifiers(struct Depsgraph *depsgraph,
* constructive modifier is executed, or a deform modifier needs normals
* or certain data layers. */
Mesh *mesh_input = (Mesh *)ob->data;
+ BKE_mesh_assert_normals_dirty_or_calculated(mesh_input);
Mesh *mesh_final = nullptr;
Mesh *mesh_deform = nullptr;
/* This geometry set contains the non-mesh data that might be generated by modifiers. */
@@ -1458,26 +1433,6 @@ static void editbmesh_calc_modifier_final_normals(Mesh *mesh_final,
const bool do_loop_normals = ((mesh_final->flag & ME_AUTOSMOOTH) != 0 ||
(final_datamask->lmask & CD_MASK_NORMAL) != 0);
- /* Some modifiers may need this info from their target (other) object,
- * simpler to generate it here as well. */
- const bool do_poly_normals = ((final_datamask->pmask & CD_MASK_NORMAL) != 0);
-
- /* In case we also need poly normals, add the layer and compute them here
- * (BKE_mesh_calc_normals_split() assumes that if that data exists, it is always valid). */
- if (do_poly_normals) {
- if (!CustomData_has_layer(&mesh_final->pdata, CD_NORMAL)) {
- float(*polynors)[3] = (float(*)[3])CustomData_add_layer(
- &mesh_final->pdata, CD_NORMAL, CD_CALLOC, nullptr, mesh_final->totpoly);
- BKE_mesh_calc_normals_poly_and_vertex(mesh_final->mvert,
- mesh_final->totvert,
- mesh_final->mloop,
- mesh_final->totloop,
- mesh_final->mpoly,
- mesh_final->totpoly,
- polynors,
- nullptr);
- }
- }
if (do_loop_normals) {
/* Compute loop normals */
@@ -1808,31 +1763,12 @@ static void mesh_build_extra_data(struct Depsgraph *depsgraph, Object *ob, Mesh
}
}
-static void mesh_runtime_check_normals_valid(const Mesh *mesh)
-{
- UNUSED_VARS_NDEBUG(mesh);
- BLI_assert(!(mesh->runtime.cd_dirty_vert & CD_MASK_NORMAL));
- BLI_assert(!(mesh->runtime.cd_dirty_loop & CD_MASK_NORMAL));
- BLI_assert(!(mesh->runtime.cd_dirty_poly & CD_MASK_NORMAL));
-}
-
static void mesh_build_data(struct Depsgraph *depsgraph,
Scene *scene,
Object *ob,
const CustomData_MeshMasks *dataMask,
const bool need_mapping)
{
- BLI_assert(ob->type == OB_MESH);
-
- /* Evaluated meshes aren't supposed to be created on original instances. If you do,
- * they aren't cleaned up properly on mode switch, causing crashes, e.g T58150. */
- BLI_assert(ob->id.tag & LIB_TAG_COPIED_ON_WRITE);
-
- BKE_object_free_derived_caches(ob);
- if (DEG_is_active(depsgraph)) {
- BKE_sculpt_update_object_before_eval(ob);
- }
-
#if 0 /* XXX This is already taken care of in mesh_calc_modifiers()... */
if (need_mapping) {
/* Also add the flag so that it is recorded in lastDataMask. */
@@ -1890,7 +1826,6 @@ static void mesh_build_data(struct Depsgraph *depsgraph,
}
}
- mesh_runtime_check_normals_valid(mesh_eval);
mesh_build_extra_data(depsgraph, ob, mesh_eval);
}
@@ -1900,15 +1835,7 @@ static void editbmesh_build_data(struct Depsgraph *depsgraph,
BMEditMesh *em,
CustomData_MeshMasks *dataMask)
{
- BLI_assert(obedit->id.tag & LIB_TAG_COPIED_ON_WRITE);
-
- BKE_object_free_derived_caches(obedit);
- if (DEG_is_active(depsgraph)) {
- BKE_sculpt_update_object_before_eval(obedit);
- }
-
- BKE_editmesh_free_derived_caches(em);
-
+ Mesh *mesh = static_cast<Mesh *>(obedit->data);
Mesh *me_cage;
Mesh *me_final;
GeometrySet *non_mesh_components;
@@ -1916,15 +1843,33 @@ static void editbmesh_build_data(struct Depsgraph *depsgraph,
editbmesh_calc_modifiers(
depsgraph, scene, obedit, em, dataMask, &me_cage, &me_final, &non_mesh_components);
- em->mesh_eval_final = me_final;
- em->mesh_eval_cage = me_cage;
- obedit->runtime.geometry_set_eval = non_mesh_components;
+ /* The modifier stack result is expected to share edit mesh pointer with the input.
+ * This is similar to `mesh_calc_finalize()`. */
+ BKE_mesh_free_editmesh(me_final);
+ BKE_mesh_free_editmesh(me_cage);
+ me_final->edit_mesh = me_cage->edit_mesh = em;
+
+ /* Object has edit_mesh but is not in edit mode (the object shares its mesh datablock with
+ * another object which is in edit mode).
+ * Convert the edit mesh to a regular mesh until the draw manager can draw a mesh wrapper that
+ * is not in edit mode. */
+ if (!(obedit->mode & OB_MODE_EDIT)) {
+ BKE_mesh_wrapper_ensure_mdata(me_final);
+ if (me_final != me_cage) {
+ BKE_mesh_wrapper_ensure_mdata(me_cage);
+ }
+ }
+
+ const bool is_mesh_eval_owned = (me_final != mesh->runtime.mesh_eval);
+ BKE_object_eval_assign_data(obedit, &me_final->id, is_mesh_eval_owned);
+
+ obedit->runtime.editmesh_eval_cage = me_cage;
- BKE_object_boundbox_calc_from_mesh(obedit, em->mesh_eval_final);
+ obedit->runtime.geometry_set_eval = non_mesh_components;
- em->lastDataMask = *dataMask;
+ BKE_object_boundbox_calc_from_mesh(obedit, me_final);
- mesh_runtime_check_normals_valid(em->mesh_eval_final);
+ obedit->runtime.last_data_mask = *dataMask;
}
static void object_get_datamask(const Depsgraph *depsgraph,
@@ -1980,9 +1925,25 @@ static void object_get_datamask(const Depsgraph *depsgraph,
void makeDerivedMesh(struct Depsgraph *depsgraph,
Scene *scene,
Object *ob,
- BMEditMesh *em,
const CustomData_MeshMasks *dataMask)
{
+ BLI_assert(ob->type == OB_MESH);
+
+ /* Evaluated meshes aren't supposed to be created on original instances. If you do,
+ * they aren't cleaned up properly on mode switch, causing crashes, e.g T58150. */
+ BLI_assert(ob->id.tag & LIB_TAG_COPIED_ON_WRITE);
+
+ BKE_object_free_derived_caches(ob);
+ if (DEG_is_active(depsgraph)) {
+ BKE_sculpt_update_object_before_eval(ob);
+ }
+
+ /* NOTE: Access the `edit_mesh` after freeing the derived caches, so that `ob->data` is restored
+ * to the pre-evaluated state. This is needed because the evaluated state does not necessarily
+ * share the `edit_mesh` pointer with the input. For example, the object may first be evaluated
+ * in object mode, and then the user moves the object to edit mode in another scene. */
+ BMEditMesh *em = ((Mesh *)ob->data)->edit_mesh;
+
bool need_mapping;
CustomData_MeshMasks cddata_masks = *dataMask;
object_get_datamask(depsgraph, ob, &cddata_masks, &need_mapping);
@@ -2021,8 +1982,9 @@ Mesh *mesh_get_eval_final(struct Depsgraph *depsgraph,
!CustomData_MeshMasks_are_matching(&(ob->runtime.last_data_mask), &cddata_masks) ||
(need_mapping && !ob->runtime.last_need_mapping)) {
CustomData_MeshMasks_update(&cddata_masks, &ob->runtime.last_data_mask);
- mesh_build_data(
- depsgraph, scene, ob, &cddata_masks, need_mapping || ob->runtime.last_need_mapping);
+
+ makeDerivedMesh(depsgraph, scene, ob, dataMask);
+
mesh_eval = BKE_object_get_evaluated_mesh(ob);
}
@@ -2037,6 +1999,15 @@ Mesh *mesh_get_eval_deform(struct Depsgraph *depsgraph,
Object *ob,
const CustomData_MeshMasks *dataMask)
{
+ BMEditMesh *em = ((Mesh *)ob->data)->edit_mesh;
+ if (em != nullptr) {
+ /* There is no such concept as a deformed mesh in edit mode.
+ * Explicitly disallow this request so that the evaluated result is not modified with an
+ * evaluated result from the wrong mode. */
+ BLI_assert_msg(0, "Request of deformed mesh of object which is in edit mode");
+ return nullptr;
+ }
+
/* This function isn't thread-safe and can't be used during evaluation. */
BLI_assert(DEG_is_evaluating(depsgraph) == false);
@@ -2074,18 +2045,6 @@ Mesh *mesh_create_eval_final(Depsgraph *depsgraph,
return result;
}
-Mesh *mesh_create_eval_final_index_render(Depsgraph *depsgraph,
- Scene *scene,
- Object *ob,
- const CustomData_MeshMasks *dataMask,
- int index)
-{
- Mesh *result;
- mesh_calc_modifiers(
- depsgraph, scene, ob, true, false, dataMask, index, false, false, nullptr, &result, nullptr);
- return result;
-}
-
Mesh *mesh_create_eval_no_deform(Depsgraph *depsgraph,
Scene *scene,
Object *ob,
@@ -2110,33 +2069,6 @@ Mesh *mesh_create_eval_no_deform_render(Depsgraph *depsgraph,
/***/
-Mesh *editbmesh_get_eval_cage_and_final(Depsgraph *depsgraph,
- Scene *scene,
- Object *obedit,
- BMEditMesh *em,
- const CustomData_MeshMasks *dataMask,
- /* return args */
- Mesh **r_final)
-{
- CustomData_MeshMasks cddata_masks = *dataMask;
-
- /* if there's no derived mesh or the last data mask used doesn't include
- * the data we need, rebuild the derived mesh
- */
- object_get_datamask(depsgraph, obedit, &cddata_masks, nullptr);
-
- if (!em->mesh_eval_cage ||
- !CustomData_MeshMasks_are_matching(&(em->lastDataMask), &cddata_masks)) {
- editbmesh_build_data(depsgraph, scene, obedit, em, &cddata_masks);
- }
-
- *r_final = em->mesh_eval_final;
- if (em->mesh_eval_final) {
- BLI_assert(!(em->mesh_eval_final->runtime.cd_dirty_vert & DM_DIRTY_NORMALS));
- }
- return em->mesh_eval_cage;
-}
-
Mesh *editbmesh_get_eval_cage(struct Depsgraph *depsgraph,
Scene *scene,
Object *obedit,
@@ -2150,12 +2082,12 @@ Mesh *editbmesh_get_eval_cage(struct Depsgraph *depsgraph,
*/
object_get_datamask(depsgraph, obedit, &cddata_masks, nullptr);
- if (!em->mesh_eval_cage ||
- !CustomData_MeshMasks_are_matching(&(em->lastDataMask), &cddata_masks)) {
+ if (!obedit->runtime.editmesh_eval_cage ||
+ !CustomData_MeshMasks_are_matching(&(obedit->runtime.last_data_mask), &cddata_masks)) {
editbmesh_build_data(depsgraph, scene, obedit, em, &cddata_masks);
}
- return em->mesh_eval_cage;
+ return obedit->runtime.editmesh_eval_cage;
}
Mesh *editbmesh_get_eval_cage_from_orig(struct Depsgraph *depsgraph,
@@ -2181,8 +2113,7 @@ struct MappedUserData {
static void make_vertexcos__mapFunc(void *userData,
int index,
const float co[3],
- const float UNUSED(no_f[3]),
- const short UNUSED(no_s[3]))
+ const float UNUSED(no[3]))
{
MappedUserData *mappedData = (MappedUserData *)userData;
@@ -2229,6 +2160,7 @@ void DM_calc_loop_tangents(DerivedMesh *dm,
calc_active_tangent,
tangent_names,
tangent_names_len,
+ (const float(*)[3])CustomData_get_layer(&dm->vertData, CD_NORMAL),
(const float(*)[3])CustomData_get_layer(&dm->polyData, CD_NORMAL),
(const float(*)[3])dm->getLoopDataArray(dm, CD_NORMAL),
(const float(*)[3])dm->getVertDataArray(dm, CD_ORCO), /* may be nullptr */
@@ -2311,145 +2243,3 @@ static void mesh_init_origspace(Mesh *mesh)
BKE_mesh_tessface_clear(mesh);
}
-
-/* derivedmesh info printing function,
- * to help track down differences DM output */
-
-#ifndef NDEBUG
-# include "BLI_dynstr.h"
-
-static void dm_debug_info_layers(DynStr *dynstr,
- DerivedMesh *dm,
- CustomData *cd,
- void *(*getElemDataArray)(DerivedMesh *, int))
-{
- int type;
-
- for (type = 0; type < CD_NUMTYPES; type++) {
- if (CustomData_has_layer(cd, type)) {
- /* NOTE: doesn't account for multiple layers. */
- const char *name = CustomData_layertype_name(type);
- const int size = CustomData_sizeof(type);
- const void *pt = getElemDataArray(dm, type);
- const int pt_size = pt ? (int)(MEM_allocN_len(pt) / size) : 0;
- const char *structname;
- int structnum;
- CustomData_file_write_info(type, &structname, &structnum);
- BLI_dynstr_appendf(
- dynstr,
- " dict(name='%s', struct='%s', type=%d, ptr='%p', elem=%d, length=%d),\n",
- name,
- structname,
- type,
- (const void *)pt,
- size,
- pt_size);
- }
- }
-}
-
-char *DM_debug_info(DerivedMesh *dm)
-{
- DynStr *dynstr = BLI_dynstr_new();
- char *ret;
- const char *tstr;
-
- BLI_dynstr_append(dynstr, "{\n");
- BLI_dynstr_appendf(dynstr, " 'ptr': '%p',\n", (void *)dm);
- switch (dm->type) {
- case DM_TYPE_CDDM:
- tstr = "DM_TYPE_CDDM";
- break;
- case DM_TYPE_CCGDM:
- tstr = "DM_TYPE_CCGDM";
- break;
- default:
- tstr = "UNKNOWN";
- break;
- }
- BLI_dynstr_appendf(dynstr, " 'type': '%s',\n", tstr);
- BLI_dynstr_appendf(dynstr, " 'numVertData': %d,\n", dm->numVertData);
- BLI_dynstr_appendf(dynstr, " 'numEdgeData': %d,\n", dm->numEdgeData);
- BLI_dynstr_appendf(dynstr, " 'numTessFaceData': %d,\n", dm->numTessFaceData);
- BLI_dynstr_appendf(dynstr, " 'numPolyData': %d,\n", dm->numPolyData);
- BLI_dynstr_appendf(dynstr, " 'deformedOnly': %d,\n", dm->deformedOnly);
-
- BLI_dynstr_append(dynstr, " 'vertexLayers': (\n");
- dm_debug_info_layers(dynstr, dm, &dm->vertData, dm->getVertDataArray);
- BLI_dynstr_append(dynstr, " ),\n");
-
- BLI_dynstr_append(dynstr, " 'edgeLayers': (\n");
- dm_debug_info_layers(dynstr, dm, &dm->edgeData, dm->getEdgeDataArray);
- BLI_dynstr_append(dynstr, " ),\n");
-
- BLI_dynstr_append(dynstr, " 'loopLayers': (\n");
- dm_debug_info_layers(dynstr, dm, &dm->loopData, dm->getLoopDataArray);
- BLI_dynstr_append(dynstr, " ),\n");
-
- BLI_dynstr_append(dynstr, " 'polyLayers': (\n");
- dm_debug_info_layers(dynstr, dm, &dm->polyData, dm->getPolyDataArray);
- BLI_dynstr_append(dynstr, " ),\n");
-
- BLI_dynstr_append(dynstr, " 'tessFaceLayers': (\n");
- dm_debug_info_layers(dynstr, dm, &dm->faceData, dm->getTessFaceDataArray);
- BLI_dynstr_append(dynstr, " ),\n");
-
- BLI_dynstr_append(dynstr, "}\n");
-
- ret = BLI_dynstr_get_cstring(dynstr);
- BLI_dynstr_free(dynstr);
- return ret;
-}
-
-void DM_debug_print(DerivedMesh *dm)
-{
- char *str = DM_debug_info(dm);
- puts(str);
- fflush(stdout);
- MEM_freeN(str);
-}
-
-bool DM_is_valid(DerivedMesh *dm)
-{
- const bool do_verbose = true;
- const bool do_fixes = false;
-
- bool is_valid = true;
- bool changed = true;
-
- is_valid &= BKE_mesh_validate_all_customdata(
- dm->getVertDataLayout(dm),
- dm->getNumVerts(dm),
- dm->getEdgeDataLayout(dm),
- dm->getNumEdges(dm),
- dm->getLoopDataLayout(dm),
- dm->getNumLoops(dm),
- dm->getPolyDataLayout(dm),
- dm->getNumPolys(dm),
- false, /* setting mask here isn't useful, gives false positives */
- do_verbose,
- do_fixes,
- &changed);
-
- is_valid &= BKE_mesh_validate_arrays(nullptr,
- dm->getVertArray(dm),
- dm->getNumVerts(dm),
- dm->getEdgeArray(dm),
- dm->getNumEdges(dm),
- dm->getTessFaceArray(dm),
- dm->getNumTessFaces(dm),
- dm->getLoopArray(dm),
- dm->getNumLoops(dm),
- dm->getPolyArray(dm),
- dm->getNumPolys(dm),
- (MDeformVert *)dm->getVertDataArray(dm, CD_MDEFORMVERT),
- do_verbose,
- do_fixes,
- &changed);
-
- BLI_assert(changed == false);
-
- return is_valid;
-}
-
-#endif /* NDEBUG */
diff --git a/source/blender/blenkernel/intern/action.c b/source/blender/blenkernel/intern/action.c
index ddba726ba83..fde42304185 100644
--- a/source/blender/blenkernel/intern/action.c
+++ b/source/blender/blenkernel/intern/action.c
@@ -307,7 +307,7 @@ static void action_asset_pre_save(void *asset_ptr, struct AssetMetaData *asset_d
BKE_asset_metadata_idprop_ensure(asset_data, action_type);
}
-AssetTypeInfo AssetType_AC = {
+static AssetTypeInfo AssetType_AC = {
/* pre_save_fn */ action_asset_pre_save,
};
@@ -705,7 +705,12 @@ bool BKE_pose_channels_is_valid(const bPose *pose)
#endif
-bPoseChannel *BKE_pose_channel_active(Object *ob)
+bool BKE_pose_is_layer_visible(const bArmature *arm, const bPoseChannel *pchan)
+{
+ return (pchan->bone->layer & arm->layer);
+}
+
+bPoseChannel *BKE_pose_channel_active(Object *ob, const bool check_arm_layer)
{
bArmature *arm = (ob) ? ob->data : NULL;
bPoseChannel *pchan;
@@ -716,14 +721,21 @@ bPoseChannel *BKE_pose_channel_active(Object *ob)
/* find active */
for (pchan = ob->pose->chanbase.first; pchan; pchan = pchan->next) {
- if ((pchan->bone) && (pchan->bone == arm->act_bone) && (pchan->bone->layer & arm->layer)) {
- return pchan;
+ if ((pchan->bone) && (pchan->bone == arm->act_bone)) {
+ if (!check_arm_layer || BKE_pose_is_layer_visible(arm, pchan)) {
+ return pchan;
+ }
}
}
return NULL;
}
+bPoseChannel *BKE_pose_channel_active_if_layer_visible(struct Object *ob)
+{
+ return BKE_pose_channel_active(ob, true);
+}
+
bPoseChannel *BKE_pose_channel_active_or_first_selected(struct Object *ob)
{
bArmature *arm = (ob) ? ob->data : NULL;
@@ -732,7 +744,7 @@ bPoseChannel *BKE_pose_channel_active_or_first_selected(struct Object *ob)
return NULL;
}
- bPoseChannel *pchan = BKE_pose_channel_active(ob);
+ bPoseChannel *pchan = BKE_pose_channel_active_if_layer_visible(ob);
if (pchan && (pchan->bone->flag & BONE_SELECTED) && PBONE_VISIBLE(arm, pchan->bone)) {
return pchan;
}
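
The hunk above changes `BKE_pose_channel_active()` to take an explicit `check_arm_layer` argument and adds the `BKE_pose_channel_active_if_layer_visible()` wrapper. A minimal caller-side sketch of the two entry points follows; the `ob` variable is a hypothetical armature object with a valid pose and is not part of this commit:

    /* Sketch only: how callers can use the updated API (assumes `ob` is a valid armature object). */
    bPoseChannel *pchan_any = BKE_pose_channel_active(ob, false);            /* Ignore armature layer visibility. */
    bPoseChannel *pchan_vis = BKE_pose_channel_active_if_layer_visible(ob);  /* Same as passing true above. */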
diff --git a/source/blender/blenkernel/intern/anim_data.c b/source/blender/blenkernel/intern/anim_data.c
index d93d5c456d8..42b72a7cd66 100644
--- a/source/blender/blenkernel/intern/anim_data.c
+++ b/source/blender/blenkernel/intern/anim_data.c
@@ -676,41 +676,6 @@ void BKE_animdata_transfer_by_basepath(Main *bmain, ID *srcID, ID *dstID, ListBa
}
}
-char *BKE_animdata_driver_path_hack(bContext *C,
- PointerRNA *ptr,
- PropertyRNA *prop,
- char *base_path)
-{
- ID *id = ptr->owner_id;
- ScrArea *area = CTX_wm_area(C);
-
- /* get standard path which may be extended */
- char *basepath = base_path ? base_path : RNA_path_from_ID_to_property(ptr, prop);
- char *path = basepath; /* in case no remapping is needed */
-
- /* Remapping will only be performed in the Properties Editor, as only this
- * restricts the subspace of options to the 'active' data (a manageable state)
- */
- /* TODO: watch out for pinned context? */
- if ((area) && (area->spacetype == SPACE_PROPERTIES)) {
- Object *ob = CTX_data_active_object(C);
-
- if (ob && id) {
- /* TODO: after material textures were removed, this function serves
- * no purpose anymore, but could be used again so was not removed. */
-
- /* fix RNA pointer, as we've now changed the ID root by changing the paths */
- if (basepath != path) {
- /* rebase provided pointer so that it starts from object... */
- RNA_pointer_create(&ob->id, ptr->type, ptr->data, ptr);
- }
- }
- }
-
- /* the path should now have been corrected for use */
- return path;
-}
-
/* Path Validation -------------------------------------------- */
/* Check if a given RNA Path is valid, by tracing it from the given ID,
diff --git a/source/blender/blenkernel/intern/armature.c b/source/blender/blenkernel/intern/armature.c
index 0a91d662c1b..5704ef6e42f 100644
--- a/source/blender/blenkernel/intern/armature.c
+++ b/source/blender/blenkernel/intern/armature.c
@@ -1356,9 +1356,12 @@ static void ease_handle_axis(const float deriv1[3], const float deriv2[3], float
copy_v3_v3(r_axis, deriv1);
- float len1 = len_squared_v3(deriv1), len2 = len_squared_v3(deriv2);
- float ratio = len1 / len2;
-
+ const float len2 = len_squared_v3(deriv2);
+ if (UNLIKELY(len2 == 0.0f)) {
+ return;
+ }
+ const float len1 = len_squared_v3(deriv1);
+ const float ratio = len1 / len2;
if (ratio < gap * gap) {
madd_v3_v3fl(r_axis, deriv2, gap - sqrtf(ratio));
}
@@ -2098,6 +2101,79 @@ void mat3_vec_to_roll(const float mat[3][3], const float vec[3], float *r_roll)
void vec_roll_to_mat3_normalized(const float nor[3], const float roll, float r_mat[3][3])
{
+ /**
+ * Given `v = (v.x, v.y, v.z)` our (normalized) bone vector, we want the rotation matrix M
+ * from the Y axis (so that `M * (0, 1, 0) = v`).
+ * - The rotation axis a lays on XZ plane, and it is orthonormal to v,
+ * hence to the projection of v onto XZ plane.
+ * - `a = (v.z, 0, -v.x)`
+ *
+ * We know a is an eigenvector of M (so M * a = a).
+ * Finally, we have w, such that M * w = (0, 1, 0)
+ * (i.e. the vector that will be aligned with Y axis once transformed).
+ * We know w is symmetric to v by the Y axis.
+ * - `w = (-v.x, v.y, -v.z)`
+ *
+ * Solving this, we get (x, y and z being the components of v):
+ * <pre>
+ * ┌ (x^2 * y + z^2) / (x^2 + z^2), x, x * z * (y - 1) / (x^2 + z^2) ┐
+ * M = │ x * (y^2 - 1) / (x^2 + z^2), y, z * (y^2 - 1) / (x^2 + z^2) │
+ * └ x * z * (y - 1) / (x^2 + z^2), z, (x^2 + z^2 * y) / (x^2 + z^2) ┘
+ * </pre>
+ *
+ * This is stable as long as v (the bone) is not too much aligned with +/-Y
+ * (i.e. x and z components are not too close to 0).
+ *
+ * Since v is normalized, we have `x^2 + y^2 + z^2 = 1`,
+ * hence `x^2 + z^2 = 1 - y^2 = (1 - y)(1 + y)`.
+ *
+ * This allows simplifying M like this:
+ * <pre>
+ * ┌ 1 - x^2 / (1 + y), x, -x * z / (1 + y) ┐
+ * M = │ -x, y, -z │
+ * └ -x * z / (1 + y), z, 1 - z^2 / (1 + y) ┘
+ * </pre>
+ *
+ * Written this way, we see the case v = +Y is no longer a singularity. The only remaining
+ * singularity is the bone being aligned with -Y.
+ *
+ * Let's handle the asymptotic behavior when the bone vector approaches the limit of y = -1.
+ * Each of the four corner elements can vary from -1 to 1,
+ * depending on the axis a chosen for doing the rotation.
+ * And the "rotation" here is in fact established by mirroring XZ plane by that given axis,
+ * then inversing the Y-axis.
+ * For sufficiently small x and z, and with y approaching -1,
+ * all elements but the four corner ones of M will degenerate.
+ * So let's now focus on these corner elements.
+ *
+ * We rewrite M so that it only contains its four corner elements,
+ * and combine the `1 / (1 + y)` factor:
+ * <pre>
+ * ┌ 1 + y - x^2, -x * z ┐
+ * M* = 1 / (1 + y) * │ │
+ * └ -x * z, 1 + y - z^2 ┘
+ * </pre>
+ *
+ * When y is close to -1, computing 1 / (1 + y) will cause severe numerical instability,
+ * so we use a different approach based on x and z as inputs.
+ * We know `y^2 = 1 - (x^2 + z^2)`, and `y < 0`, hence `y = -sqrt(1 - (x^2 + z^2))`.
+ *
+ * Since x and z are both close to 0, we apply the binomial expansion to the second order:
+ * `y = -sqrt(1 - (x^2 + z^2)) = -1 + (x^2 + z^2) / 2 + (x^2 + z^2)^2 / 8`, which allows
+ * eliminating the problematic `1` constant.
+ *
+ * A first order expansion allows simplifying to this, but second order is more precise:
+ * <pre>
+ * ┌ z^2 - x^2, -2 * x * z ┐
+ * M* = 1 / (x^2 + z^2) * │ │
+ * └ -2 * x * z, x^2 - z^2 ┘
+ * </pre>
+ *
+ * P.S. In the end, this basically is a heavily optimized version of Damped Track +Y.
+ */
+
const float SAFE_THRESHOLD = 6.1e-3f; /* Theta above this value has good enough precision. */
const float CRITICAL_THRESHOLD = 2.5e-4f; /* True singularity if XZ distance is below this. */
const float THRESHOLD_SQUARED = CRITICAL_THRESHOLD * CRITICAL_THRESHOLD;
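
The derivation in the comment above reduces to a closed-form matrix for the well-conditioned case. Below is a minimal standalone sketch (not Blender's implementation; it ignores the roll and the y ≈ -1 special case handled by the thresholds) that builds M so that M * (0, 1, 0) = v. The row/column indexing follows the matrix as written in the comment, not necessarily Blender's column-major convention:

    /* Minimal sketch of the simplified matrix from the derivation above.
     * Assumes `v` is normalized and not close to -Y (so 1 + y is well away from zero). */
    static void y_axis_to_vec_mat3_sketch(const float v[3], float r_mat[3][3])
    {
      const float x = v[0], y = v[1], z = v[2];
      const float inv = 1.0f / (1.0f + y);

      /* First row. */
      r_mat[0][0] = 1.0f - x * x * inv;
      r_mat[0][1] = x;
      r_mat[0][2] = -x * z * inv;
      /* Second row: maps (0, 1, 0) to v through the middle column. */
      r_mat[1][0] = -x;
      r_mat[1][1] = y;
      r_mat[1][2] = -z;
      /* Third row. */
      r_mat[2][0] = -x * z * inv;
      r_mat[2][1] = z;
      r_mat[2][2] = 1.0f - z * z * inv;
    }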
diff --git a/source/blender/blenkernel/intern/asset_catalog.cc b/source/blender/blenkernel/intern/asset_catalog.cc
index aec622bb71f..06dd623ff28 100644
--- a/source/blender/blenkernel/intern/asset_catalog.cc
+++ b/source/blender/blenkernel/intern/asset_catalog.cc
@@ -24,7 +24,7 @@
#include "BKE_asset_catalog.hh"
#include "BKE_asset_library.h"
-#include "BLI_fileops.h"
+#include "BLI_fileops.hh"
#include "BLI_path_util.h"
/* For S_ISREG() and S_ISDIR() on Windows. */
@@ -32,6 +32,10 @@
# include "BLI_winstuff.h"
#endif
+#include "CLG_log.h"
+
+static CLG_LogRef LOG = {"bke.asset_service"};
+
namespace blender::bke {
const CatalogFilePath AssetCatalogService::DEFAULT_CATALOG_FILENAME = "blender_assets.cats.txt";
@@ -311,6 +315,7 @@ void AssetCatalogService::load_from_disk(const CatalogFilePath &file_or_director
BLI_stat_t status;
if (BLI_stat(file_or_directory_path.data(), &status) == -1) {
/* TODO(@sybren): throw an appropriate exception. */
+ CLOG_WARN(&LOG, "path not found: %s", file_or_directory_path.data());
return;
}
@@ -337,6 +342,7 @@ void AssetCatalogService::load_directory_recursive(const CatalogFilePath &direct
if (!BLI_exists(file_path.data())) {
/* No file to be loaded is perfectly fine. */
+ CLOG_INFO(&LOG, 2, "path not found: %s", file_path.data());
return;
}
@@ -514,7 +520,7 @@ CatalogFilePath AssetCatalogService::find_suitable_cdf_path_for_writing(
"A non-empty .blend file path is required to be able to determine where the "
"catalog definition file should be put");
- /* Ask the asset library API for an appropriate location. */
+ /* Ask the asset library API for an appropriate location. */
char suitable_root_path[PATH_MAX];
const bool asset_lib_root_found = BKE_asset_library_find_suitable_root_path_from_path(
blend_file_path.c_str(), suitable_root_path);
@@ -824,8 +830,12 @@ void AssetCatalogDefinitionFile::parse_catalog_file(
const CatalogFilePath &catalog_definition_file_path,
AssetCatalogParsedFn catalog_loaded_callback)
{
- std::fstream infile(catalog_definition_file_path);
+ fstream infile(catalog_definition_file_path, std::ios::in);
+ if (!infile.is_open()) {
+ CLOG_ERROR(&LOG, "%s: unable to open file", catalog_definition_file_path.c_str());
+ return;
+ }
bool seen_version_number = false;
std::string line;
while (std::getline(infile, line)) {
@@ -956,7 +966,7 @@ bool AssetCatalogDefinitionFile::write_to_disk_unsafe(const CatalogFilePath &des
return false;
}
- std::ofstream output(dest_file_path);
+ fstream output(dest_file_path, std::ios::out);
/* TODO(@sybren): remember the line ending style that was originally read, then use that to write
* the file again. */
diff --git a/source/blender/blenkernel/intern/asset_catalog_test.cc b/source/blender/blenkernel/intern/asset_catalog_test.cc
index ba8f8716823..8c39bfc9770 100644
--- a/source/blender/blenkernel/intern/asset_catalog_test.cc
+++ b/source/blender/blenkernel/intern/asset_catalog_test.cc
@@ -27,6 +27,8 @@
#include "DNA_asset_types.h"
#include "DNA_userdef_types.h"
+#include "CLG_log.h"
+
#include "testing/testing.h"
namespace blender::bke::tests {
@@ -93,6 +95,18 @@ class AssetCatalogTest : public testing::Test {
CatalogFilePath asset_library_root_;
CatalogFilePath temp_library_path_;
+ static void SetUpTestSuite()
+ {
+ testing::Test::SetUpTestSuite();
+ CLG_init();
+ }
+
+ static void TearDownTestSuite()
+ {
+ CLG_exit();
+ testing::Test::TearDownTestSuite();
+ }
+
void SetUp() override
{
const std::string test_files_dir = blender::tests::flags_test_asset_dir();
@@ -225,7 +239,7 @@ class AssetCatalogTest : public testing::Test {
}
/* Create an empty CDF to add complexity. It should not save to this, but to the top-level
- * one.*/
+ * one. */
ASSERT_TRUE(BLI_file_touch(cdf_in_subdir.c_str()));
ASSERT_EQ(0, BLI_file_size(cdf_in_subdir.c_str()));
@@ -549,6 +563,30 @@ TEST_F(AssetCatalogTest, write_single_file)
/* TODO(@sybren): test ordering of catalogs in the file. */
}
+TEST_F(AssetCatalogTest, read_write_unicode_filepath)
+{
+ TestableAssetCatalogService service(asset_library_root_);
+ const CatalogFilePath load_from_path = asset_library_root_ + "/новый/" +
+ AssetCatalogService::DEFAULT_CATALOG_FILENAME;
+ service.load_from_disk(load_from_path);
+
+ const CatalogFilePath save_to_path = use_temp_path() + "новый.cats.txt";
+ AssetCatalogDefinitionFile *cdf = service.get_catalog_definition_file();
+ ASSERT_NE(nullptr, cdf) << "unable to load " << load_from_path;
+ EXPECT_TRUE(cdf->write_to_disk(save_to_path));
+
+ AssetCatalogService loaded_service(save_to_path);
+ loaded_service.load_from_disk();
+
+ /* Test that the file was loaded correctly. */
+ const bUUID materials_uuid("a2151dff-dead-4f29-b6bc-b2c7d6cccdb4");
+ const AssetCatalog *cat = loaded_service.find_catalog(materials_uuid);
+ ASSERT_NE(nullptr, cat);
+ EXPECT_EQ(materials_uuid, cat->catalog_id);
+ EXPECT_EQ(AssetCatalogPath("Материалы"), cat->path);
+ EXPECT_EQ("Russian Materials", cat->simple_name);
+}
+
TEST_F(AssetCatalogTest, no_writing_empty_files)
{
const CatalogFilePath temp_lib_root = create_temp_path();
diff --git a/source/blender/blenkernel/intern/attribute_access.cc b/source/blender/blenkernel/intern/attribute_access.cc
index 1a4265d936b..cc43a3e26a8 100644
--- a/source/blender/blenkernel/intern/attribute_access.cc
+++ b/source/blender/blenkernel/intern/attribute_access.cc
@@ -30,7 +30,7 @@
#include "DNA_pointcloud_types.h"
#include "BLI_color.hh"
-#include "BLI_float2.hh"
+#include "BLI_math_vec_types.hh"
#include "BLI_span.hh"
#include "BLT_translation.h"
diff --git a/source/blender/blenkernel/intern/attribute_access_intern.hh b/source/blender/blenkernel/intern/attribute_access_intern.hh
index b77d7010efa..2cd128081eb 100644
--- a/source/blender/blenkernel/intern/attribute_access_intern.hh
+++ b/source/blender/blenkernel/intern/attribute_access_intern.hh
@@ -161,7 +161,7 @@ class CustomDataAttributeProvider final : public DynamicAttributesProvider {
bool try_create(GeometryComponent &component,
const AttributeIDRef &attribute_id,
- const AttributeDomain domain,
+ AttributeDomain domain,
const CustomDataType data_type,
const AttributeInit &initializer) const final;
@@ -185,8 +185,8 @@ class CustomDataAttributeProvider final : public DynamicAttributesProvider {
*/
class NamedLegacyCustomDataProvider final : public DynamicAttributesProvider {
private:
- using AsReadAttribute = GVArray (*)(const void *data, const int domain_size);
- using AsWriteAttribute = GVMutableArray (*)(void *data, const int domain_size);
+ using AsReadAttribute = GVArray (*)(const void *data, int domain_size);
+ using AsWriteAttribute = GVMutableArray (*)(void *data, int domain_size);
const AttributeDomain domain_;
const CustomDataType attribute_type_;
const CustomDataType stored_type_;
@@ -229,8 +229,8 @@ class NamedLegacyCustomDataProvider final : public DynamicAttributesProvider {
* if the stored type is the same as the attribute type.
*/
class BuiltinCustomDataLayerProvider final : public BuiltinAttributeProvider {
- using AsReadAttribute = GVArray (*)(const void *data, const int domain_size);
- using AsWriteAttribute = GVMutableArray (*)(void *data, const int domain_size);
+ using AsReadAttribute = GVArray (*)(const void *data, int domain_size);
+ using AsWriteAttribute = GVMutableArray (*)(void *data, int domain_size);
using UpdateOnRead = void (*)(const GeometryComponent &component);
using UpdateOnWrite = void (*)(GeometryComponent &component);
const CustomDataType stored_type_;
diff --git a/source/blender/blenkernel/intern/blendfile_link_append.c b/source/blender/blenkernel/intern/blendfile_link_append.c
index c265a6e2b7d..9b3f4c2fae8 100644
--- a/source/blender/blenkernel/intern/blendfile_link_append.c
+++ b/source/blender/blenkernel/intern/blendfile_link_append.c
@@ -440,6 +440,16 @@ static bool object_in_any_collection(Main *bmain, Object *ob)
return false;
}
+static bool collection_instantiated_by_any_object(Main *bmain, Collection *collection)
+{
+ LISTBASE_FOREACH (Object *, ob, &bmain->objects) {
+ if (ob->type == OB_EMPTY && ob->instance_collection == collection) {
+ return true;
+ }
+ }
+ return false;
+}
+
static ID *loose_data_instantiate_process_check(LooseDataInstantiateContext *instantiate_context,
BlendfileLinkAppendContextItem *item)
{
@@ -633,12 +643,19 @@ static void loose_data_instantiate_collection_process(
* children.
*/
Collection *collection = (Collection *)id;
+ /* The collection could be linked/appended together with an Empty object instantiating it;
+ * better not to instantiate the collection in the view-layer in that case.
+ *
+ * This can easily happen when copy/pasting such an instantiating empty, see T93839. */
+ const bool collection_is_instantiated = collection_instantiated_by_any_object(bmain,
+ collection);
/* Always consider adding collections directly selected by the user. */
- bool do_add_collection = (item->tag & LINK_APPEND_TAG_INDIRECT) == 0;
+ bool do_add_collection = (item->tag & LINK_APPEND_TAG_INDIRECT) == 0 &&
+ !collection_is_instantiated;
/* In linking case, do not enforce instantiating non-directly linked collections/objects.
- * This avoids cluttering the ViewLayers, user can instantiate themselves specific collections
+ * This avoids cluttering the view-layers, user can instantiate themselves specific collections
* or objects easily from the Outliner if needed. */
- if (!do_add_collection && do_append) {
+ if (!do_add_collection && do_append && !collection_is_instantiated) {
LISTBASE_FOREACH (CollectionObject *, coll_ob, &collection->gobject) {
Object *ob = coll_ob->ob;
if (!object_in_any_scene(bmain, ob)) {
@@ -664,21 +681,25 @@ static void loose_data_instantiate_collection_process(
Collection *collection = (Collection *)id;
bool do_add_collection = (id->tag & LIB_TAG_DOIT) != 0;
+ if (!do_add_collection) {
+ continue;
+ }
/* When instantiated into view-layer, do not add collections if one of their parents is also
- * instantiated. In case of empty-instantiation though, instantiation of all user-selected
- * collections is the desired behavior. */
- if (!do_add_collection ||
- (!do_instantiate_as_empty &&
- loose_data_instantiate_collection_parents_check_recursive(collection))) {
+ * instantiated. */
+ if (!do_instantiate_as_empty &&
+ loose_data_instantiate_collection_parents_check_recursive(collection)) {
+ continue;
+ }
+ /* When instantiated as empty, do not add indirectly linked (i.e. non-user-selected)
+ * collections. */
+ if (do_instantiate_as_empty && (item->tag & LINK_APPEND_TAG_INDIRECT) != 0) {
continue;
}
loose_data_instantiate_ensure_active_collection(instantiate_context);
Collection *active_collection = instantiate_context->active_collection;
- /* In case user requested instantiation of collections as empties, do so for the one they
- * explicitly selected (originally directly linked IDs) only. */
- if (do_instantiate_as_empty && (item->tag & LINK_APPEND_TAG_INDIRECT) == 0) {
+ if (do_instantiate_as_empty) {
/* BKE_object_add(...) messes with the selection. */
Object *ob = BKE_object_add_only_object(bmain, OB_EMPTY, collection->id.name + 2);
ob->type = OB_EMPTY;
@@ -726,6 +747,8 @@ static void loose_data_instantiate_object_process(LooseDataInstantiateContext *i
* if you want it do it at the editor level. */
const bool object_set_active = false;
+ const bool is_linking = (lapp_context->params->flag & FILE_LINK) != 0;
+
/* NOTE: For objects we only view_layer-instantiate duplicated objects that are not yet used
* anywhere. */
LinkNode *itemlink;
@@ -736,6 +759,17 @@ static void loose_data_instantiate_object_process(LooseDataInstantiateContext *i
continue;
}
+ /* In the linking case, never instantiate stray objects that are not directly linked.
+ *
+ * While this is not ideal (in theory no object should remain un-owned), in the case of indirectly
+ * linked objects, the other solution would be to add them to a local collection, which would
+ * make them directly linked. For now, keeping them indirectly linked is considered more important.
+ * Ref. T93757.
+ */
+ if (is_linking && (item->tag & LINK_APPEND_TAG_INDIRECT) != 0) {
+ continue;
+ }
+
Object *ob = (Object *)id;
if (object_in_any_collection(bmain, ob)) {
diff --git a/source/blender/blenkernel/intern/bpath.c b/source/blender/blenkernel/intern/bpath.c
index 85e49774dfd..a1570b4e031 100644
--- a/source/blender/blenkernel/intern/bpath.c
+++ b/source/blender/blenkernel/intern/bpath.c
@@ -236,7 +236,8 @@ void BKE_bpath_missing_files_check(Main *bmain, ReportList *reports)
BKE_bpath_foreach_path_main(&(BPathForeachPathData){
.bmain = bmain,
.callback_function = check_missing_files_foreach_path_cb,
- .flag = BKE_BPATH_FOREACH_PATH_ABSOLUTE | BKE_BPATH_FOREACH_PATH_SKIP_PACKED,
+ .flag = BKE_BPATH_FOREACH_PATH_ABSOLUTE | BKE_BPATH_FOREACH_PATH_SKIP_PACKED |
+ BKE_BPATH_FOREACH_PATH_RESOLVE_TOKEN | BKE_BPATH_TRAVERSE_SKIP_WEAK_REFERENCES,
.user_data = reports});
}
@@ -384,7 +385,8 @@ void BKE_bpath_missing_files_find(Main *bmain,
const bool find_all)
{
struct BPathFind_Data data = {NULL};
- const int flag = BKE_BPATH_FOREACH_PATH_ABSOLUTE | BKE_BPATH_FOREACH_PATH_RELOAD_EDITED;
+ const int flag = BKE_BPATH_FOREACH_PATH_ABSOLUTE | BKE_BPATH_FOREACH_PATH_RELOAD_EDITED |
+ BKE_BPATH_FOREACH_PATH_RESOLVE_TOKEN;
data.basedir = BKE_main_blendfile_path(bmain);
data.reports = reports;
diff --git a/source/blender/blenkernel/intern/brush.c b/source/blender/blenkernel/intern/brush.c
index 153a65d67db..c86d4658cc9 100644
--- a/source/blender/blenkernel/intern/brush.c
+++ b/source/blender/blenkernel/intern/brush.c
@@ -149,16 +149,9 @@ static void brush_make_local(Main *bmain, ID *id, const int flags)
Brush *brush = (Brush *)id;
const bool lib_local = (flags & LIB_ID_MAKELOCAL_FULL_LIBRARY) != 0;
- bool force_local = (flags & LIB_ID_MAKELOCAL_FORCE_LOCAL) != 0;
- bool force_copy = (flags & LIB_ID_MAKELOCAL_FORCE_COPY) != 0;
- BLI_assert(force_copy == false || force_copy != force_local);
- bool is_local = false, is_lib = false;
-
- /* - only lib users: do nothing (unless force_local is set)
- * - only local users: set flag
- * - mixed: make copy
- */
+ bool force_local, force_copy;
+ BKE_lib_id_make_local_generic_action_define(bmain, id, flags, &force_local, &force_copy);
if (brush->clone.image) {
/* Special case: ima always local immediately. Clone image should only have one user anyway. */
@@ -171,18 +164,6 @@ static void brush_make_local(Main *bmain, ID *id, const int flags)
BLI_assert(brush->clone.image->id.lib == NULL && brush->clone.image->id.newid == NULL);
}
- if (!force_local && !force_copy) {
- BKE_library_ID_test_usages(bmain, brush, &is_local, &is_lib);
- if (lib_local || is_local) {
- if (!is_lib) {
- force_local = true;
- }
- else {
- force_copy = true;
- }
- }
- }
-
if (force_local) {
BKE_lib_id_clear_library_data(bmain, &brush->id, flags);
BKE_lib_id_expand_local(bmain, &brush->id, flags);
diff --git a/source/blender/blenkernel/intern/bvhutils.cc b/source/blender/blenkernel/intern/bvhutils.cc
index a68119fbc1d..5e7a4eea0cd 100644
--- a/source/blender/blenkernel/intern/bvhutils.cc
+++ b/source/blender/blenkernel/intern/bvhutils.cc
@@ -127,7 +127,7 @@ bool bvhcache_has_tree(const BVHCache *bvh_cache, const BVHTree *tree)
BVHCache *bvhcache_init()
{
- BVHCache *cache = (BVHCache *)MEM_callocN(sizeof(BVHCache), __func__);
+ BVHCache *cache = MEM_cnew<BVHCache>(__func__);
BLI_mutex_init(&cache->mutex);
return cache;
}
@@ -1601,6 +1601,8 @@ BVHTree *BKE_bvhtree_from_mesh_get(struct BVHTreeFromMesh *data,
memset(data, 0, sizeof(*data));
}
+ data->vert_normals = BKE_mesh_vertex_normals_ensure(mesh);
+
return tree;
}
diff --git a/source/blender/blenkernel/intern/cachefile.c b/source/blender/blenkernel/intern/cachefile.c
index 8833f3eabe9..75df2e98fcd 100644
--- a/source/blender/blenkernel/intern/cachefile.c
+++ b/source/blender/blenkernel/intern/cachefile.c
@@ -54,6 +54,8 @@
#include "BLO_read_write.h"
+#include "MEM_guardedalloc.h"
+
#ifdef WITH_ALEMBIC
# include "ABC_alembic.h"
#endif
@@ -86,6 +88,7 @@ static void cache_file_copy_data(Main *UNUSED(bmain),
cache_file_dst->handle = NULL;
cache_file_dst->handle_readers = NULL;
BLI_duplicatelist(&cache_file_dst->object_paths, &cache_file_src->object_paths);
+ BLI_duplicatelist(&cache_file_dst->layers, &cache_file_src->layers);
}
static void cache_file_free_data(ID *id)
@@ -93,6 +96,7 @@ static void cache_file_free_data(ID *id)
CacheFile *cache_file = (CacheFile *)id;
cachefile_handle_free(cache_file);
BLI_freelistN(&cache_file->object_paths);
+ BLI_freelistN(&cache_file->layers);
}
static void cache_file_foreach_path(ID *id, BPathForeachPathData *bpath_data)
@@ -117,6 +121,11 @@ static void cache_file_blend_write(BlendWriter *writer, ID *id, const void *id_a
if (cache_file->adt) {
BKE_animdata_blend_write(writer, cache_file->adt);
}
+
+ /* write layers */
+ LISTBASE_FOREACH (CacheFileLayer *, layer, &cache_file->layers) {
+ BLO_write_struct(writer, CacheFileLayer, layer);
+ }
}
static void cache_file_blend_read_data(BlendDataReader *reader, ID *id)
@@ -130,6 +139,9 @@ static void cache_file_blend_read_data(BlendDataReader *reader, ID *id)
/* relink animdata */
BLO_read_data_address(reader, &cache_file->adt);
BKE_animdata_blend_read_data(reader, cache_file->adt);
+
+ /* relink layers */
+ BLO_read_list(reader, &cache_file->layers);
}
IDTypeInfo IDType_ID_CF = {
@@ -364,7 +376,8 @@ void BKE_cachefile_eval(Main *bmain, Depsgraph *depsgraph, CacheFile *cache_file
#ifdef WITH_ALEMBIC
if (BLI_path_extension_check_glob(filepath, "*abc")) {
cache_file->type = CACHEFILE_TYPE_ALEMBIC;
- cache_file->handle = ABC_create_handle(bmain, filepath, &cache_file->object_paths);
+ cache_file->handle = ABC_create_handle(
+ bmain, filepath, cache_file->layers.first, &cache_file->object_paths);
BLI_strncpy(cache_file->handle_filepath, filepath, FILE_MAX);
}
#endif
@@ -435,3 +448,35 @@ bool BKE_cache_file_uses_render_procedural(const CacheFile *cache_file,
const bool is_final_render = (eEvaluationMode)dag_eval_mode == DAG_EVAL_RENDER;
return cache_file->use_render_procedural && !is_final_render;
}
+
+CacheFileLayer *BKE_cachefile_add_layer(CacheFile *cache_file, const char filename[1024])
+{
+ for (CacheFileLayer *layer = cache_file->layers.first; layer; layer = layer->next) {
+ if (STREQ(layer->filepath, filename)) {
+ return NULL;
+ }
+ }
+
+ const int num_layers = BLI_listbase_count(&cache_file->layers);
+
+ CacheFileLayer *layer = MEM_callocN(sizeof(CacheFileLayer), "CacheFileLayer");
+ BLI_strncpy(layer->filepath, filename, sizeof(layer->filepath));
+
+ BLI_addtail(&cache_file->layers, layer);
+
+ cache_file->active_layer = (char)(num_layers + 1);
+
+ return layer;
+}
+
+CacheFileLayer *BKE_cachefile_get_active_layer(CacheFile *cache_file)
+{
+ return BLI_findlink(&cache_file->layers, cache_file->active_layer - 1);
+}
+
+void BKE_cachefile_remove_layer(CacheFile *cache_file, CacheFileLayer *layer)
+{
+ cache_file->active_layer = 0;
+ BLI_remlink(&cache_file->layers, layer);
+ MEM_freeN(layer);
+}
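
For reference, a short usage sketch of the CacheFile layer API added above. Assumptions: `cache_file` is a valid CacheFile ID and the .abc path is hypothetical; this snippet is not part of the commit:

    /* Sketch only: add a layer, query the active one, then remove it again. */
    CacheFileLayer *layer = BKE_cachefile_add_layer(cache_file, "/tmp/overrides.abc");
    if (layer != NULL) { /* NULL is returned when a layer with the same path already exists. */
      CacheFileLayer *active = BKE_cachefile_get_active_layer(cache_file); /* The newly added layer. */
      BKE_cachefile_remove_layer(cache_file, active);
    }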
diff --git a/source/blender/blenkernel/intern/cdderivedmesh.c b/source/blender/blenkernel/intern/cdderivedmesh.c
index c93d320787a..a4f3e84a2bf 100644
--- a/source/blender/blenkernel/intern/cdderivedmesh.c
+++ b/source/blender/blenkernel/intern/cdderivedmesh.c
@@ -56,6 +56,7 @@ typedef struct {
/* these point to data in the DerivedMesh custom data layers,
* they are only here for efficiency and convenience */
MVert *mvert;
+ const float (*vert_normals)[3];
MEdge *medge;
MFace *mface;
MLoop *mloop;
@@ -143,7 +144,7 @@ static void cdDM_getVertCo(DerivedMesh *dm, int index, float r_co[3])
static void cdDM_getVertNo(DerivedMesh *dm, int index, float r_no[3])
{
CDDerivedMesh *cddm = (CDDerivedMesh *)dm;
- normal_short_to_float_v3(r_no, cddm->mvert[index].no);
+ copy_v3_v3(r_no, cddm->vert_normals[index]);
}
static const MeshElemMap *cdDM_getPolyMap(Object *ob, DerivedMesh *dm)
@@ -281,6 +282,7 @@ static DerivedMesh *cdDM_from_mesh_ex(Mesh *mesh,
CustomData_merge(&mesh->pdata, &dm->polyData, cddata_masks.pmask, alloctype, mesh->totpoly);
cddm->mvert = CustomData_get_layer(&dm->vertData, CD_MVERT);
+ cddm->vert_normals = CustomData_get_layer(&dm->vertData, CD_NORMAL);
cddm->medge = CustomData_get_layer(&dm->edgeData, CD_MEDGE);
cddm->mloop = CustomData_get_layer(&dm->loopData, CD_MLOOP);
cddm->mpoly = CustomData_get_layer(&dm->polyData, CD_MPOLY);
diff --git a/source/blender/blenkernel/intern/cloth.c b/source/blender/blenkernel/intern/cloth.c
index 42633ff3809..43b8690e219 100644
--- a/source/blender/blenkernel/intern/cloth.c
+++ b/source/blender/blenkernel/intern/cloth.c
@@ -1400,8 +1400,7 @@ static bool find_internal_spring_target_vertex(BVHTreeFromMesh *treedata,
float radius;
copy_v3_v3(co, treedata->vert[v_idx].co);
- normal_short_to_float_v3(no, treedata->vert[v_idx].no);
- negate_v3(no);
+ negate_v3_v3(no, treedata->vert_normals[v_idx]);
float vec_len = sin(max_diversion);
float offset[3];
diff --git a/source/blender/blenkernel/intern/collection.c b/source/blender/blenkernel/intern/collection.c
index 21a9159004f..e6ce4eb9440 100644
--- a/source/blender/blenkernel/intern/collection.c
+++ b/source/blender/blenkernel/intern/collection.c
@@ -186,7 +186,7 @@ static ID *collection_owner_get(Main *bmain, ID *id)
Collection *master_collection = (Collection *)id;
BLI_assert((master_collection->flag & COLLECTION_IS_MASTER) != 0);
- for (Scene *scene = bmain->scenes.first; scene != NULL; scene = scene->id.next) {
+ LISTBASE_FOREACH (Scene *, scene, &bmain->scenes) {
if (scene->master_collection == master_collection) {
return &scene->id;
}
@@ -1205,9 +1205,7 @@ static void collection_object_remove_nulls(Collection *collection)
{
bool changed = false;
- for (CollectionObject *cob = collection->gobject.first, *cob_next = NULL; cob; cob = cob_next) {
- cob_next = cob->next;
-
+ LISTBASE_FOREACH_MUTABLE (CollectionObject *, cob, &collection->gobject) {
if (cob->ob == NULL) {
BLI_freelinkN(&collection->gobject, cob);
changed = true;
@@ -1221,22 +1219,61 @@ static void collection_object_remove_nulls(Collection *collection)
void BKE_collections_object_remove_nulls(Main *bmain)
{
- for (Scene *scene = bmain->scenes.first; scene; scene = scene->id.next) {
+ LISTBASE_FOREACH (Scene *, scene, &bmain->scenes) {
collection_object_remove_nulls(scene->master_collection);
}
- for (Collection *collection = bmain->collections.first; collection;
- collection = collection->id.next) {
+ LISTBASE_FOREACH (Collection *, collection, &bmain->collections) {
collection_object_remove_nulls(collection);
}
}
-static void collection_null_children_remove(Collection *collection)
+/*
+ * Remove all duplicate objects from collections.
+ * This is used for library remapping, which happens when remapping an object to another one
+ * already present in the collection. Otherwise this should never happen.
+ */
+static void collection_object_remove_duplicates(Collection *collection)
{
- for (CollectionChild *child = collection->children.first, *child_next = NULL; child;
- child = child_next) {
- child_next = child->next;
+ bool changed = false;
+
+ LISTBASE_FOREACH_MUTABLE (CollectionObject *, cob, &collection->gobject) {
+ if (cob->ob->runtime.collection_management) {
+ BLI_freelinkN(&collection->gobject, cob);
+ changed = true;
+ continue;
+ }
+ cob->ob->runtime.collection_management = true;
+ }
+ /* Cleanup. */
+ LISTBASE_FOREACH (CollectionObject *, cob, &collection->gobject) {
+ cob->ob->runtime.collection_management = false;
+ }
+
+ if (changed) {
+ BKE_collection_object_cache_free(collection);
+ }
+}
+
+void BKE_collections_object_remove_duplicates(struct Main *bmain)
+{
+ LISTBASE_FOREACH (Object *, ob, &bmain->objects) {
+ ob->runtime.collection_management = false;
+ }
+
+ LISTBASE_FOREACH (Scene *, scene, &bmain->scenes) {
+ collection_object_remove_duplicates(scene->master_collection);
+ }
+
+ LISTBASE_FOREACH (Collection *, collection, &bmain->collections) {
+ collection_object_remove_duplicates(collection);
+ }
+}
+
+static void collection_null_children_remove(Collection *collection)
+{
+ LISTBASE_FOREACH_MUTABLE (CollectionChild *, child, &collection->children) {
if (child->collection == NULL) {
BLI_freelinkN(&collection->children, child);
}
@@ -1245,9 +1282,7 @@ static void collection_null_children_remove(Collection *collection)
static void collection_missing_parents_remove(Collection *collection)
{
- for (CollectionParent *parent = collection->parents.first, *parent_next; parent != NULL;
- parent = parent_next) {
- parent_next = parent->next;
+ LISTBASE_FOREACH_MUTABLE (CollectionParent *, parent, &collection->parents) {
if ((parent->collection == NULL) || !collection_find_child(parent->collection, collection)) {
BLI_freelinkN(&collection->parents, parent);
}
@@ -1267,28 +1302,23 @@ void BKE_collections_child_remove_nulls(Main *bmain,
* otherwise we can miss some cases...
* Also, master collections are not in bmain, so we also need to loop over scenes.
*/
- for (child_collection = bmain->collections.first; child_collection != NULL;
- child_collection = child_collection->id.next) {
- collection_null_children_remove(child_collection);
+ LISTBASE_FOREACH (Collection *, collection, &bmain->collections) {
+ collection_null_children_remove(collection);
}
- for (Scene *scene = bmain->scenes.first; scene != NULL; scene = scene->id.next) {
+ LISTBASE_FOREACH (Scene *, scene, &bmain->scenes) {
collection_null_children_remove(scene->master_collection);
}
}
- for (child_collection = bmain->collections.first; child_collection != NULL;
- child_collection = child_collection->id.next) {
- collection_missing_parents_remove(child_collection);
+ LISTBASE_FOREACH (Collection *, collection, &bmain->collections) {
+ collection_missing_parents_remove(collection);
}
- for (Scene *scene = bmain->scenes.first; scene != NULL; scene = scene->id.next) {
+ LISTBASE_FOREACH (Scene *, scene, &bmain->scenes) {
collection_missing_parents_remove(scene->master_collection);
}
}
else {
- for (CollectionParent *parent = child_collection->parents.first, *parent_next; parent;
- parent = parent_next) {
- parent_next = parent->next;
-
+ LISTBASE_FOREACH_MUTABLE (CollectionParent *, parent, &child_collection->parents) {
collection_null_children_remove(parent->collection);
if (!collection_find_child(parent->collection, child_collection)) {
@@ -1586,9 +1616,9 @@ static void collection_parents_rebuild_recursive(Collection *collection)
BKE_collection_parent_relations_rebuild(collection);
collection->tag &= ~COLLECTION_TAG_RELATION_REBUILD;
- for (CollectionChild *child = collection->children.first; child != NULL; child = child->next) {
+ LISTBASE_FOREACH (CollectionChild *, child, &collection->children) {
/* See comment above in `BKE_collection_parent_relations_rebuild`. */
- if ((collection->id.tag & (LIB_TAG_NO_MAIN | LIB_TAG_COPIED_ON_WRITE)) != 0) {
+ if ((child->collection->id.tag & (LIB_TAG_NO_MAIN | LIB_TAG_COPIED_ON_WRITE)) != 0) {
continue;
}
collection_parents_rebuild_recursive(child->collection);
@@ -1598,8 +1628,7 @@ static void collection_parents_rebuild_recursive(Collection *collection)
void BKE_main_collections_parent_relations_rebuild(Main *bmain)
{
/* Only collections not in bmain (master ones in scenes) have no parent... */
- for (Collection *collection = bmain->collections.first; collection != NULL;
- collection = collection->id.next) {
+ LISTBASE_FOREACH (Collection *, collection, &bmain->collections) {
BLI_freelistN(&collection->parents);
collection->tag |= COLLECTION_TAG_RELATION_REBUILD;
@@ -1607,7 +1636,7 @@ void BKE_main_collections_parent_relations_rebuild(Main *bmain)
/* Scene's master collections will be 'root' parent of most of our collections, so start with
* them. */
- for (Scene *scene = bmain->scenes.first; scene != NULL; scene = scene->id.next) {
+ LISTBASE_FOREACH (Scene *, scene, &bmain->scenes) {
/* This function can be called from readfile.c, where this pointer is not guaranteed to be non-NULL.
*/
if (scene->master_collection != NULL) {
@@ -1619,8 +1648,7 @@ void BKE_main_collections_parent_relations_rebuild(Main *bmain)
/* We may have parent chains outside of scene's master_collection context? At least, readfile's
* lib_link_collection_data() seems to assume that, so do the same here. */
- for (Collection *collection = bmain->collections.first; collection != NULL;
- collection = collection->id.next) {
+ LISTBASE_FOREACH (Collection *, collection, &bmain->collections) {
if (collection->tag & COLLECTION_TAG_RELATION_REBUILD) {
* NOTE: we do not have easy access to 'which collection is the root' info in that case, which
* means test for cycles in collection relationships may fail here. I don't think that is an
diff --git a/source/blender/blenkernel/intern/constraint.c b/source/blender/blenkernel/intern/constraint.c
index d284c32b1df..f013ef99dde 100644
--- a/source/blender/blenkernel/intern/constraint.c
+++ b/source/blender/blenkernel/intern/constraint.c
@@ -34,6 +34,7 @@
#include "BLI_blenlib.h"
#include "BLI_kdopbvh.h"
+#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_string_utils.h"
#include "BLI_utildefines.h"
@@ -71,6 +72,7 @@
#include "BKE_global.h"
#include "BKE_idprop.h"
#include "BKE_lib_id.h"
+#include "BKE_mesh.h"
#include "BKE_mesh_runtime.h"
#include "BKE_movieclip.h"
#include "BKE_object.h"
@@ -543,6 +545,7 @@ static void contarget_get_mesh_mat(Object *ob, const char *substring, float mat[
float vec[3] = {0.0f, 0.0f, 0.0f};
float normal[3] = {0.0f, 0.0f, 0.0f};
float weightsum = 0.0f;
+ const float(*vert_normals)[3] = BKE_mesh_vertex_normals_ensure(me_eval);
if (me_eval) {
const MDeformVert *dvert = CustomData_get_layer(&me_eval->vdata, CD_MDEFORMVERT);
int numVerts = me_eval->totvert;
@@ -557,10 +560,8 @@ static void contarget_get_mesh_mat(Object *ob, const char *substring, float mat[
const MDeformWeight *dw = BKE_defvert_find_index(dv, defgroup);
if (dw && dw->weight > 0.0f) {
- float nor[3];
- normal_short_to_float_v3(nor, mv->no);
madd_v3_v3fl(vec, mv->co, dw->weight);
- madd_v3_v3fl(normal, nor, dw->weight);
+ madd_v3_v3fl(normal, vert_normals[i], dw->weight);
weightsum += dw->weight;
}
}
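
The change above follows the move of vertex normals out of the packed short storage in MVert: the loop now indexes the float array returned once by BKE_mesh_vertex_normals_ensure() instead of converting mv->no per vertex with normal_short_to_float_v3(). A small self-contained sketch of the same weighted-accumulation shape with plain arrays (nothing below is Blender API; the final divide-by-weightsum step is assumed from the surrounding function, not shown in the hunk):

  #include <cstdio>
  #include <cmath>

  int main()
  {
    const float positions[3][3] = {{0, 0, 0}, {1, 0, 0}, {0, 1, 0}};
    const float normals[3][3] = {{0, 0, 1}, {1, 0, 0}, {0, 0, 1}}; /* already floats */
    const float weights[3] = {0.5f, 0.0f, 0.5f};                   /* vertex-group weights */

    float vec[3] = {0, 0, 0}, normal[3] = {0, 0, 0};
    float weightsum = 0.0f;
    for (int i = 0; i < 3; i++) {
      if (weights[i] > 0.0f) {
        for (int j = 0; j < 3; j++) {
          vec[j] += weights[i] * positions[i][j];
          normal[j] += weights[i] * normals[i][j]; /* no short-to-float conversion step */
        }
        weightsum += weights[i];
      }
    }
    if (weightsum > 0.0f) {
      for (int j = 0; j < 3; j++) {
        vec[j] /= weightsum;
      }
    }
    const float len = sqrtf(normal[0] * normal[0] + normal[1] * normal[1] + normal[2] * normal[2]);
    printf("center (%.2f %.2f %.2f), accumulated normal length %.2f\n", vec[0], vec[1], vec[2], len);
    return 0;
  }
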
@@ -3755,7 +3756,7 @@ static void minmax_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *targ
copy_m4_m4(tarmat, ct->matrix);
if (data->flag & MINMAX_USEROT) {
- /* take rotation of target into account by doing the transaction in target's localspace */
+ /* Take rotation of target into account by doing the transformation in target's local-space. */
invert_m4_m4(imat, tarmat);
mul_m4_m4m4(tmat, imat, obmat);
copy_m4_m4(obmat, tmat);
@@ -3800,7 +3801,7 @@ static void minmax_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *targ
if (val1 > val2) {
obmat[3][index] = tarmat[3][index] + data->offset;
if (data->flag & MINMAX_USEROT) {
- /* get out of localspace */
+ /* Get out of local-space. */
mul_m4_m4m4(tmat, ct->matrix, obmat);
copy_m4_m4(cob->matrix, tmat);
}
@@ -5676,13 +5677,19 @@ bool BKE_constraint_apply_for_object(Depsgraph *depsgraph,
const float ctime = BKE_scene_frame_get(scene);
- bConstraint *new_con = BKE_constraint_duplicate_ex(con, 0, !ID_IS_LINKED(ob));
+ /* Do this all in the evaluated domain (e.g. shrinkwrap needs to access evaluated constraint
+ * target mesh). */
+ Scene *scene_eval = DEG_get_evaluated_scene(depsgraph);
+ Object *ob_eval = DEG_get_evaluated_object(depsgraph, ob);
+ bConstraint *con_eval = BKE_constraints_find_name(&ob_eval->constraints, con->name);
+
+ bConstraint *new_con = BKE_constraint_duplicate_ex(con_eval, 0, !ID_IS_LINKED(ob));
ListBase single_con = {new_con, new_con};
bConstraintOb *cob = BKE_constraints_make_evalob(
- depsgraph, scene, ob, NULL, CONSTRAINT_OBTYPE_OBJECT);
+ depsgraph, scene_eval, ob_eval, NULL, CONSTRAINT_OBTYPE_OBJECT);
/* Undo the effect of the current constraint stack evaluation. */
- mul_m4_m4m4(cob->matrix, ob->constinv, cob->matrix);
+ mul_m4_m4m4(cob->matrix, ob_eval->constinv, cob->matrix);
/* Evaluate single constraint. */
BKE_constraints_solve(depsgraph, &single_con, cob, ctime);
@@ -5695,7 +5702,7 @@ bool BKE_constraint_apply_for_object(Depsgraph *depsgraph,
BLI_freelinkN(&single_con, new_con);
/* Apply transform from matrix. */
- BKE_object_apply_mat4(ob, ob->obmat, true, true);
+ BKE_object_apply_mat4(ob, ob_eval->obmat, true, true);
return true;
}
@@ -5722,18 +5729,25 @@ bool BKE_constraint_apply_for_pose(
const float ctime = BKE_scene_frame_get(scene);
- bConstraint *new_con = BKE_constraint_duplicate_ex(con, 0, !ID_IS_LINKED(ob));
+ /* Do this all in the evaluated domain (e.g. shrinkwrap needs to access evaluated constraint
+ * target mesh). */
+ Scene *scene_eval = DEG_get_evaluated_scene(depsgraph);
+ Object *ob_eval = DEG_get_evaluated_object(depsgraph, ob);
+ bPoseChannel *pchan_eval = BKE_pose_channel_find_name(ob_eval->pose, pchan->name);
+ bConstraint *con_eval = BKE_constraints_find_name(&pchan_eval->constraints, con->name);
+
+ bConstraint *new_con = BKE_constraint_duplicate_ex(con_eval, 0, !ID_IS_LINKED(ob));
ListBase single_con;
single_con.first = new_con;
single_con.last = new_con;
float vec[3];
- copy_v3_v3(vec, pchan->pose_mat[3]);
+ copy_v3_v3(vec, pchan_eval->pose_mat[3]);
bConstraintOb *cob = BKE_constraints_make_evalob(
- depsgraph, scene, ob, pchan, CONSTRAINT_OBTYPE_BONE);
+ depsgraph, scene_eval, ob_eval, pchan_eval, CONSTRAINT_OBTYPE_BONE);
/* Undo the effects of currently applied constraints. */
- mul_m4_m4m4(cob->matrix, pchan->constinv, cob->matrix);
+ mul_m4_m4m4(cob->matrix, pchan_eval->constinv, cob->matrix);
/* Evaluate single constraint. */
BKE_constraints_solve(depsgraph, &single_con, cob, ctime);
BKE_constraints_clear_evalob(cob);
@@ -5744,12 +5758,12 @@ bool BKE_constraint_apply_for_pose(
/* Prevent constraints breaking a chain. */
if (pchan->bone->flag & BONE_CONNECTED) {
- copy_v3_v3(pchan->pose_mat[3], vec);
+ copy_v3_v3(pchan_eval->pose_mat[3], vec);
}
/* Apply transform from matrix. */
float mat[4][4];
- BKE_armature_mat_pose_to_bone(pchan, pchan->pose_mat, mat);
+ BKE_armature_mat_pose_to_bone(pchan, pchan_eval->pose_mat, mat);
BKE_pchan_apply_mat4(pchan, mat, true);
return true;
diff --git a/source/blender/blenkernel/intern/crazyspace.c b/source/blender/blenkernel/intern/crazyspace.c
index 6bbb9957b03..573595b6f90 100644
--- a/source/blender/blenkernel/intern/crazyspace.c
+++ b/source/blender/blenkernel/intern/crazyspace.c
@@ -41,6 +41,7 @@
#include "BKE_mesh_wrapper.h"
#include "BKE_modifier.h"
#include "BKE_multires.h"
+#include "BKE_report.h"
#include "DEG_depsgraph_query.h"
@@ -109,7 +110,7 @@ float (*BKE_crazyspace_get_mapped_editverts(struct Depsgraph *depsgraph, Object
/* disable subsurf temporarily, get mapped cos, and enable it */
if (modifiers_disable_subsurf_temporary(scene_eval, obedit_eval)) {
/* need to make a new derived mesh */
- makeDerivedMesh(depsgraph, scene_eval, obedit_eval, editmesh_eval, &CD_MASK_BAREMESH);
+ makeDerivedMesh(depsgraph, scene_eval, obedit_eval, &CD_MASK_BAREMESH);
}
/* now get the cage */
@@ -516,3 +517,85 @@ void BKE_crazyspace_build_sculpt(struct Depsgraph *depsgraph,
}
}
}
+
+/* -------------------------------------------------------------------- */
+/** \name Crazyspace API
+ * \{ */
+
+void BKE_crazyspace_api_eval(Depsgraph *depsgraph,
+ Scene *scene,
+ Object *object,
+ struct ReportList *reports)
+{
+ if (object->runtime.crazyspace_deform_imats != NULL ||
+ object->runtime.crazyspace_deform_cos != NULL) {
+ return;
+ }
+
+ if (object->type != OB_MESH) {
+ BKE_report(reports,
+ RPT_ERROR,
+ "Crazyspace transformation is only available for Mesh type of objects");
+ return;
+ }
+
+ const Mesh *mesh = (const Mesh *)object->data;
+ object->runtime.crazyspace_num_verts = mesh->totvert;
+ BKE_crazyspace_build_sculpt(depsgraph,
+ scene,
+ object,
+ &object->runtime.crazyspace_deform_imats,
+ &object->runtime.crazyspace_deform_cos);
+}
+
+void BKE_crazyspace_api_displacement_to_deformed(struct Object *object,
+ struct ReportList *reports,
+ int vertex_index,
+ float displacement[3],
+ float r_displacement_deformed[3])
+{
+ if (vertex_index < 0 || vertex_index >= object->runtime.crazyspace_num_verts) {
+ BKE_reportf(reports,
+ RPT_ERROR,
+ "Invalid vertex index %d (expected to be within 0 to %d range)",
+ vertex_index,
+ object->runtime.crazyspace_num_verts);
+ return;
+ }
+
+ mul_v3_m3v3(r_displacement_deformed,
+ object->runtime.crazyspace_deform_imats[vertex_index],
+ displacement);
+}
+
+void BKE_crazyspace_api_displacement_to_original(struct Object *object,
+ struct ReportList *reports,
+ int vertex_index,
+ float displacement_deformed[3],
+ float r_displacement[3])
+{
+ if (vertex_index < 0 || vertex_index >= object->runtime.crazyspace_num_verts) {
+ BKE_reportf(reports,
+ RPT_ERROR,
+ "Invalid vertex index %d (expected to be within 0 to %d range))",
+ vertex_index,
+ object->runtime.crazyspace_num_verts);
+ return;
+ }
+
+ float mat[3][3];
+ if (!invert_m3_m3(mat, object->runtime.crazyspace_deform_imats[vertex_index])) {
+ copy_v3_v3(r_displacement, displacement_deformed);
+ return;
+ }
+
+ mul_v3_m3v3(r_displacement, mat, displacement_deformed);
+}
+
+void BKE_crazyspace_api_eval_clear(Object *object)
+{
+ MEM_SAFE_FREE(object->runtime.crazyspace_deform_imats);
+ MEM_SAFE_FREE(object->runtime.crazyspace_deform_cos);
+}
+
+/** \} */
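
A hedged usage sketch for the crazyspace API added above: evaluate once (cached on the object runtime), map displacements between original and deformed space per vertex, then free the cache. The header name BKE_crazyspace.h and the caller-provided depsgraph/scene/object/reports are assumptions; only the four calls defined in the hunk are taken as given.

  /* Sketch only: assumes the declarations above land in BKE_crazyspace.h and that the
   * caller (e.g. an operator) already has a depsgraph, scene, object and report list. */
  #include "BKE_crazyspace.h"

  static void map_vertex_delta(Depsgraph *depsgraph,
                               Scene *scene,
                               Object *object,
                               struct ReportList *reports,
                               const int vertex_index,
                               float delta_orig[3])
  {
    /* Builds object->runtime.crazyspace_deform_imats/_cos lazily; a no-op when already cached. */
    BKE_crazyspace_api_eval(depsgraph, scene, object, reports);

    /* Original-space displacement -> deformed (evaluated) space. */
    float delta_deformed[3];
    BKE_crazyspace_api_displacement_to_deformed(
        object, reports, vertex_index, delta_orig, delta_deformed);

    /* ... use delta_deformed ... */

    /* And back, when an edit is made in deformed space. */
    float delta_back[3];
    BKE_crazyspace_api_displacement_to_original(
        object, reports, vertex_index, delta_deformed, delta_back);
    (void)delta_back;

    /* Free the cached matrices and coordinates once the tool is done. */
    BKE_crazyspace_api_eval_clear(object);
  }
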
diff --git a/source/blender/blenkernel/intern/cryptomatte.cc b/source/blender/blenkernel/intern/cryptomatte.cc
index d532ed9e4b2..7481d4df351 100644
--- a/source/blender/blenkernel/intern/cryptomatte.cc
+++ b/source/blender/blenkernel/intern/cryptomatte.cc
@@ -278,13 +278,13 @@ void BKE_cryptomatte_matte_id_to_entries(NodeCryptomatte *node_storage, const ch
token = token.substr(first, (last - first + 1));
if (*token.begin() == '<' && *(--token.end()) == '>') {
float encoded_hash = atof(token.substr(1, token.length() - 2).c_str());
- entry = (CryptomatteEntry *)MEM_callocN(sizeof(CryptomatteEntry), __func__);
+ entry = MEM_cnew<CryptomatteEntry>(__func__);
entry->encoded_hash = encoded_hash;
}
else {
const char *name = token.c_str();
int name_len = token.length();
- entry = (CryptomatteEntry *)MEM_callocN(sizeof(CryptomatteEntry), __func__);
+ entry = MEM_cnew<CryptomatteEntry>(__func__);
STRNCPY(entry->name, name);
uint32_t hash = BKE_cryptomatte_hash(name, name_len);
entry->encoded_hash = BKE_cryptomatte_hash_to_float(hash);
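
The two allocations above move from a cast MEM_callocN(sizeof(CryptomatteEntry), __func__) to the typed MEM_cnew<CryptomatteEntry>(__func__), which keeps the zero-initialized allocation but returns the right pointer type. A minimal sketch of the same call shape with an illustrative struct (the struct is not Blender data, and MEM_cnew is assumed to require a trivial type):

  #include "MEM_guardedalloc.h"

  struct DemoEntry { /* illustrative struct, not Blender data */
    float encoded_hash;
    char name[64];
  };

  static DemoEntry *demo_entry_create(void)
  {
    /* Zero-initialized, typed allocation; replaces the cast-MEM_callocN() pattern. */
    DemoEntry *entry = MEM_cnew<DemoEntry>(__func__);
    entry->encoded_hash = 0.0f;
    return entry;
  }

  static void demo_entry_free(DemoEntry *entry)
  {
    MEM_freeN(entry);
  }
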
diff --git a/source/blender/blenkernel/intern/curve.c b/source/blender/blenkernel/intern/curve.cc
index 2f1b01316a1..70edaccb244 100644
--- a/source/blender/blenkernel/intern/curve.c
+++ b/source/blender/blenkernel/intern/curve.cc
@@ -21,15 +21,16 @@
* \ingroup bke
*/
-#include <math.h> /* floor */
-#include <stdlib.h>
-#include <string.h>
+#include <cmath> /* floor */
+#include <cstdlib>
+#include <cstring>
#include "MEM_guardedalloc.h"
#include "BLI_blenlib.h"
#include "BLI_endian_switch.h"
#include "BLI_ghash.h"
+#include "BLI_index_range.hh"
#include "BLI_math.h"
#include "BLI_utildefines.h"
@@ -43,7 +44,7 @@
#include "DNA_defaults.h"
#include "DNA_material_types.h"
-/* for dereferencing pointers */
+/* For dereferencing pointers. */
#include "DNA_key_types.h"
#include "DNA_object_types.h"
#include "DNA_vfont_types.h"
@@ -67,10 +68,12 @@
#include "BLO_read_write.h"
+using blender::IndexRange;
+
/* globals */
/* local */
-static CLG_LogRef LOG = {"bke.curve"};
+// static CLG_LogRef LOG = {"bke.curve"};
static void curve_init_data(ID *id)
{
@@ -89,12 +92,12 @@ static void curve_copy_data(Main *bmain, ID *id_dst, const ID *id_src, const int
BLI_listbase_clear(&curve_dst->nurb);
BKE_nurbList_duplicate(&(curve_dst->nurb), &(curve_src->nurb));
- curve_dst->mat = MEM_dupallocN(curve_src->mat);
+ curve_dst->mat = (Material **)MEM_dupallocN(curve_src->mat);
- curve_dst->str = MEM_dupallocN(curve_src->str);
- curve_dst->strinfo = MEM_dupallocN(curve_src->strinfo);
- curve_dst->tb = MEM_dupallocN(curve_src->tb);
- curve_dst->batch_cache = NULL;
+ curve_dst->str = (char *)MEM_dupallocN(curve_src->str);
+ curve_dst->strinfo = (CharInfo *)MEM_dupallocN(curve_src->strinfo);
+ curve_dst->tb = (TextBox *)MEM_dupallocN(curve_src->tb);
+ curve_dst->batch_cache = nullptr;
curve_dst->bevel_profile = BKE_curveprofile_copy(curve_src->bevel_profile);
@@ -104,8 +107,8 @@ static void curve_copy_data(Main *bmain, ID *id_dst, const ID *id_src, const int
curve_dst->key->from = &curve_dst->id;
}
- curve_dst->editnurb = NULL;
- curve_dst->editfont = NULL;
+ curve_dst->editnurb = nullptr;
+ curve_dst->editfont = nullptr;
}
static void curve_free_data(ID *id)
@@ -148,9 +151,9 @@ static void curve_blend_write(BlendWriter *writer, ID *id, const void *id_addres
Curve *cu = (Curve *)id;
/* Clean up, important in undo case to reduce false detection of changed datablocks. */
- cu->editnurb = NULL;
- cu->editfont = NULL;
- cu->batch_cache = NULL;
+ cu->editnurb = nullptr;
+ cu->editfont = nullptr;
+ cu->batch_cache = nullptr;
/* write LibData */
BLO_write_id_struct(writer, Curve, id_address, &cu->id);
@@ -188,7 +191,7 @@ static void curve_blend_write(BlendWriter *writer, ID *id, const void *id_addres
}
}
- if (cu->bevel_profile != NULL) {
+ if (cu->bevel_profile != nullptr) {
BKE_curveprofile_blend_write(writer, cu->bevel_profile);
}
}
@@ -218,13 +221,13 @@ static void curve_blend_read_data(BlendDataReader *reader, ID *id)
BLO_read_data_address(reader, &cu->strinfo);
BLO_read_data_address(reader, &cu->tb);
- if (cu->vfont == NULL) {
+ if (cu->vfont == nullptr) {
BLO_read_list(reader, &(cu->nurb));
}
else {
- cu->nurb.first = cu->nurb.last = NULL;
+ cu->nurb.first = cu->nurb.last = nullptr;
- TextBox *tb = MEM_calloc_arrayN(MAXTEXTBOX, sizeof(TextBox), "TextBoxread");
+ TextBox *tb = (TextBox *)MEM_calloc_arrayN(MAXTEXTBOX, sizeof(TextBox), "TextBoxread");
if (cu->tb) {
memcpy(tb, cu->tb, cu->totbox * sizeof(TextBox));
MEM_freeN(cu->tb);
@@ -241,16 +244,16 @@ static void curve_blend_read_data(BlendDataReader *reader, ID *id)
}
}
- cu->editnurb = NULL;
- cu->editfont = NULL;
- cu->batch_cache = NULL;
+ cu->editnurb = nullptr;
+ cu->editfont = nullptr;
+ cu->batch_cache = nullptr;
LISTBASE_FOREACH (Nurb *, nu, &cu->nurb) {
BLO_read_data_address(reader, &nu->bezt);
BLO_read_data_address(reader, &nu->bp);
BLO_read_data_address(reader, &nu->knotsu);
BLO_read_data_address(reader, &nu->knotsv);
- if (cu->vfont == NULL) {
+ if (cu->vfont == nullptr) {
nu->charidx = 0;
}
@@ -261,7 +264,7 @@ static void curve_blend_read_data(BlendDataReader *reader, ID *id)
cu->texflag &= ~CU_AUTOSPACE_EVALUATED;
BLO_read_data_address(reader, &cu->bevel_profile);
- if (cu->bevel_profile != NULL) {
+ if (cu->bevel_profile != nullptr) {
BKE_curveprofile_blend_read(reader, cu->bevel_profile);
}
}
@@ -304,33 +307,33 @@ static void curve_blend_read_expand(BlendExpander *expander, ID *id)
}
IDTypeInfo IDType_ID_CU = {
- .id_code = ID_CU,
- .id_filter = FILTER_ID_CU,
- .main_listbase_index = INDEX_ID_CU,
- .struct_size = sizeof(Curve),
- .name = "Curve",
- .name_plural = "curves",
- .translation_context = BLT_I18NCONTEXT_ID_CURVE,
- .flags = IDTYPE_FLAGS_APPEND_IS_REUSABLE,
- .asset_type_info = NULL,
-
- .init_data = curve_init_data,
- .copy_data = curve_copy_data,
- .free_data = curve_free_data,
- .make_local = NULL,
- .foreach_id = curve_foreach_id,
- .foreach_cache = NULL,
- .foreach_path = NULL,
- .owner_get = NULL,
-
- .blend_write = curve_blend_write,
- .blend_read_data = curve_blend_read_data,
- .blend_read_lib = curve_blend_read_lib,
- .blend_read_expand = curve_blend_read_expand,
-
- .blend_read_undo_preserve = NULL,
-
- .lib_override_apply_post = NULL,
+ /* id_code */ ID_CU,
+ /* id_filter */ FILTER_ID_CU,
+ /* main_listbase_index */ INDEX_ID_CU,
+ /* struct_size */ sizeof(Curve),
+ /* name */ "Curve",
+ /* name_plural */ "curves",
+ /* translation_context */ BLT_I18NCONTEXT_ID_CURVE,
+ /* flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE,
+ /* asset_type_info */ nullptr,
+
+ /* init_data */ curve_init_data,
+ /* copy_data */ curve_copy_data,
+ /* free_data */ curve_free_data,
+ /* make_local */ nullptr,
+ /* foreach_id */ curve_foreach_id,
+ /* foreach_cache */ nullptr,
+ /* foreach_path */ nullptr,
+ /* owner_get */ nullptr,
+
+ /* blend_write */ curve_blend_write,
+ /* blend_read_data */ curve_blend_read_data,
+ /* blend_read_lib */ curve_blend_read_lib,
+ /* blend_read_expand */ curve_blend_read_expand,
+
+ /* blend_read_undo_preserve */ nullptr,
+
+ /* lib_override_apply_post */ nullptr,
};
void BKE_curve_editfont_free(Curve *cu)
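
The IDTypeInfo table above switches from C99 designated initializers to positional initializers with per-field comments because the file now builds as C++: designated initializers only arrived in C++20 (and only in declaration order), which the C++ level used for this code does not assume. A tiny illustration with a made-up struct:

  struct DemoTypeInfo { /* illustrative, stands in for IDTypeInfo */
    int id_code;
    const char *name;
    void (*init_data)(void *);
  };

  /* C99 allowed out-of-order designated initializers:
   *   DemoTypeInfo info = {.name = "Curve", .id_code = 1, .init_data = NULL};
   * Pre-C++20 C++ does not, so the table uses positional values with one comment per field. */
  static DemoTypeInfo demo_info = {
      /* id_code */ 1,
      /* name */ "Curve",
      /* init_data */ nullptr,
  };
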
@@ -349,21 +352,21 @@ void BKE_curve_editfont_free(Curve *cu)
}
MEM_freeN(ef);
- cu->editfont = NULL;
+ cu->editfont = nullptr;
}
}
static void curve_editNurb_keyIndex_cv_free_cb(void *val)
{
- CVKeyIndex *index = val;
+ CVKeyIndex *index = (CVKeyIndex *)val;
MEM_freeN(index->orig_cv);
MEM_freeN(val);
}
void BKE_curve_editNurb_keyIndex_delCV(GHash *keyindex, const void *cv)
{
- BLI_assert(keyindex != NULL);
- BLI_ghash_remove(keyindex, cv, NULL, curve_editNurb_keyIndex_cv_free_cb);
+ BLI_assert(keyindex != nullptr);
+ BLI_ghash_remove(keyindex, cv, nullptr, curve_editNurb_keyIndex_cv_free_cb);
}
void BKE_curve_editNurb_keyIndex_free(GHash **keyindex)
@@ -371,8 +374,8 @@ void BKE_curve_editNurb_keyIndex_free(GHash **keyindex)
if (!(*keyindex)) {
return;
}
- BLI_ghash_free(*keyindex, NULL, curve_editNurb_keyIndex_cv_free_cb);
- *keyindex = NULL;
+ BLI_ghash_free(*keyindex, nullptr, curve_editNurb_keyIndex_cv_free_cb);
+ *keyindex = nullptr;
}
void BKE_curve_editNurb_free(Curve *cu)
@@ -381,7 +384,7 @@ void BKE_curve_editNurb_free(Curve *cu)
BKE_nurbList_free(&cu->editnurb->nurbs);
BKE_curve_editNurb_keyIndex_free(&cu->editnurb->keyindex);
MEM_freeN(cu->editnurb);
- cu->editnurb = NULL;
+ cu->editnurb = nullptr;
}
}
@@ -395,12 +398,12 @@ void BKE_curve_init(Curve *cu, const short curve_type)
cu->flag |= CU_FRONT | CU_BACK;
cu->vfont = cu->vfontb = cu->vfonti = cu->vfontbi = BKE_vfont_builtin_get();
cu->vfont->id.us += 4;
- cu->str = MEM_malloc_arrayN(12, sizeof(unsigned char), "str");
+ cu->str = (char *)MEM_malloc_arrayN(12, sizeof(unsigned char), "str");
BLI_strncpy(cu->str, "Text", 12);
cu->len = cu->len_char32 = cu->pos = 4;
- cu->strinfo = MEM_calloc_arrayN(12, sizeof(CharInfo), "strinfo new");
+ cu->strinfo = (CharInfo *)MEM_calloc_arrayN(12, sizeof(CharInfo), "strinfo new");
cu->totbox = cu->actbox = 1;
- cu->tb = MEM_calloc_arrayN(MAXTEXTBOX, sizeof(TextBox), "textbox");
+ cu->tb = (TextBox *)MEM_calloc_arrayN(MAXTEXTBOX, sizeof(TextBox), "textbox");
cu->tb[0].w = cu->tb[0].h = 0.0;
}
else if (cu->type == OB_SURF) {
@@ -408,7 +411,7 @@ void BKE_curve_init(Curve *cu, const short curve_type)
cu->resolu = 4;
cu->resolv = 4;
}
- cu->bevel_profile = NULL;
+ cu->bevel_profile = nullptr;
}
Curve *BKE_curve_add(Main *bmain, const char *name, int type)
@@ -416,7 +419,7 @@ Curve *BKE_curve_add(Main *bmain, const char *name, int type)
Curve *cu;
/* We cannot use #BKE_id_new here as we need some custom initialization code. */
- cu = BKE_libblock_alloc(bmain, ID_CU, name, 0);
+ cu = (Curve *)BKE_libblock_alloc(bmain, ID_CU, name, 0);
BKE_curve_init(cu, type);
@@ -429,7 +432,7 @@ ListBase *BKE_curve_editNurbs_get(Curve *cu)
return &cu->editnurb->nurbs;
}
- return NULL;
+ return nullptr;
}
const ListBase *BKE_curve_editNurbs_get_for_read(const Curve *cu)
@@ -438,7 +441,7 @@ const ListBase *BKE_curve_editNurbs_get_for_read(const Curve *cu)
return &cu->editnurb->nurbs;
}
- return NULL;
+ return nullptr;
}
short BKE_curve_type_get(const Curve *cu)
@@ -481,10 +484,10 @@ void BKE_curve_dimension_update(Curve *cu)
void BKE_curve_type_test(Object *ob)
{
- ob->type = BKE_curve_type_get(ob->data);
+ ob->type = BKE_curve_type_get((Curve *)ob->data);
if (ob->type == OB_CURVE) {
- Curve *cu = ob->data;
+ Curve *cu = (Curve *)ob->data;
if (CU_IS_2D(cu)) {
BKE_curve_dimension_update(cu);
}
@@ -495,15 +498,15 @@ BoundBox *BKE_curve_boundbox_get(Object *ob)
{
/* This is Object-level data access,
* DO NOT touch to Mesh's bb, would be totally thread-unsafe. */
- if (ob->runtime.bb == NULL || ob->runtime.bb->flag & BOUNDBOX_DIRTY) {
- Curve *cu = ob->data;
+ if (ob->runtime.bb == nullptr || ob->runtime.bb->flag & BOUNDBOX_DIRTY) {
+ Curve *cu = (Curve *)ob->data;
float min[3], max[3];
INIT_MINMAX(min, max);
BKE_curve_minmax(cu, true, min, max);
- if (ob->runtime.bb == NULL) {
- ob->runtime.bb = MEM_mallocN(sizeof(*ob->runtime.bb), __func__);
+ if (ob->runtime.bb == nullptr) {
+ ob->runtime.bb = (BoundBox *)MEM_mallocN(sizeof(*ob->runtime.bb), __func__);
}
BKE_boundbox_init_from_minmax(ob->runtime.bb, min, max);
ob->runtime.bb->flag &= ~BOUNDBOX_DIRTY;
@@ -618,26 +621,26 @@ int BKE_nurbList_verts_count_without_handles(const ListBase *nurb)
void BKE_nurb_free(Nurb *nu)
{
- if (nu == NULL) {
+ if (nu == nullptr) {
return;
}
if (nu->bezt) {
MEM_freeN(nu->bezt);
}
- nu->bezt = NULL;
+ nu->bezt = nullptr;
if (nu->bp) {
MEM_freeN(nu->bp);
}
- nu->bp = NULL;
+ nu->bp = nullptr;
if (nu->knotsu) {
MEM_freeN(nu->knotsu);
}
- nu->knotsu = NULL;
+ nu->knotsu = nullptr;
if (nu->knotsv) {
MEM_freeN(nu->knotsv);
}
- nu->knotsv = NULL;
+ nu->knotsv = nullptr;
// if (nu->trim.first) freeNurblist(&(nu->trim));
MEM_freeN(nu);
@@ -645,7 +648,7 @@ void BKE_nurb_free(Nurb *nu)
void BKE_nurbList_free(ListBase *lb)
{
- if (lb == NULL) {
+ if (lb == nullptr) {
return;
}
@@ -661,8 +664,8 @@ Nurb *BKE_nurb_duplicate(const Nurb *nu)
int len;
newnu = (Nurb *)MEM_mallocN(sizeof(Nurb), "duplicateNurb");
- if (newnu == NULL) {
- return NULL;
+ if (newnu == nullptr) {
+ return nullptr;
}
memcpy(newnu, nu, sizeof(Nurb));
@@ -675,19 +678,19 @@ Nurb *BKE_nurb_duplicate(const Nurb *nu)
newnu->bp = (BPoint *)MEM_malloc_arrayN(len, sizeof(BPoint), "duplicateNurb3");
memcpy(newnu->bp, nu->bp, len * sizeof(BPoint));
- newnu->knotsu = newnu->knotsv = NULL;
+ newnu->knotsu = newnu->knotsv = nullptr;
if (nu->knotsu) {
len = KNOTSU(nu);
if (len) {
- newnu->knotsu = MEM_malloc_arrayN(len, sizeof(float), "duplicateNurb4");
+ newnu->knotsu = (float *)MEM_malloc_arrayN(len, sizeof(float), "duplicateNurb4");
memcpy(newnu->knotsu, nu->knotsu, sizeof(float) * len);
}
}
if (nu->pntsv > 1 && nu->knotsv) {
len = KNOTSV(nu);
if (len) {
- newnu->knotsv = MEM_malloc_arrayN(len, sizeof(float), "duplicateNurb5");
+ newnu->knotsv = (float *)MEM_malloc_arrayN(len, sizeof(float), "duplicateNurb5");
memcpy(newnu->knotsv, nu->knotsv, sizeof(float) * len);
}
}
@@ -707,8 +710,8 @@ Nurb *BKE_nurb_copy(Nurb *src, int pntsu, int pntsv)
newnu->pntsv = pntsv;
/* caller can manually handle these arrays */
- newnu->knotsu = NULL;
- newnu->knotsv = NULL;
+ newnu->knotsu = nullptr;
+ newnu->knotsv = nullptr;
if (src->bezt) {
newnu->bezt = (BezTriple *)MEM_malloc_arrayN(pntsu * pntsv, sizeof(BezTriple), "copyNurb2");
@@ -837,7 +840,7 @@ float BKE_nurb_calc_length(const Nurb *nu, int resolution)
}
}
else if (nu->type == CU_BEZIER) {
- points = MEM_mallocN(sizeof(float[3]) * (resolu + 1), "getLength_bezier");
+ points = (float *)MEM_mallocN(sizeof(float[3]) * (resolu + 1), "getLength_bezier");
a = nu->pntsu - 1;
bezt = nu->bezt;
if (nu->flagu & CU_NURB_CYCLIC) {
@@ -881,9 +884,9 @@ float BKE_nurb_calc_length(const Nurb *nu, int resolution)
else if (nu->type == CU_NURBS) {
if (nu->pntsv == 1) {
/* important to zero for BKE_nurb_makeCurve. */
- points = MEM_callocN(sizeof(float[3]) * pntsu * resolu, "getLength_nurbs");
+ points = (float *)MEM_callocN(sizeof(float[3]) * pntsu * resolu, "getLength_nurbs");
- BKE_nurb_makeCurve(nu, points, NULL, NULL, NULL, resolu, sizeof(float[3]));
+ BKE_nurb_makeCurve(nu, points, nullptr, nullptr, nullptr, resolu, sizeof(float[3]));
if (nu->flagu & CU_NURB_CYCLIC) {
b = pntsu * resolu + 1;
@@ -911,7 +914,7 @@ float BKE_nurb_calc_length(const Nurb *nu, int resolution)
void BKE_nurb_points_add(Nurb *nu, int number)
{
- nu->bp = MEM_recallocN(nu->bp, (nu->pntsu + number) * sizeof(BPoint));
+ nu->bp = (BPoint *)MEM_recallocN(nu->bp, (nu->pntsu + number) * sizeof(BPoint));
BPoint *bp;
int i;
@@ -927,7 +930,7 @@ void BKE_nurb_bezierPoints_add(Nurb *nu, int number)
BezTriple *bezt;
int i;
- nu->bezt = MEM_recallocN(nu->bezt, (nu->pntsu + number) * sizeof(BezTriple));
+ nu->bezt = (BezTriple *)MEM_recallocN(nu->bezt, (nu->pntsu + number) * sizeof(BezTriple));
for (i = 0, bezt = &nu->bezt[nu->pntsu]; i < number; i++, bezt++) {
bezt->radius = 1.0f;
@@ -978,7 +981,7 @@ BezTriple *BKE_nurb_bezt_get_next(Nurb *nu, BezTriple *bezt)
bezt_next = nu->bezt;
}
else {
- bezt_next = NULL;
+ bezt_next = nullptr;
}
}
else {
@@ -999,7 +1002,7 @@ BPoint *BKE_nurb_bpoint_get_next(Nurb *nu, BPoint *bp)
bp_next = nu->bp;
}
else {
- bp_next = NULL;
+ bp_next = nullptr;
}
}
else {
@@ -1021,7 +1024,7 @@ BezTriple *BKE_nurb_bezt_get_prev(Nurb *nu, BezTriple *bezt)
bezt_prev = &nu->bezt[nu->pntsu - 1];
}
else {
- bezt_prev = NULL;
+ bezt_prev = nullptr;
}
}
else {
@@ -1043,7 +1046,7 @@ BPoint *BKE_nurb_bpoint_get_prev(Nurb *nu, BPoint *bp)
bp_prev = &nu->bp[nu->pntsu - 1];
}
else {
- bp_prev = NULL;
+ bp_prev = nullptr;
}
}
else {
@@ -1160,81 +1163,34 @@ void BKE_nurb_bpoint_calc_plane(struct Nurb *nu, BPoint *bp, float r_plane[3])
static void calcknots(float *knots, const int pnts, const short order, const short flag)
{
- /* knots: number of pnts NOT corrected for cyclic */
- const int pnts_order = pnts + order;
- float k;
- int a;
-
- switch (flag & (CU_NURB_ENDPOINT | CU_NURB_BEZIER)) {
- case CU_NURB_ENDPOINT:
- k = 0.0;
- for (a = 1; a <= pnts_order; a++) {
- knots[a - 1] = k;
- if (a >= order && a <= pnts) {
- k += 1.0f;
- }
- }
- break;
- case CU_NURB_BEZIER:
- /* Warning, the order MUST be 2 or 4,
- * if this is not enforced, the displist will be corrupt */
- if (order == 4) {
- k = 0.34;
- for (a = 0; a < pnts_order; a++) {
- knots[a] = floorf(k);
- k += (1.0f / 3.0f);
- }
- }
- else if (order == 3) {
- k = 0.6f;
- for (a = 0; a < pnts_order; a++) {
- if (a >= order && a <= pnts) {
- k += 0.5f;
- }
- knots[a] = floorf(k);
- }
- }
- else {
- CLOG_ERROR(&LOG, "bez nurb curve order is not 3 or 4, should never happen");
- }
- break;
- default:
- for (a = 0; a < pnts_order; a++) {
- knots[a] = (float)a;
- }
- break;
- }
-}
-
-static void makecyclicknots(float *knots, int pnts, short order)
-/* pnts, order: number of pnts NOT corrected for cyclic */
-{
- int a, b, order2, c;
+ const bool is_cyclic = flag & CU_NURB_CYCLIC;
+ const bool is_bezier = flag & CU_NURB_BEZIER && !(flag & CU_NURB_ENDPOINT);
+ const bool is_end_point = flag & CU_NURB_ENDPOINT && !(flag & CU_NURB_BEZIER);
+ /* Inner knots are always repeated once, except in the Bezier case. */
+ const int repeat_inner = is_bezier ? order - 1 : 1;
+ /* How many times to repeat 0.0 at the beginning of the knot vector. */
+ const int head = is_end_point && !is_cyclic ? order : (is_bezier ? order / 2 : 1);
+ /* Number of trailing knots that replicate the spacing of the starting knots.
+ * Covers both the Cyclic and EndPoint cases. */
+ const int tail = is_cyclic ? 2 * order - 1 : (is_end_point ? order : 0);
- if (knots == NULL) {
- return;
- }
+ const int knot_count = pnts + order + (is_cyclic ? order - 1 : 0);
- order2 = order - 1;
+ int r = head;
+ float current = 0.0f;
- /* do first long rows (order -1), remove identical knots at endpoints */
- if (order > 2) {
- b = pnts + order2;
- for (a = 1; a < order2; a++) {
- if (knots[b] != knots[b - a]) {
- break;
- }
- }
- if (a == order2) {
- knots[pnts + order - 2] += 1.0f;
+ for (const int i : IndexRange(knot_count - tail)) {
+ knots[i] = current;
+ r--;
+ if (r == 0) {
+ current += 1.0;
+ r = repeat_inner;
}
}
- b = order;
- c = pnts + order + order2;
- for (a = pnts + order2; a < c; a++) {
- knots[a] = knots[a - 1] + (knots[b] - knots[b - 1]);
- b--;
+ const int tail_index = knot_count - tail;
+ for (const int i : IndexRange(tail)) {
+ knots[tail_index + i] = current + (knots[i] - knots[0]);
}
}
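
The rewritten calcknots() above produces uniform, endpoint (clamped), Bezier and cyclic knot vectors from one head / repeat_inner / tail scheme, which is why the separate makecyclicknots() pass could be dropped. A standalone sketch of the same scheme with the flag tests reduced to three booleans, plus a worked clamped example (nothing below is Blender API):

  #include <cstdio>
  #include <vector>

  /* Same head / repeat_inner / tail scheme as calcknots() above, with the Blender flag tests
   * replaced by plain booleans. Returns pnts + order knots, plus order - 1 more when cyclic. */
  static std::vector<float> make_knots(
      const int pnts, const int order, const bool cyclic, const bool bezier, const bool end_point)
  {
    const int repeat_inner = bezier ? order - 1 : 1;
    const int head = (end_point && !cyclic) ? order : (bezier ? order / 2 : 1);
    const int tail = cyclic ? 2 * order - 1 : (end_point ? order : 0);
    const int knot_count = pnts + order + (cyclic ? order - 1 : 0);

    std::vector<float> knots(knot_count);
    int r = head;
    float current = 0.0f;
    for (int i = 0; i < knot_count - tail; i++) {
      knots[i] = current;
      if (--r == 0) {
        current += 1.0f;
        r = repeat_inner;
      }
    }
    /* Trailing knots repeat the spacing of the leading ones (cyclic / endpoint cases). */
    const int tail_index = knot_count - tail;
    for (int i = 0; i < tail; i++) {
      knots[tail_index + i] = current + (knots[i] - knots[0]);
    }
    return knots;
  }

  int main()
  {
    /* Clamped (endpoint) cubic with 6 control points: prints 0 0 0 0 1 2 3 3 3 3. */
    for (const float knot : make_knots(6, 4, false, false, true)) {
      printf("%.0f ", knot);
    }
    printf("\n");
    return 0;
  }

For pnts = 6, order = 4 with the endpoint flag this yields 0 0 0 0 1 2 3 3 3 3, the usual clamped knot vector.
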
@@ -1246,17 +1202,11 @@ static void makeknots(Nurb *nu, short uv)
MEM_freeN(nu->knotsu);
}
if (BKE_nurb_check_valid_u(nu)) {
- nu->knotsu = MEM_calloc_arrayN(KNOTSU(nu) + 1, sizeof(float), "makeknots");
- if (nu->flagu & CU_NURB_CYCLIC) {
- calcknots(nu->knotsu, nu->pntsu, nu->orderu, 0); /* cyclic should be uniform */
- makecyclicknots(nu->knotsu, nu->pntsu, nu->orderu);
- }
- else {
- calcknots(nu->knotsu, nu->pntsu, nu->orderu, nu->flagu);
- }
+ nu->knotsu = (float *)MEM_calloc_arrayN(KNOTSU(nu) + 1, sizeof(float), "makeknots");
+ calcknots(nu->knotsu, nu->pntsu, nu->orderu, nu->flagu);
}
else {
- nu->knotsu = NULL;
+ nu->knotsu = nullptr;
}
}
else if (uv == 2) {
@@ -1264,17 +1214,11 @@ static void makeknots(Nurb *nu, short uv)
MEM_freeN(nu->knotsv);
}
if (BKE_nurb_check_valid_v(nu)) {
- nu->knotsv = MEM_calloc_arrayN(KNOTSV(nu) + 1, sizeof(float), "makeknots");
- if (nu->flagv & CU_NURB_CYCLIC) {
- calcknots(nu->knotsv, nu->pntsv, nu->orderv, 0); /* cyclic should be uniform */
- makecyclicknots(nu->knotsv, nu->pntsv, nu->orderv);
- }
- else {
- calcknots(nu->knotsv, nu->pntsv, nu->orderv, nu->flagv);
- }
+ nu->knotsv = (float *)MEM_calloc_arrayN(KNOTSV(nu) + 1, sizeof(float), "makeknots");
+ calcknots(nu->knotsv, nu->pntsv, nu->orderv, nu->flagv);
}
else {
- nu->knotsv = NULL;
+ nu->knotsv = nullptr;
}
}
}
@@ -1378,7 +1322,7 @@ void BKE_nurb_makeFaces(const Nurb *nu, float *coord_array, int rowstride, int r
int totu = nu->pntsu * resolu, totv = nu->pntsv * resolv;
- if (nu->knotsu == NULL || nu->knotsv == NULL) {
+ if (nu->knotsu == nullptr || nu->knotsv == nullptr) {
return;
}
if (nu->orderu > nu->pntsu) {
@@ -1387,7 +1331,7 @@ void BKE_nurb_makeFaces(const Nurb *nu, float *coord_array, int rowstride, int r
if (nu->orderv > nu->pntsv) {
return;
}
- if (coord_array == NULL) {
+ if (coord_array == nullptr) {
return;
}
@@ -1438,7 +1382,7 @@ void BKE_nurb_makeFaces(const Nurb *nu, float *coord_array, int rowstride, int r
jstart = (int *)MEM_malloc_arrayN(totv, sizeof(float), "makeNurbfaces4");
jend = (int *)MEM_malloc_arrayN(totv, sizeof(float), "makeNurbfaces5");
- /* precalculation of basisv and jstart, jend */
+ /* Pre-calculation of `basisv` and `jstart`, `jend`. */
if (nu->flagv & CU_NURB_CYCLIC) {
cycl = nu->orderv - 1;
}
@@ -1576,13 +1520,13 @@ void BKE_nurb_makeCurve(const Nurb *nu,
*weight_fp = weight_array;
int i, len, istart, iend, cycl;
- if (nu->knotsu == NULL) {
+ if (nu->knotsu == nullptr) {
return;
}
if (nu->orderu > nu->pntsu) {
return;
}
- if (coord_array == NULL) {
+ if (coord_array == nullptr) {
return;
}
@@ -1676,16 +1620,16 @@ void BKE_nurb_makeCurve(const Nurb *nu,
}
}
- coord_fp = POINTER_OFFSET(coord_fp, stride);
+ coord_fp = (float *)POINTER_OFFSET(coord_fp, stride);
if (tilt_fp) {
- tilt_fp = POINTER_OFFSET(tilt_fp, stride);
+ tilt_fp = (float *)POINTER_OFFSET(tilt_fp, stride);
}
if (radius_fp) {
- radius_fp = POINTER_OFFSET(radius_fp, stride);
+ radius_fp = (float *)POINTER_OFFSET(radius_fp, stride);
}
if (weight_fp) {
- weight_fp = POINTER_OFFSET(weight_fp, stride);
+ weight_fp = (float *)POINTER_OFFSET(weight_fp, stride);
}
u += ustep;
@@ -1734,7 +1678,7 @@ void BKE_curve_calc_coords_axis(const BezTriple *bezt_array,
r_points_offset,
(int)resolu,
stride);
- r_points_offset = POINTER_OFFSET(r_points_offset, resolu_stride);
+ r_points_offset = (float *)POINTER_OFFSET(r_points_offset, resolu_stride);
}
if (is_cyclic) {
@@ -1747,19 +1691,19 @@ void BKE_curve_calc_coords_axis(const BezTriple *bezt_array,
r_points_offset,
(int)resolu,
stride);
- r_points_offset = POINTER_OFFSET(r_points_offset, resolu_stride);
+ r_points_offset = (float *)POINTER_OFFSET(r_points_offset, resolu_stride);
if (use_cyclic_duplicate_endpoint) {
*r_points_offset = *r_points;
- r_points_offset = POINTER_OFFSET(r_points_offset, stride);
+ r_points_offset = (float *)POINTER_OFFSET(r_points_offset, stride);
}
}
else {
- float *r_points_last = POINTER_OFFSET(r_points, bezt_array_last * resolu_stride);
+ float *r_points_last = (float *)POINTER_OFFSET(r_points, bezt_array_last * resolu_stride);
*r_points_last = bezt_array[bezt_array_last].vec[1][axis];
- r_points_offset = POINTER_OFFSET(r_points_offset, stride);
+ r_points_offset = (float *)POINTER_OFFSET(r_points_offset, stride);
}
- BLI_assert(POINTER_OFFSET(r_points, points_len * stride) == r_points_offset);
+ BLI_assert((float *)POINTER_OFFSET(r_points, points_len * stride) == r_points_offset);
UNUSED_VARS_NDEBUG(points_len);
}
@@ -1784,7 +1728,7 @@ void BKE_curve_forward_diff_bezier(
for (a = 0; a <= it; a++) {
*p = q0;
- p = POINTER_OFFSET(p, stride);
+ p = (float *)POINTER_OFFSET(p, stride);
q0 += q1;
q1 += q2;
q2 += q3;
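
The loop above is the inner step of forward differencing: once the setup code (outside this hunk) has folded the step size into q1..q3, each output point costs only three additions per axis. A standalone scalar sketch of the technique, checked against direct Bernstein evaluation (axis handling and striding stripped; the derivation of the q values is the standard one, not copied from the Blender setup code):

  #include <cstdio>

  /* Evaluate one coordinate of a cubic Bezier at steps + 1 evenly spaced parameters using
   * forward differences: only additions inside the loop, exactly the q0..q3 update above. */
  static void forward_diff_bezier_1d(
      const float p0, const float p1, const float p2, const float p3, float *out, const int steps)
  {
    const float h = 1.0f / (float)steps;
    /* Power-basis coefficients: f(t) = a*t^3 + b*t^2 + c*t + d, with d = p0. */
    const float a = -p0 + 3.0f * p1 - 3.0f * p2 + p3;
    const float b = 3.0f * p0 - 6.0f * p1 + 3.0f * p2;
    const float c = -3.0f * p0 + 3.0f * p1;
    float q0 = p0;                                      /* f(0) */
    float q1 = a * h * h * h + b * h * h + c * h;       /* 1st forward difference */
    float q2 = 6.0f * a * h * h * h + 2.0f * b * h * h; /* 2nd forward difference */
    float q3 = 6.0f * a * h * h * h;                    /* 3rd forward difference (constant) */
    for (int i = 0; i <= steps; i++) {
      out[i] = q0;
      q0 += q1;
      q1 += q2;
      q2 += q3;
    }
  }

  int main()
  {
    const int steps = 8;
    float fd[steps + 1];
    forward_diff_bezier_1d(0.0f, 1.0f, 3.0f, 2.0f, fd, steps);
    for (int i = 0; i <= steps; i++) {
      const float t = (float)i / (float)steps;
      const float s = 1.0f - t;
      /* Direct Bernstein evaluation for comparison; both columns agree to float precision. */
      const float ref = s * s * s * 0.0f + 3.0f * s * s * t * 1.0f + 3.0f * s * t * t * 3.0f +
                        t * t * t * 2.0f;
      printf("t=%.3f  forward-diff=%.5f  direct=%.5f\n", t, fd[i], ref);
    }
    return 0;
  }
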
@@ -1809,7 +1753,7 @@ void BKE_curve_forward_diff_tangent_bezier(
for (a = 0; a <= it; a++) {
*p = q0;
- p = POINTER_OFFSET(p, stride);
+ p = (float *)POINTER_OFFSET(p, stride);
q0 += q1;
q1 += q2;
}
@@ -1835,7 +1779,7 @@ static void forward_diff_bezier_cotangent(const float p0[3],
(-18.0f * t + 6.0f) * p2[i] + (6.0f * t) * p3[i];
}
normalize_v3(p);
- p = POINTER_OFFSET(p, stride);
+ p = (float *)POINTER_OFFSET(p, stride);
}
}
@@ -1948,7 +1892,7 @@ struct BevelSort {
static int vergxcobev(const void *a1, const void *a2)
{
- const struct BevelSort *x1 = a1, *x2 = a2;
+ const struct BevelSort *x1 = (BevelSort *)a1, *x2 = (BevelSort *)a2;
if (x1->left > x2->left) {
return 1;
@@ -2022,7 +1966,7 @@ static void tilt_bezpart(const BezTriple *prevbezt,
float fac, dfac, t[4];
int a;
- if (tilt_array == NULL && radius_array == NULL) {
+ if (tilt_array == nullptr && radius_array == nullptr) {
return;
}
@@ -2070,7 +2014,7 @@ static void tilt_bezpart(const BezTriple *prevbezt,
t[3] * next->tilt;
}
- tilt_array = POINTER_OFFSET(tilt_array, stride);
+ tilt_array = (float *)POINTER_OFFSET(tilt_array, stride);
}
if (radius_array) {
@@ -2084,14 +2028,14 @@ static void tilt_bezpart(const BezTriple *prevbezt,
else {
/* reuse interpolation from tilt if we can */
- if (tilt_array == NULL || nu->tilt_interp != nu->radius_interp) {
+ if (tilt_array == nullptr || nu->tilt_interp != nu->radius_interp) {
key_curve_position_weights(fac, t, nu->radius_interp);
}
*radius_array = t[0] * pprev->radius + t[1] * prevbezt->radius + t[2] * bezt->radius +
t[3] * next->radius;
}
- radius_array = POINTER_OFFSET(radius_array, stride);
+ radius_array = (float *)POINTER_OFFSET(radius_array, stride);
}
if (weight_array) {
@@ -2099,15 +2043,15 @@ static void tilt_bezpart(const BezTriple *prevbezt,
*weight_array = prevbezt->weight + (bezt->weight - prevbezt->weight) *
(3.0f * fac * fac - 2.0f * fac * fac * fac);
- weight_array = POINTER_OFFSET(weight_array, stride);
+ weight_array = (float *)POINTER_OFFSET(weight_array, stride);
}
}
}
-/* make_bevel_list_3D_* funcs, at a minimum these must
- * fill in the bezp->quat and bezp->dir values */
+/* `make_bevel_list_3D_*` functions, at a minimum these must
+ * fill in the #BevPoint.quat and #BevPoint.dir values. */
-/* utility for make_bevel_list_3D_* funcs */
+/** Utility for `make_bevel_list_3D_*` functions. */
static void bevel_list_calc_bisect(BevList *bl)
{
BevPoint *bevp2, *bevp1, *bevp0;
@@ -2329,14 +2273,14 @@ static void make_bevel_list_3D_minimum_twist(BevList *bl)
/* Need to correct for the start/end points not matching
* do this by calculating the tilt angle difference, then apply
- * the rotation gradually over the entire curve
+ * the rotation gradually over the entire curve.
*
- * note that the split is between last and second last, rather than first/last as youd expect.
+ * Note that the split is between last and second last, rather than first/last as you'd expect.
*
* real order is like this
* 0,1,2,3,4 --> 1,2,3,4,0
*
- * this is why we compare last with second last
+ * This is why we compare last with second last.
*/
float vec_1[3] = {0, 1, 0}, vec_2[3] = {0, 1, 0}, angle, ang_fac, cross_tmp[3];
@@ -2489,12 +2433,15 @@ static void make_bevel_list_segment_3D(BevList *bl)
normalize_v3(bevp1->dir);
vec_to_quat(bevp1->quat, bevp1->dir, 5, 1);
-
axis_angle_to_quat(q, bevp1->dir, bevp1->tilt);
mul_qt_qtqt(bevp1->quat, q, bevp1->quat);
normalize_qt(bevp1->quat);
+
copy_v3_v3(bevp2->dir, bevp1->dir);
- copy_qt_qt(bevp2->quat, bevp1->quat);
+ vec_to_quat(bevp2->quat, bevp2->dir, 5, 1);
+ axis_angle_to_quat(q, bevp2->dir, bevp2->tilt);
+ mul_qt_qtqt(bevp2->quat, q, bevp2->quat);
+ normalize_qt(bevp2->quat);
}
/* only for 2 points */
@@ -2597,13 +2544,13 @@ static void bevlist_firstlast_direction_calc_from_bpoint(const Nurb *nu, BevList
void BKE_curve_bevelList_free(ListBase *bev)
{
LISTBASE_FOREACH_MUTABLE (BevList *, bl, bev) {
- if (bl->seglen != NULL) {
+ if (bl->seglen != nullptr) {
MEM_freeN(bl->seglen);
}
- if (bl->segbevcount != NULL) {
+ if (bl->segbevcount != nullptr) {
MEM_freeN(bl->segbevcount);
}
- if (bl->bevpoints != NULL) {
+ if (bl->bevpoints != nullptr) {
MEM_freeN(bl->bevpoints);
}
MEM_freeN(bl);
@@ -2614,22 +2561,21 @@ void BKE_curve_bevelList_free(ListBase *bev)
void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_render)
{
- /*
- * - convert all curves to polys, with indication of resol and flags for double-vertices
- * - possibly; do a smart vertice removal (in case Nurb)
- * - separate in individual blocks with BoundBox
- * - AutoHole detection
+ /* - Convert all curves to polys, with indication of resolution and flags for double-vertices.
+ * - Possibly; do a smart vertex removal (in case #Nurb).
+ * - Separate in individual blocks with #BoundBox.
+ * - Auto-hole detection.
*/
- /* this function needs an object, because of tflag and upflag */
- Curve *cu = ob->data;
+ /* This function needs an object, because of `tflag` and `upflag`. */
+ Curve *cu = (Curve *)ob->data;
BezTriple *bezt, *prevbezt;
BPoint *bp;
BevList *blnew;
- BevPoint *bevp2, *bevp1 = NULL, *bevp0;
+ BevPoint *bevp2, *bevp1 = nullptr, *bevp0;
const float threshold = 0.00001f;
float min, inp;
- float *seglen = NULL;
+ float *seglen = nullptr;
struct BevelSort *sortdata, *sd, *sd1;
int a, b, nr, poly, resolu = 0, len = 0, segcount;
int *segbevcount;
@@ -2637,7 +2583,7 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
bool is_editmode = false;
ListBase *bev;
- /* segbevcount alsp requires seglen. */
+ /* segbevcount also requires seglen. */
const bool need_seglen = ELEM(
cu->bevfac1_mapping, CU_BEVFAC_MAP_SEGMENT, CU_BEVFAC_MAP_SPLINE) ||
ELEM(cu->bevfac2_mapping, CU_BEVFAC_MAP_SEGMENT, CU_BEVFAC_MAP_SPLINE);
@@ -2654,7 +2600,7 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
BKE_curve_bevelList_free(&ob->runtime.curve_cache->bev);
if (cu->editnurb && ob->type != OB_FONT) {
- is_editmode = 1;
+ is_editmode = true;
}
LISTBASE_FOREACH (const Nurb *, nu, nurbs) {
@@ -2665,8 +2611,8 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
/* check we are a single point? also check we are not a surface and that the orderu is sane,
* enforced in the UI but can go wrong possibly */
if (!BKE_nurb_check_valid_u(nu)) {
- BevList *bl = MEM_callocN(sizeof(BevList), "makeBevelList1");
- bl->bevpoints = MEM_calloc_arrayN(1, sizeof(BevPoint), "makeBevelPoints1");
+ BevList *bl = (BevList *)MEM_callocN(sizeof(BevList), "makeBevelList1");
+ bl->bevpoints = (BevPoint *)MEM_calloc_arrayN(1, sizeof(BevPoint), "makeBevelPoints1");
BLI_addtail(bev, bl);
bl->nr = 0;
bl->charidx = nu->charidx;
@@ -2695,11 +2641,11 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
if (nu->type == CU_POLY) {
len = nu->pntsu;
- BevList *bl = MEM_callocN(sizeof(BevList), "makeBevelList2");
- bl->bevpoints = MEM_calloc_arrayN(len, sizeof(BevPoint), "makeBevelPoints2");
+ BevList *bl = MEM_cnew<BevList>(__func__);
+ bl->bevpoints = (BevPoint *)MEM_calloc_arrayN(len, sizeof(BevPoint), __func__);
if (need_seglen && (nu->flagu & CU_NURB_CYCLIC) == 0) {
- bl->seglen = MEM_malloc_arrayN(segcount, sizeof(float), "makeBevelList2_seglen");
- bl->segbevcount = MEM_malloc_arrayN(segcount, sizeof(int), "makeBevelList2_segbevcount");
+ bl->seglen = (float *)MEM_malloc_arrayN(segcount, sizeof(float), __func__);
+ bl->segbevcount = (int *)MEM_malloc_arrayN(segcount, sizeof(int), __func__);
}
BLI_addtail(bev, bl);
@@ -2719,7 +2665,7 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
bevp->radius = bp->radius;
bevp->weight = bp->weight;
bp++;
- if (seglen != NULL && len != 0) {
+ if (seglen != nullptr && len != 0) {
*seglen = len_v3v3(bevp->vec, bp->vec);
bevp++;
bevp->offset = *seglen;
@@ -2745,11 +2691,11 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
/* in case last point is not cyclic */
len = segcount * resolu + 1;
- BevList *bl = MEM_callocN(sizeof(BevList), "makeBevelBPoints");
- bl->bevpoints = MEM_calloc_arrayN(len, sizeof(BevPoint), "makeBevelBPointsPoints");
+ BevList *bl = MEM_cnew<BevList>(__func__);
+ bl->bevpoints = (BevPoint *)MEM_calloc_arrayN(len, sizeof(BevPoint), __func__);
if (need_seglen && (nu->flagu & CU_NURB_CYCLIC) == 0) {
- bl->seglen = MEM_malloc_arrayN(segcount, sizeof(float), "makeBevelBPoints_seglen");
- bl->segbevcount = MEM_malloc_arrayN(segcount, sizeof(int), "makeBevelBPoints_segbevcount");
+ bl->seglen = (float *)MEM_malloc_arrayN(segcount, sizeof(float), __func__);
+ bl->segbevcount = (int *)MEM_malloc_arrayN(segcount, sizeof(int), __func__);
}
BLI_addtail(bev, bl);
@@ -2761,7 +2707,7 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
segbevcount = bl->segbevcount;
bevp->offset = 0;
- if (seglen != NULL) {
+ if (seglen != nullptr) {
*seglen = 0;
*segbevcount = 0;
}
@@ -2793,7 +2739,7 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
bevp++;
bl->nr++;
bl->dupe_nr = 1;
- if (seglen != NULL) {
+ if (seglen != nullptr) {
*seglen = len_v3v3(prevbezt->vec[1], bezt->vec[1]);
bevp->offset = *seglen;
seglen++;
@@ -2805,10 +2751,10 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
}
}
else {
- /* always do all three, to prevent data hanging around */
+ /* Always do all three, to prevent data hanging around. */
int j;
- /* BevPoint must stay aligned to 4 so sizeof(BevPoint)/sizeof(float) works */
+ /* #BevPoint must stay aligned to 4 so `sizeof(BevPoint) / sizeof(float)` works. */
for (j = 0; j < 3; j++) {
BKE_curve_forward_diff_bezier(prevbezt->vec[1][j],
prevbezt->vec[2][j],
@@ -2819,13 +2765,13 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
sizeof(BevPoint));
}
- /* if both arrays are NULL do nothiong */
+ /* If both arrays are `nullptr` do nothing. */
tilt_bezpart(prevbezt,
bezt,
nu,
- do_tilt ? &bevp->tilt : NULL,
- do_radius ? &bevp->radius : NULL,
- do_weight ? &bevp->weight : NULL,
+ do_tilt ? &bevp->tilt : nullptr,
+ do_radius ? &bevp->radius : nullptr,
+ do_weight ? &bevp->weight : nullptr,
resolu,
sizeof(BevPoint));
@@ -2839,15 +2785,15 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
sizeof(BevPoint));
}
- /* seglen */
- if (seglen != NULL) {
+ /* `seglen`. */
+ if (seglen != nullptr) {
*seglen = 0;
*segbevcount = 0;
for (j = 0; j < resolu; j++) {
bevp0 = bevp;
bevp++;
bevp->offset = len_v3v3(bevp0->vec, bevp->vec);
- /* match seglen and segbevcount to the cleaned up bevel lists (see STEP 2) */
+ /* Match `seglen` and `segbevcount` to the cleaned up bevel lists (see STEP 2). */
if (bevp->offset > threshold) {
*seglen += bevp->offset;
*segbevcount += 1;
@@ -2881,11 +2827,11 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
if (nu->pntsv == 1) {
len = (resolu * segcount);
- BevList *bl = MEM_callocN(sizeof(BevList), "makeBevelList3");
- bl->bevpoints = MEM_calloc_arrayN(len, sizeof(BevPoint), "makeBevelPoints3");
+ BevList *bl = MEM_cnew<BevList>(__func__);
+ bl->bevpoints = (BevPoint *)MEM_calloc_arrayN(len, sizeof(BevPoint), __func__);
if (need_seglen && (nu->flagu & CU_NURB_CYCLIC) == 0) {
- bl->seglen = MEM_malloc_arrayN(segcount, sizeof(float), "makeBevelList3_seglen");
- bl->segbevcount = MEM_malloc_arrayN(segcount, sizeof(int), "makeBevelList3_segbevcount");
+ bl->seglen = (float *)MEM_malloc_arrayN(segcount, sizeof(float), __func__);
+ bl->segbevcount = (int *)MEM_malloc_arrayN(segcount, sizeof(int), __func__);
}
BLI_addtail(bev, bl);
bl->nr = len;
@@ -2899,14 +2845,14 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
BKE_nurb_makeCurve(nu,
&bevp->vec[0],
- do_tilt ? &bevp->tilt : NULL,
- do_radius ? &bevp->radius : NULL,
- do_weight ? &bevp->weight : NULL,
+ do_tilt ? &bevp->tilt : nullptr,
+ do_radius ? &bevp->radius : nullptr,
+ do_weight ? &bevp->weight : nullptr,
resolu,
sizeof(BevPoint));
/* match seglen and segbevcount to the cleaned up bevel lists (see STEP 2) */
- if (seglen != NULL) {
+ if (seglen != nullptr) {
nr = segcount;
bevp0 = bevp;
bevp++;
@@ -2958,7 +2904,7 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
}
nr--;
while (nr--) {
- if (seglen != NULL) {
+ if (seglen != nullptr) {
if (fabsf(bevp1->offset) < threshold) {
bevp0->dupe_tag = true;
bl->dupe_nr++;
@@ -2980,10 +2926,10 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
continue;
}
- nr = bl->nr - bl->dupe_nr + 1; /* +1 because vectorbezier sets flag too */
- blnew = MEM_mallocN(sizeof(BevList), "makeBevelList4");
+ nr = bl->nr - bl->dupe_nr + 1; /* +1 because vector-bezier sets flag too. */
+ blnew = (BevList *)MEM_mallocN(sizeof(BevList), "makeBevelList4");
memcpy(blnew, bl, sizeof(BevList));
- blnew->bevpoints = MEM_calloc_arrayN(nr, sizeof(BevPoint), "makeBevelPoints4");
+ blnew->bevpoints = (BevPoint *)MEM_calloc_arrayN(nr, sizeof(BevPoint), "makeBevelPoints4");
if (!blnew->bevpoints) {
MEM_freeN(blnew);
break;
@@ -2992,7 +2938,7 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
blnew->seglen = bl->seglen;
blnew->nr = 0;
BLI_remlink(bev, bl);
- BLI_insertlinkbefore(bev, bl->next, blnew); /* to make sure bevlist is tuned with nurblist */
+ BLI_insertlinkbefore(bev, bl->next, blnew); /* Ensure `bevlist` is tuned with `nurblist`. */
bevp0 = bl->bevpoints;
bevp1 = blnew->bevpoints;
nr = bl->nr;
@@ -3004,7 +2950,7 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
}
bevp0++;
}
- if (bl->bevpoints != NULL) {
+ if (bl->bevpoints != nullptr) {
MEM_freeN(bl->bevpoints);
}
MEM_freeN(bl);
@@ -3023,7 +2969,7 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
/* find extreme left points, also test (turning) direction */
if (poly > 0) {
- sd = sortdata = MEM_malloc_arrayN(poly, sizeof(struct BevelSort), "makeBevelList5");
+ sd = sortdata = (BevelSort *)MEM_malloc_arrayN(poly, sizeof(struct BevelSort), __func__);
LISTBASE_FOREACH (BevList *, bl, bev) {
if (bl->poly > 0) {
BevPoint *bevp;
@@ -3114,7 +3060,7 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
BevPoint *bevp = bl->bevpoints;
unit_qt(bevp->quat);
}
- else if (bl->nr == 2) { /* 2 pnt, treat separate */
+ else if (bl->nr == 2) { /* 2 points, treat separately. */
make_bevel_list_segment_2D(bl);
}
else {
@@ -3129,7 +3075,7 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_
BevPoint *bevp = bl->bevpoints;
unit_qt(bevp->quat);
}
- else if (bl->nr == 2) { /* 2 pnt, treat separate */
+ else if (bl->nr == 2) { /* 2 points, treat separately. */
make_bevel_list_segment_3D(bl);
}
else {
@@ -3169,7 +3115,7 @@ static void calchandleNurb_intern(BezTriple *bezt,
p2 = bezt->vec[1];
- if (prev == NULL) {
+ if (prev == nullptr) {
p3 = next->vec[1];
pt[0] = 2.0f * p2[0] - p3[0];
pt[1] = 2.0f * p2[1] - p3[1];
@@ -3180,7 +3126,7 @@ static void calchandleNurb_intern(BezTriple *bezt,
p1 = prev->vec[1];
}
- if (next == NULL) {
+ if (next == nullptr) {
pt[0] = 2.0f * p2[0] - p1[0];
pt[1] = 2.0f * p2[1] - p1[1];
pt[2] = 2.0f * p2[2] - p1[2];
@@ -3258,13 +3204,13 @@ static void calchandleNurb_intern(BezTriple *bezt,
if (ydiff1 <= 0.0f) {
if (prev->vec[1][1] > bezt->vec[0][1]) {
bezt->vec[0][1] = prev->vec[1][1];
- leftviolate = 1;
+ leftviolate = true;
}
}
else {
if (prev->vec[1][1] < bezt->vec[0][1]) {
bezt->vec[0][1] = prev->vec[1][1];
- leftviolate = 1;
+ leftviolate = true;
}
}
}
@@ -3285,13 +3231,13 @@ static void calchandleNurb_intern(BezTriple *bezt,
if (ydiff1 <= 0.0f) {
if (next->vec[1][1] < bezt->vec[2][1]) {
bezt->vec[2][1] = next->vec[1][1];
- rightviolate = 1;
+ rightviolate = true;
}
}
else {
if (next->vec[1][1] > bezt->vec[2][1]) {
bezt->vec[2][1] = next->vec[1][1];
- rightviolate = 1;
+ rightviolate = true;
}
}
}
@@ -3321,13 +3267,13 @@ static void calchandleNurb_intern(BezTriple *bezt,
}
if (skip_align ||
- /* when one handle is free, alignming makes no sense, see: T35952 */
+ /* When one handle is free, aligning makes no sense, see: T35952 */
ELEM(HD_FREE, bezt->h1, bezt->h2) ||
- /* also when no handles are aligned, skip this step */
+ /* Also when no handles are aligned, skip this step. */
(!ELEM(HD_ALIGN, bezt->h1, bezt->h2) && !ELEM(HD_ALIGN_DOUBLESIDE, bezt->h1, bezt->h2))) {
- /* handles need to be updated during animation and applying stuff like hooks,
+ /* Handles need to be updated during animation and applying stuff like hooks,
* but in such situations it's quite difficult to distinguish in which order
- * align handles should be aligned so skip them for now */
+ * align handles should be aligned so skip them for now. */
return;
}
@@ -3402,19 +3348,19 @@ static void calchandlesNurb_intern(Nurb *nu, eBezTriple_Flag handle_sel_flag, bo
prev = bezt + (a - 1);
}
else {
- prev = NULL;
+ prev = nullptr;
}
next = bezt + 1;
while (a--) {
- calchandleNurb_intern(bezt, prev, next, handle_sel_flag, 0, skip_align, 0);
+ calchandleNurb_intern(bezt, prev, next, handle_sel_flag, false, skip_align, 0);
prev = bezt;
if (a == 1) {
if (nu->flagu & CU_NURB_CYCLIC) {
next = nu->bezt;
}
else {
- next = NULL;
+ next = nullptr;
}
}
else {
@@ -3430,7 +3376,8 @@ static void calchandlesNurb_intern(Nurb *nu, eBezTriple_Flag handle_sel_flag, bo
* with easy error checking and de-allocation, and an easy way to add or remove
* arrays that are processed in this way when changing code.
*
- * floats, chars: NULL-terminated arrays of pointers to array pointers that need to be allocated.
+ * floats, chars: null-terminated arrays of pointers to array pointers that need to be
+ * allocated.
*
* Returns: pointer to the buffer that contains all of the arrays.
*/
@@ -3449,10 +3396,10 @@ static void *allocate_arrays(int count, float ***floats, char ***chars, const ch
void *buffer = (float *)MEM_malloc_arrayN(count, (sizeof(float) * num_floats + num_chars), name);
if (!buffer) {
- return NULL;
+ return nullptr;
}
- float *fptr = buffer;
+ float *fptr = (float *)buffer;
for (int i = 0; i < num_floats; i++, fptr += count) {
*floats[i] = fptr;
@@ -3521,9 +3468,9 @@ static bool tridiagonal_solve_with_limits(float *a,
int solve_count)
{
float *a0, *b0, *c0, *d0;
- float **arrays[] = {&a0, &b0, &c0, &d0, NULL};
+ float **arrays[] = {&a0, &b0, &c0, &d0, nullptr};
char *is_locked, *num_unlocks;
- char **flagarrays[] = {&is_locked, &num_unlocks, NULL};
+ char **flagarrays[] = {&is_locked, &num_unlocks, nullptr};
void *tmps = allocate_arrays(solve_count, arrays, flagarrays, "tridiagonal_solve_with_limits");
if (!tmps) {
@@ -3790,7 +3737,7 @@ static void bezier_handle_calc_smooth_fcurve(
BezTriple *bezt, int total, int start, int count, bool cycle)
{
float *dx, *dy, *l, *a, *b, *c, *d, *h, *hmax, *hmin;
- float **arrays[] = {&dx, &dy, &l, &a, &b, &c, &d, &h, &hmax, &hmin, NULL};
+ float **arrays[] = {&dx, &dy, &l, &a, &b, &c, &d, &h, &hmax, &hmin, nullptr};
int solve_count = count;
@@ -3819,7 +3766,7 @@ static void bezier_handle_calc_smooth_fcurve(
/* allocate all */
- void *tmp_buffer = allocate_arrays(count, arrays, NULL, "bezier_calc_smooth_tmp");
+ void *tmp_buffer = allocate_arrays(count, arrays, nullptr, "bezier_calc_smooth_tmp");
if (!tmp_buffer) {
return;
}
@@ -3995,8 +3942,8 @@ void BKE_nurb_handle_smooth_fcurve(BezTriple *bezt, int total, bool cyclic)
}
}
- /* Find continuous subsequences of free auto handles and smooth them, starting at
- * search_base. In cyclic mode these subsequences can span the cycle boundary. */
+ /* Find continuous sub-sequences of free auto handles and smooth them, starting at search_base.
+ * In cyclic mode these sub-sequences can span the cycle boundary. */
int start = search_base, count = 1;
for (int i = 1, j = start + 1; i < total; i++, j++) {
@@ -4024,7 +3971,7 @@ void BKE_nurb_handle_smooth_fcurve(BezTriple *bezt, int total, bool cyclic)
void BKE_nurb_handle_calc(
BezTriple *bezt, BezTriple *prev, BezTriple *next, const bool is_fcurve, const char smoothing)
{
- calchandleNurb_intern(bezt, prev, next, SELECT, is_fcurve, false, smoothing);
+ calchandleNurb_intern(bezt, prev, next, (eBezTriple_Flag)SELECT, is_fcurve, false, smoothing);
}
void BKE_nurb_handle_calc_ex(BezTriple *bezt,
@@ -4034,12 +3981,13 @@ void BKE_nurb_handle_calc_ex(BezTriple *bezt,
const bool is_fcurve,
const char smoothing)
{
- calchandleNurb_intern(bezt, prev, next, handle_sel_flag, is_fcurve, false, smoothing);
+ calchandleNurb_intern(
+ bezt, prev, next, (eBezTriple_Flag)handle_sel_flag, is_fcurve, false, smoothing);
}
void BKE_nurb_handles_calc(Nurb *nu) /* first, if needed, set handle flags */
{
- calchandlesNurb_intern(nu, SELECT, false);
+ calchandlesNurb_intern(nu, (eBezTriple_Flag)SELECT, false);
}
/**
@@ -4072,7 +4020,7 @@ void BKE_nurb_handle_calc_simple(Nurb *nu, BezTriple *bezt)
if (nu->pntsu > 1) {
BezTriple *prev = BKE_nurb_bezt_get_prev(nu, bezt);
BezTriple *next = BKE_nurb_bezt_get_next(nu, bezt);
- BKE_nurb_handle_calc(bezt, prev, next, 0, 0);
+ BKE_nurb_handle_calc(bezt, prev, next, false, 0);
}
}
@@ -4172,7 +4120,7 @@ void BKE_nurb_handles_autocalc(Nurb *nu, uint8_t flag)
const float eps = 0.0001f;
const float eps_sq = eps * eps;
- if (nu == NULL || nu->bezt == NULL) {
+ if (nu == nullptr || nu->bezt == nullptr) {
return;
}
@@ -4187,13 +4135,13 @@ void BKE_nurb_handles_autocalc(Nurb *nu, uint8_t flag)
/* left handle: */
if (flag == 0 || (bezt1->f1 & flag)) {
bezt1->h1 = HD_FREE;
- /* distance too short: vectorhandle */
+ /* Distance too short: vector-handle. */
if (len_squared_v3v3(bezt1->vec[1], bezt0->vec[1]) < eps_sq) {
bezt1->h1 = HD_VECT;
leftsmall = true;
}
else {
- /* aligned handle? */
+ /* Aligned handle? */
if (dist_squared_to_line_v3(bezt1->vec[1], bezt1->vec[0], bezt1->vec[2]) < eps_sq) {
align = true;
bezt1->h1 = HD_ALIGN;
@@ -4207,13 +4155,13 @@ void BKE_nurb_handles_autocalc(Nurb *nu, uint8_t flag)
/* right handle: */
if (flag == 0 || (bezt1->f3 & flag)) {
bezt1->h2 = HD_FREE;
- /* distance too short: vectorhandle */
+ /* Distance too short: vector-handle. */
if (len_squared_v3v3(bezt1->vec[1], bezt2->vec[1]) < eps_sq) {
bezt1->h2 = HD_VECT;
rightsmall = true;
}
else {
- /* aligned handle? */
+ /* Aligned handle? */
if (align) {
bezt1->h2 = HD_ALIGN;
}
@@ -4545,7 +4493,7 @@ void BKE_nurb_direction_switch(Nurb *nu)
/* and make in increasing order again */
a = KNOTSU(nu);
fp1 = nu->knotsu;
- fp2 = tempf = MEM_malloc_arrayN(a, sizeof(float), "switchdirect");
+ fp2 = tempf = (float *)MEM_malloc_arrayN(a, sizeof(float), "switchdirect");
a--;
fp2[a] = fp1[a];
while (a--) {
@@ -4615,7 +4563,8 @@ void BKE_curve_nurbs_vert_coords_get(const ListBase *lb, float (*vert_coords)[3]
float (*BKE_curve_nurbs_vert_coords_alloc(const ListBase *lb, int *r_vert_len))[3]
{
const int vert_len = BKE_nurbList_verts_count(lb);
- float(*vert_coords)[3] = MEM_malloc_arrayN(vert_len, sizeof(*vert_coords), __func__);
+ float(*vert_coords)[3] = (float(*)[3])MEM_malloc_arrayN(
+ vert_len, sizeof(*vert_coords), __func__);
BKE_curve_nurbs_vert_coords_get(lb, vert_coords, vert_len);
*r_vert_len = vert_len;
return vert_coords;
@@ -4654,7 +4603,7 @@ void BKE_curve_nurbs_vert_coords_apply_with_mat4(ListBase *lb,
BKE_nurb_project_2d(nu);
}
- calchandlesNurb_intern(nu, SELECT, true);
+ calchandlesNurb_intern(nu, (eBezTriple_Flag)SELECT, true);
}
}
@@ -4690,14 +4639,14 @@ void BKE_curve_nurbs_vert_coords_apply(ListBase *lb,
BKE_nurb_project_2d(nu);
}
- calchandlesNurb_intern(nu, SELECT, true);
+ calchandlesNurb_intern(nu, (eBezTriple_Flag)SELECT, true);
}
}
float (*BKE_curve_nurbs_key_vert_coords_alloc(const ListBase *lb, float *key, int *r_vert_len))[3]
{
int vert_len = BKE_nurbList_verts_count(lb);
- float(*cos)[3] = MEM_malloc_arrayN(vert_len, sizeof(*cos), __func__);
+ float(*cos)[3] = (float(*)[3])MEM_malloc_arrayN(vert_len, sizeof(*cos), __func__);
float *co = cos[0];
LISTBASE_FOREACH (const Nurb *, nu, lb) {
@@ -4857,7 +4806,7 @@ bool BKE_nurb_type_convert(Nurb *nu,
int a, c, nr;
if (nu->type == CU_POLY) {
- if (type == CU_BEZIER) { /* To Bezier with vecthandles. */
+ if (type == CU_BEZIER) { /* To Bezier with vector-handles. */
nr = nu->pntsu;
bezt = (BezTriple *)MEM_calloc_arrayN(nr, sizeof(BezTriple), "setsplinetype2");
nu->bezt = bezt;
@@ -4873,7 +4822,7 @@ bool BKE_nurb_type_convert(Nurb *nu,
bezt++;
}
MEM_freeN(nu->bp);
- nu->bp = NULL;
+ nu->bp = nullptr;
nu->pntsu = nr;
nu->pntsv = 0;
nu->type = CU_BEZIER;
@@ -4895,7 +4844,7 @@ bool BKE_nurb_type_convert(Nurb *nu,
else if (nu->type == CU_BEZIER) { /* Bezier */
if (ELEM(type, CU_POLY, CU_NURBS)) {
nr = use_handles ? (3 * nu->pntsu) : nu->pntsu;
- nu->bp = MEM_calloc_arrayN(nr, sizeof(BPoint), "setsplinetype");
+ nu->bp = (BPoint *)MEM_calloc_arrayN(nr, sizeof(BPoint), "setsplinetype");
a = nu->pntsu;
bezt = nu->bezt;
bp = nu->bp;
@@ -4927,7 +4876,7 @@ bool BKE_nurb_type_convert(Nurb *nu,
bezt++;
}
MEM_freeN(nu->bezt);
- nu->bezt = NULL;
+ nu->bezt = nullptr;
nu->pntsu = nr;
nu->pntsv = 1;
nu->orderu = 4;
@@ -4947,20 +4896,20 @@ bool BKE_nurb_type_convert(Nurb *nu,
if (nu->knotsu) {
MEM_freeN(nu->knotsu); /* python created nurbs have a knotsu of zero */
}
- nu->knotsu = NULL;
+ nu->knotsu = nullptr;
MEM_SAFE_FREE(nu->knotsv);
}
else if (type == CU_BEZIER) { /* to Bezier */
nr = nu->pntsu / 3;
if (nr < 2) {
- if (r_err_msg != NULL) {
+ if (r_err_msg != nullptr) {
*r_err_msg = "At least 6 points required for conversion";
}
return false; /* conversion impossible */
}
- bezt = MEM_calloc_arrayN(nr, sizeof(BezTriple), "setsplinetype2");
+ bezt = (BezTriple *)MEM_calloc_arrayN(nr, sizeof(BezTriple), "setsplinetype2");
nu->bezt = bezt;
a = nr;
bp = nu->bp;
@@ -4979,9 +4928,9 @@ bool BKE_nurb_type_convert(Nurb *nu,
bezt++;
}
MEM_freeN(nu->bp);
- nu->bp = NULL;
+ nu->bp = nullptr;
MEM_freeN(nu->knotsu);
- nu->knotsu = NULL;
+ nu->knotsu = nullptr;
nu->pntsu = nr;
nu->type = CU_BEZIER;
}
@@ -5010,7 +4959,7 @@ const ListBase *BKE_curve_nurbs_get_for_read(const Curve *cu)
void BKE_curve_nurb_active_set(Curve *cu, const Nurb *nu)
{
- if (nu == NULL) {
+ if (nu == nullptr) {
cu->actnu = CU_ACT_NONE;
}
else {
@@ -5023,13 +4972,13 @@ void BKE_curve_nurb_active_set(Curve *cu, const Nurb *nu)
Nurb *BKE_curve_nurb_active_get(Curve *cu)
{
ListBase *nurbs = BKE_curve_editNurbs_get(cu);
- return BLI_findlink(nurbs, cu->actnu);
+ return (Nurb *)BLI_findlink(nurbs, cu->actnu);
}
void *BKE_curve_vert_active_get(Curve *cu)
{
- Nurb *nu = NULL;
- void *vert = NULL;
+ Nurb *nu = nullptr;
+ void *vert = nullptr;
BKE_curve_nurb_vert_active_get(cu, &nu, &vert);
return vert;
@@ -5065,12 +5014,12 @@ void BKE_curve_nurb_vert_active_set(Curve *cu, const Nurb *nu, const void *vert)
bool BKE_curve_nurb_vert_active_get(Curve *cu, Nurb **r_nu, void **r_vert)
{
- Nurb *nu = NULL;
- void *vert = NULL;
+ Nurb *nu = nullptr;
+ void *vert = nullptr;
if (cu->actvert != CU_ACT_NONE) {
ListBase *nurbs = BKE_curve_editNurbs_get(cu);
- nu = BLI_findlink(nurbs, cu->actnu);
+ nu = (Nurb *)BLI_findlink(nurbs, cu->actnu);
if (nu) {
if (nu->type == CU_BEZIER) {
@@ -5087,7 +5036,7 @@ bool BKE_curve_nurb_vert_active_get(Curve *cu, Nurb **r_nu, void **r_vert)
*r_nu = nu;
*r_vert = vert;
- return (*r_vert != NULL);
+ return (*r_vert != nullptr);
}
void BKE_curve_nurb_vert_active_validate(Curve *cu)
@@ -5097,13 +5046,13 @@ void BKE_curve_nurb_vert_active_validate(Curve *cu)
if (BKE_curve_nurb_vert_active_get(cu, &nu, &vert)) {
if (nu->type == CU_BEZIER) {
- BezTriple *bezt = vert;
+ BezTriple *bezt = (BezTriple *)vert;
if (BEZT_ISSEL_ANY(bezt) == 0) {
cu->actvert = CU_ACT_NONE;
}
}
else {
- BPoint *bp = vert;
+ BPoint *bp = (BPoint *)vert;
if ((bp->f1 & SELECT) == 0) {
cu->actvert = CU_ACT_NONE;
}
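The two casts above exist because the active vertex is stored as an untyped `void *` and reinterpreted as either a BezTriple or a BPoint depending on the spline type. The same tagged-pointer pattern, reduced to a self-contained sketch (hypothetical types, not the Curve data structures):

struct DemoBez { int select_flag; };
struct DemoPoint { int select_flag; };

enum eDemoSplineType { DEMO_SPLINE_BEZIER, DEMO_SPLINE_POLY };

/* `vert` points at a DemoBez or a DemoPoint; `type` says which. */
static bool demo_vert_is_selected(eDemoSplineType type, void *vert)
{
  if (type == DEMO_SPLINE_BEZIER) {
    return static_cast<DemoBez *>(vert)->select_flag != 0;
  }
  return static_cast<DemoPoint *>(vert)->select_flag != 0;
}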
@@ -5118,7 +5067,7 @@ void BKE_curve_nurb_vert_active_validate(Curve *cu)
bool BKE_curve_minmax(Curve *cu, bool use_radius, float min[3], float max[3])
{
ListBase *nurb_lb = BKE_curve_nurbs_get(cu);
- ListBase temp_nurb_lb = {NULL, NULL};
+ ListBase temp_nurb_lb = {nullptr, nullptr};
const bool is_font = (BLI_listbase_is_empty(nurb_lb)) && (cu->len != 0);
/* For font curves we generate temp list of splines.
*
@@ -5127,7 +5076,7 @@ bool BKE_curve_minmax(Curve *cu, bool use_radius, float min[3], float max[3])
*/
if (is_font) {
nurb_lb = &temp_nurb_lb;
- BKE_vfont_to_curve_ex(NULL, cu, FO_EDIT, nurb_lb, NULL, NULL, NULL, NULL);
+ BKE_vfont_to_curve_ex(nullptr, cu, FO_EDIT, nurb_lb, nullptr, nullptr, nullptr, nullptr);
use_radius = false;
}
/* Do bounding box based on splines. */
@@ -5232,7 +5181,7 @@ void BKE_curve_transform_ex(Curve *cu,
if (do_keys && cu->key) {
LISTBASE_FOREACH (KeyBlock *, kb, &cu->key->block) {
- float *fp = kb->data;
+ float *fp = (float *)kb->data;
int n = kb->totelem;
LISTBASE_FOREACH (Nurb *, nu, &cu->nurb) {
@@ -5290,7 +5239,7 @@ void BKE_curve_translate(Curve *cu, const float offset[3], const bool do_keys)
if (do_keys && cu->key) {
LISTBASE_FOREACH (KeyBlock *, kb, &cu->key->block) {
- float *fp = kb->data;
+ float *fp = (float *)kb->data;
int n = kb->totelem;
LISTBASE_FOREACH (Nurb *, nu, &cu->nurb) {
@@ -5442,11 +5391,10 @@ void BKE_curve_material_remap(Curve *cu, const unsigned int *remap, unsigned int
}
}
else {
- Nurb *nu;
ListBase *nurbs = BKE_curve_editNurbs_get(cu);
if (nurbs) {
- for (nu = nurbs->first; nu; nu = nu->next) {
+ LISTBASE_FOREACH (Nurb *, nu, nurbs) {
MAT_NR_REMAP(nu->mat_nr);
}
}
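Replacing the hand-written loop with LISTBASE_FOREACH above also sidesteps a C++ problem: `ListBase.first` is a `void *`, so the open-coded form would now need a cast to start the walk. A self-contained analogue of that walk (hypothetical types standing in for ListBase/Nurb):

struct DemoNode {
  DemoNode *next, *prev;
  int mat_nr;
};

struct DemoList {
  void *first, *last; /* mirrors ListBase's untyped head/tail pointers */
};

static void demo_material_remap(DemoList *list)
{
  /* The cast from `void *first` is what C++ insists on; the macro hides it. */
  for (DemoNode *node = static_cast<DemoNode *>(list->first); node; node = node->next) {
    node->mat_nr = 0;
  }
}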
@@ -5536,8 +5484,8 @@ void BKE_curve_eval_geometry(Depsgraph *depsgraph, Curve *curve)
}
/* Draw Engine */
-void (*BKE_curve_batch_cache_dirty_tag_cb)(Curve *cu, int mode) = NULL;
-void (*BKE_curve_batch_cache_free_cb)(Curve *cu) = NULL;
+void (*BKE_curve_batch_cache_dirty_tag_cb)(Curve *cu, int mode) = nullptr;
+void (*BKE_curve_batch_cache_free_cb)(Curve *cu) = nullptr;
void BKE_curve_batch_cache_dirty_tag(Curve *cu, int mode)
{
diff --git a/source/blender/blenkernel/intern/curve_eval.cc b/source/blender/blenkernel/intern/curve_eval.cc
index 38f736e6907..e2461adaaca 100644
--- a/source/blender/blenkernel/intern/curve_eval.cc
+++ b/source/blender/blenkernel/intern/curve_eval.cc
@@ -100,11 +100,17 @@ void CurveEval::transform(const float4x4 &matrix)
}
}
-void CurveEval::bounds_min_max(float3 &min, float3 &max, const bool use_evaluated) const
+bool CurveEval::bounds_min_max(float3 &min, float3 &max, const bool use_evaluated) const
{
+ bool have_minmax = false;
for (const SplinePtr &spline : this->splines()) {
- spline->bounds_min_max(min, max, use_evaluated);
+ if (spline->size()) {
+ spline->bounds_min_max(min, max, use_evaluated);
+ have_minmax = true;
+ }
}
+
+ return have_minmax;
}
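Returning a bool here lets callers distinguish "no spline had any points" from a real bounding box, instead of silently leaving min/max at their initial values. A hypothetical caller sketch (not part of this commit; assumes a CurveEval `curve` in scope, blender::float3, and <cfloat> for FLT_MAX):

float3 min = float3(FLT_MAX, FLT_MAX, FLT_MAX);
float3 max = float3(-FLT_MAX, -FLT_MAX, -FLT_MAX);
if (!curve.bounds_min_max(min, max, true)) {
  /* Empty curve: fall back to a unit box around the origin. */
  min = float3(-1.0f, -1.0f, -1.0f);
  max = float3(1.0f, 1.0f, 1.0f);
}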
float CurveEval::total_length() const
diff --git a/source/blender/blenkernel/intern/curve_to_mesh_convert.cc b/source/blender/blenkernel/intern/curve_to_mesh_convert.cc
index 5522a84d094..073d9d18a04 100644
--- a/source/blender/blenkernel/intern/curve_to_mesh_convert.cc
+++ b/source/blender/blenkernel/intern/curve_to_mesh_convert.cc
@@ -401,10 +401,8 @@ struct ResultAttributes {
};
static ResultAttributes create_result_attributes(const CurveEval &curve,
const CurveEval &profile,
- Mesh &mesh)
+ MeshComponent &mesh_component)
{
- MeshComponent mesh_component;
- mesh_component.replace(&mesh, GeometryOwnershipType::Editable);
Set<AttributeIDRef> curve_attributes;
/* In order to prefer attributes on the main curve input when there are name collisions, first
@@ -708,7 +706,11 @@ Mesh *curve_to_mesh_sweep(const CurveEval &curve, const CurveEval &profile, cons
mesh->smoothresh = DEG2RADF(180.0f);
BKE_mesh_normals_tag_dirty(mesh);
- ResultAttributes attributes = create_result_attributes(curve, profile, *mesh);
+ /* Create the mesh component for retrieving attributes at this scope, since output attributes
+ * can keep a reference to the component for updating after retrieving write access. */
+ MeshComponent mesh_component;
+ mesh_component.replace(mesh, GeometryOwnershipType::Editable);
+ ResultAttributes attributes = create_result_attributes(curve, profile, mesh_component);
threading::parallel_for(curves.index_range(), 128, [&](IndexRange curves_range) {
for (const int i_spline : curves_range) {
@@ -760,7 +762,10 @@ static CurveEval get_curve_single_vert()
{
CurveEval curve;
std::unique_ptr<PolySpline> spline = std::make_unique<PolySpline>();
- spline->add_point(float3(0), 0, 0.0f);
+ spline->resize(1.0f);
+ spline->positions().fill(float3(0));
+ spline->radii().fill(1.0f);
+ spline->tilts().fill(0.0f);
curve.add_spline(std::move(spline));
return curve;
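The new scope comment in curve_to_mesh_sweep is a lifetime rule: output attributes can keep a pointer back to the component they were retrieved from, so that component must outlive them. Stripped of the Blender types, the shape being enforced looks roughly like this (hypothetical `Component`/`Output` types):

struct Component {
  int version = 0;
};

struct Output {
  Component *owner;
  void save() { owner->version += 1; } /* writes back through the pointer */
};

/* The component is created by the caller and kept alive for as long as any
 * Output that refers to it, mirroring how `mesh_component` above outlives
 * `attributes`. Creating the component inside a helper and letting it go out
 * of scope would leave `owner` dangling. */
static void demo_use(Component &component)
{
  Output out{&component};
  out.save();
}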
diff --git a/source/blender/blenkernel/intern/curveprofile.cc b/source/blender/blenkernel/intern/curveprofile.cc
index 387709fca29..8f387be41d3 100644
--- a/source/blender/blenkernel/intern/curveprofile.cc
+++ b/source/blender/blenkernel/intern/curveprofile.cc
@@ -46,7 +46,7 @@
struct CurveProfile *BKE_curveprofile_add(eCurveProfilePresets preset)
{
- CurveProfile *profile = (CurveProfile *)MEM_callocN(sizeof(CurveProfile), __func__);
+ CurveProfile *profile = MEM_cnew<CurveProfile>(__func__);
BKE_curveprofile_set_defaults(profile);
profile->preset = preset;
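MEM_cnew<T> is the typed, zero-initializing counterpart of MEM_callocN used throughout these C++ conversions; it removes both the manual sizeof and the cast. The before/after pattern, assuming MEM_guardedalloc.h and the CurveProfile DNA header are included:

static void demo_profile_alloc()
{
  /* Before (C): untyped allocation, manual sizeof and cast. */
  CurveProfile *profile_old = (CurveProfile *)MEM_callocN(sizeof(CurveProfile), __func__);

  /* After (C++): typed, zero-initialized allocation in one call. */
  CurveProfile *profile_new = MEM_cnew<CurveProfile>(__func__);

  MEM_freeN(profile_old);
  MEM_freeN(profile_new);
}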
diff --git a/source/blender/blenkernel/intern/customdata.c b/source/blender/blenkernel/intern/customdata.cc
index 090de26c230..5e3beab9b72 100644
--- a/source/blender/blenkernel/intern/customdata.c
+++ b/source/blender/blenkernel/intern/customdata.cc
@@ -44,6 +44,10 @@
#include "BLI_string_utils.h"
#include "BLI_utildefines.h"
+#ifndef NDEBUG
+# include "BLI_dynstr.h"
+#endif
+
#include "BLT_translation.h"
#include "BKE_anonymous_attribute.h"
@@ -69,7 +73,7 @@
#define CUSTOMDATA_GROW 5
/* ensure typemap size is ok */
-BLI_STATIC_ASSERT(ARRAY_SIZE(((CustomData *)NULL)->typemap) == CD_NUMTYPES, "size mismatch");
+BLI_STATIC_ASSERT(ARRAY_SIZE(((CustomData *)nullptr)->typemap) == CD_NUMTYPES, "size mismatch");
static CLG_LogRef LOG = {"bke.customdata"};
@@ -94,7 +98,7 @@ bool CustomData_MeshMasks_are_matching(const CustomData_MeshMasks *mask_ref,
}
/********************* Layer type information **********************/
-typedef struct LayerTypeInfo {
+struct LayerTypeInfo {
int size; /* the memory size of one element of this layer's data */
/** name of the struct used, for file writing */
@@ -105,7 +109,7 @@ typedef struct LayerTypeInfo {
/**
* default layer name.
*
- * \note when NULL this is a way to ensure there is only ever one item
+ * \note when null this is a way to ensure there is only ever one item
* see: CustomData_layertype_is_singleton().
*/
const char *defaultname;
@@ -113,7 +117,7 @@ typedef struct LayerTypeInfo {
/**
* a function to copy count elements of this layer's data
* (deep copy if appropriate)
- * if NULL, memcpy is used
+ * if null, memcpy is used
*/
cd_copy copy;
@@ -128,7 +132,7 @@ typedef struct LayerTypeInfo {
/**
* a function to interpolate between count source elements of this
* layer's data and store the result in dest
- * if weights == NULL or sub_weights == NULL, they should default to 1
+ * if weights == null or sub_weights == null, they should default to 1
*
* weights gives the weight for each element in sources
* sub_weights gives the sub-element weights for each element in sources
@@ -146,7 +150,7 @@ typedef struct LayerTypeInfo {
void (*swap)(void *data, const int *corner_indices);
/**
- * a function to set a layer's data to default values. if NULL, the
+ * a function to set a layer's data to default values. if null, the
* default is assumed to be all zeros */
void (*set_default)(void *data, int count);
@@ -171,9 +175,9 @@ typedef struct LayerTypeInfo {
size_t (*filesize)(CDataFile *cdf, const void *data, int count);
/** a function to determine max allowed number of layers,
- * should be NULL or return -1 if no limit */
- int (*layers_max)(void);
-} LayerTypeInfo;
+ * should be null or return -1 if no limit */
+ int (*layers_max)();
+};
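Two small C++ cleanups show up in this struct: the `typedef struct ... } LayerTypeInfo;` wrapper is gone because a struct name is already a type in C++, and the function-pointer members drop `(void)` because an empty parameter list already means "no arguments" in C++ (in C it means "unspecified"). Reduced to a hypothetical example:

struct DemoTypeInfo {
  int size;
  int (*layers_max)(); /* was: int (*layers_max)(void); */
};

static int demo_layers_max()
{
  return -1; /* no limit, following the convention documented above */
}

static const DemoTypeInfo demo_info = {sizeof(float), demo_layers_max};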
static void layerCopy_mdeformvert(const void *source, void *dest, int count)
{
@@ -182,17 +186,17 @@ static void layerCopy_mdeformvert(const void *source, void *dest, int count)
memcpy(dest, source, count * size);
for (i = 0; i < count; i++) {
- MDeformVert *dvert = POINTER_OFFSET(dest, i * size);
+ MDeformVert *dvert = static_cast<MDeformVert *>(POINTER_OFFSET(dest, i * size));
if (dvert->totweight) {
- MDeformWeight *dw = MEM_malloc_arrayN(
- dvert->totweight, sizeof(*dw), "layerCopy_mdeformvert dw");
+ MDeformWeight *dw = static_cast<MDeformWeight *>(
+ MEM_malloc_arrayN(dvert->totweight, sizeof(*dw), __func__));
memcpy(dw, dvert->dw, dvert->totweight * sizeof(*dw));
dvert->dw = dw;
}
else {
- dvert->dw = NULL;
+ dvert->dw = nullptr;
}
}
}
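The static_cast calls added here repeat across almost every layer callback in this file: the callbacks keep their `void *` signatures because they are stored in the generic LayerTypeInfo table, so each body now converts explicitly to its element type. The generic shape, with a hypothetical element type:

struct DemoElem {
  float value;
};

static void demo_layer_copy(const void *source, void *dest, int count)
{
  const DemoElem *src = static_cast<const DemoElem *>(source);
  DemoElem *dst = static_cast<DemoElem *>(dest);
  for (int i = 0; i < count; i++) {
    dst[i] = src[i];
  }
}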
@@ -200,11 +204,11 @@ static void layerCopy_mdeformvert(const void *source, void *dest, int count)
static void layerFree_mdeformvert(void *data, int count, int size)
{
for (int i = 0; i < count; i++) {
- MDeformVert *dvert = POINTER_OFFSET(data, i * size);
+ MDeformVert *dvert = static_cast<MDeformVert *>(POINTER_OFFSET(data, i * size));
if (dvert->dw) {
MEM_freeN(dvert->dw);
- dvert->dw = NULL;
+ dvert->dw = nullptr;
dvert->totweight = 0;
}
}
@@ -216,8 +220,8 @@ static void layerCopy_bmesh_elem_py_ptr(const void *UNUSED(source), void *dest,
const int size = sizeof(void *);
for (int i = 0; i < count; i++) {
- void **ptr = POINTER_OFFSET(dest, i * size);
- *ptr = NULL;
+ void **ptr = (void **)POINTER_OFFSET(dest, i * size);
+ *ptr = nullptr;
}
}
@@ -231,9 +235,9 @@ void bpy_bm_generic_invalidate(struct BPy_BMGeneric *UNUSED(self))
static void layerFree_bmesh_elem_py_ptr(void *data, int count, int size)
{
for (int i = 0; i < count; i++) {
- void **ptr = POINTER_OFFSET(data, i * size);
+ void **ptr = (void **)POINTER_OFFSET(data, i * size);
if (*ptr) {
- bpy_bm_generic_invalidate(*ptr);
+ bpy_bm_generic_invalidate(static_cast<BPy_BMGeneric *>(*ptr));
}
}
}
@@ -251,14 +255,14 @@ static void layerInterp_mdeformvert(const void **sources,
MDeformWeight dw;
};
- MDeformVert *dvert = dest;
- struct MDeformWeight_Link *dest_dwlink = NULL;
+ MDeformVert *dvert = static_cast<MDeformVert *>(dest);
+ struct MDeformWeight_Link *dest_dwlink = nullptr;
struct MDeformWeight_Link *node;
/* build a list of unique def_nrs for dest */
int totweight = 0;
for (int i = 0; i < count; i++) {
- const MDeformVert *source = sources[i];
+ const MDeformVert *source = static_cast<const MDeformVert *>(sources[i]);
float interp_weight = weights[i];
for (int j = 0; j < source->totweight; j++) {
@@ -280,11 +284,12 @@ static void layerInterp_mdeformvert(const void **sources,
/* if this def_nr is not in the list, add it */
if (!node) {
- struct MDeformWeight_Link *tmp_dwlink = alloca(sizeof(*tmp_dwlink));
+ struct MDeformWeight_Link *tmp_dwlink = static_cast<MDeformWeight_Link *>(
+ alloca(sizeof(*tmp_dwlink)));
tmp_dwlink->dw.def_nr = dw->def_nr;
tmp_dwlink->dw.weight = weight;
- /* inline linklist */
+ /* Inline linked-list. */
tmp_dwlink->next = dest_dwlink;
dest_dwlink = tmp_dwlink;
@@ -305,7 +310,8 @@ static void layerInterp_mdeformvert(const void **sources,
}
if (totweight) {
- dvert->dw = MEM_malloc_arrayN(totweight, sizeof(*dvert->dw), __func__);
+ dvert->dw = static_cast<MDeformWeight *>(
+ MEM_malloc_arrayN(totweight, sizeof(*dvert->dw), __func__));
}
}
@@ -343,37 +349,13 @@ static void layerInterp_normal(const void **sources,
normalize_v3_v3((float *)dest, no);
}
-static bool layerValidate_normal(void *data, const uint totitems, const bool do_fixes)
-{
- static const float no_default[3] = {0.0f, 0.0f, 1.0f}; /* Z-up default normal... */
- float(*no)[3] = data;
- bool has_errors = false;
-
- for (int i = 0; i < totitems; i++, no++) {
- if (!is_finite_v3((float *)no)) {
- has_errors = true;
- if (do_fixes) {
- copy_v3_v3((float *)no, no_default);
- }
- }
- else if (!compare_ff(len_squared_v3((float *)no), 1.0f, 1e-6f)) {
- has_errors = true;
- if (do_fixes) {
- normalize_v3((float *)no);
- }
- }
- }
-
- return has_errors;
-}
-
static void layerCopyValue_normal(const void *source,
void *dest,
const int mixmode,
const float mixfactor)
{
- const float *no_src = source;
- float *no_dst = dest;
+ const float *no_src = (const float *)source;
+ float *no_dst = (float *)dest;
float no_tmp[3];
if (ELEM(mixmode,
@@ -416,13 +398,13 @@ static void layerCopy_tface(const void *source, void *dest, int count)
static void layerInterp_tface(
const void **sources, const float *weights, const float *sub_weights, int count, void *dest)
{
- MTFace *tf = dest;
+ MTFace *tf = static_cast<MTFace *>(dest);
float uv[4][2] = {{0.0f}};
const float *sub_weight = sub_weights;
for (int i = 0; i < count; i++) {
const float interp_weight = weights[i];
- const MTFace *src = sources[i];
+ const MTFace *src = static_cast<const MTFace *>(sources[i]);
for (int j = 0; j < 4; j++) {
if (sub_weights) {
@@ -443,7 +425,7 @@ static void layerInterp_tface(
static void layerSwap_tface(void *data, const int *corner_indices)
{
- MTFace *tf = data;
+ MTFace *tf = static_cast<MTFace *>(data);
float uv[4][2];
for (int j = 0; j < 4; j++) {
@@ -464,7 +446,7 @@ static void layerDefault_tface(void *data, int count)
}
}
-static int layerMaxNum_tface(void)
+static int layerMaxNum_tface()
{
return MAX_MTFACE;
}
@@ -491,7 +473,7 @@ static void layerInterp_propFloat(const void **sources,
static bool layerValidate_propFloat(void *data, const uint totitems, const bool do_fixes)
{
- MFloatProperty *fp = data;
+ MFloatProperty *fp = static_cast<MFloatProperty *>(data);
bool has_errors = false;
for (int i = 0; i < totitems; i++, fp++) {
@@ -529,13 +511,13 @@ static void layerCopy_origspace_face(const void *source, void *dest, int count)
static void layerInterp_origspace_face(
const void **sources, const float *weights, const float *sub_weights, int count, void *dest)
{
- OrigSpaceFace *osf = dest;
+ OrigSpaceFace *osf = static_cast<OrigSpaceFace *>(dest);
float uv[4][2] = {{0.0f}};
const float *sub_weight = sub_weights;
for (int i = 0; i < count; i++) {
const float interp_weight = weights[i];
- const OrigSpaceFace *src = sources[i];
+ const OrigSpaceFace *src = static_cast<const OrigSpaceFace *>(sources[i]);
for (int j = 0; j < 4; j++) {
if (sub_weights) {
@@ -555,7 +537,7 @@ static void layerInterp_origspace_face(
static void layerSwap_origspace_face(void *data, const int *corner_indices)
{
- OrigSpaceFace *osf = data;
+ OrigSpaceFace *osf = static_cast<OrigSpaceFace *>(data);
float uv[4][2];
for (int j = 0; j < 4; j++) {
@@ -576,7 +558,7 @@ static void layerDefault_origspace_face(void *data, int count)
static void layerSwap_mdisps(void *data, const int *ci)
{
- MDisps *s = data;
+ MDisps *s = static_cast<MDisps *>(data);
if (s->disps) {
int nverts = (ci[1] == 3) ? 4 : 3; /* silly way to know vertex count of face */
@@ -589,11 +571,11 @@ static void layerSwap_mdisps(void *data, const int *ci)
MEM_freeN(s->disps);
s->totdisp = (s->totdisp / corners) * nverts;
- s->disps = MEM_calloc_arrayN(s->totdisp, sizeof(float[3]), "mdisp swap");
+ s->disps = (float(*)[3])MEM_calloc_arrayN(s->totdisp, sizeof(float[3]), "mdisp swap");
return;
}
- float(*d)[3] = MEM_calloc_arrayN(s->totdisp, sizeof(float[3]), "mdisps swap");
+ float(*d)[3] = (float(*)[3])MEM_calloc_arrayN(s->totdisp, sizeof(float[3]), "mdisps swap");
for (int S = 0; S < corners; S++) {
memcpy(d + cornersize * S, s->disps + cornersize * ci[S], sizeof(float[3]) * cornersize);
@@ -606,17 +588,17 @@ static void layerSwap_mdisps(void *data, const int *ci)
static void layerCopy_mdisps(const void *source, void *dest, int count)
{
- const MDisps *s = source;
- MDisps *d = dest;
+ const MDisps *s = static_cast<const MDisps *>(source);
+ MDisps *d = static_cast<MDisps *>(dest);
for (int i = 0; i < count; i++) {
if (s[i].disps) {
- d[i].disps = MEM_dupallocN(s[i].disps);
- d[i].hidden = MEM_dupallocN(s[i].hidden);
+ d[i].disps = static_cast<float(*)[3]>(MEM_dupallocN(s[i].disps));
+ d[i].hidden = static_cast<unsigned int *>(MEM_dupallocN(s[i].hidden));
}
else {
- d[i].disps = NULL;
- d[i].hidden = NULL;
+ d[i].disps = nullptr;
+ d[i].hidden = nullptr;
}
/* still copy even if not in memory, displacement can be external */
@@ -627,7 +609,7 @@ static void layerCopy_mdisps(const void *source, void *dest, int count)
static void layerFree_mdisps(void *data, int count, int UNUSED(size))
{
- MDisps *d = data;
+ MDisps *d = static_cast<MDisps *>(data);
for (int i = 0; i < count; i++) {
if (d[i].disps) {
@@ -636,8 +618,8 @@ static void layerFree_mdisps(void *data, int count, int UNUSED(size))
if (d[i].hidden) {
MEM_freeN(d[i].hidden);
}
- d[i].disps = NULL;
- d[i].hidden = NULL;
+ d[i].disps = nullptr;
+ d[i].hidden = nullptr;
d[i].totdisp = 0;
d[i].level = 0;
}
@@ -645,16 +627,16 @@ static void layerFree_mdisps(void *data, int count, int UNUSED(size))
static bool layerRead_mdisps(CDataFile *cdf, void *data, int count)
{
- MDisps *d = data;
+ MDisps *d = static_cast<MDisps *>(data);
for (int i = 0; i < count; i++) {
if (!d[i].disps) {
- d[i].disps = MEM_calloc_arrayN(d[i].totdisp, sizeof(float[3]), "mdisps read");
+ d[i].disps = (float(*)[3])MEM_calloc_arrayN(d[i].totdisp, sizeof(float[3]), "mdisps read");
}
if (!cdf_read_data(cdf, sizeof(float[3]) * d[i].totdisp, d[i].disps)) {
CLOG_ERROR(&LOG, "failed to read multires displacement %d/%d %d", i, count, d[i].totdisp);
- return 0;
+ return false;
}
}
@@ -663,12 +645,12 @@ static bool layerRead_mdisps(CDataFile *cdf, void *data, int count)
static bool layerWrite_mdisps(CDataFile *cdf, const void *data, int count)
{
- const MDisps *d = data;
+ const MDisps *d = static_cast<const MDisps *>(data);
for (int i = 0; i < count; i++) {
if (!cdf_write_data(cdf, sizeof(float[3]) * d[i].totdisp, d[i].disps)) {
CLOG_ERROR(&LOG, "failed to write multires displacement %d/%d %d", i, count, d[i].totdisp);
- return 0;
+ return false;
}
}
@@ -677,7 +659,7 @@ static bool layerWrite_mdisps(CDataFile *cdf, const void *data, int count)
static size_t layerFilesize_mdisps(CDataFile *UNUSED(cdf), const void *data, int count)
{
- const MDisps *d = data;
+ const MDisps *d = static_cast<const MDisps *>(data);
size_t size = 0;
for (int i = 0; i < count; i++) {
@@ -695,7 +677,7 @@ static void layerInterp_paint_mask(const void **sources,
float mask = 0.0f;
for (int i = 0; i < count; i++) {
const float interp_weight = weights[i];
- const float *src = sources[i];
+ const float *src = static_cast<const float *>(sources[i]);
mask += (*src) * interp_weight;
}
*(float *)dest = mask;
@@ -703,16 +685,16 @@ static void layerInterp_paint_mask(const void **sources,
static void layerCopy_grid_paint_mask(const void *source, void *dest, int count)
{
- const GridPaintMask *s = source;
- GridPaintMask *d = dest;
+ const GridPaintMask *s = static_cast<const GridPaintMask *>(source);
+ GridPaintMask *d = static_cast<GridPaintMask *>(dest);
for (int i = 0; i < count; i++) {
if (s[i].data) {
- d[i].data = MEM_dupallocN(s[i].data);
+ d[i].data = static_cast<float *>(MEM_dupallocN(s[i].data));
d[i].level = s[i].level;
}
else {
- d[i].data = NULL;
+ d[i].data = nullptr;
d[i].level = 0;
}
}
@@ -720,7 +702,7 @@ static void layerCopy_grid_paint_mask(const void *source, void *dest, int count)
static void layerFree_grid_paint_mask(void *data, int count, int UNUSED(size))
{
- GridPaintMask *gpm = data;
+ GridPaintMask *gpm = static_cast<GridPaintMask *>(data);
for (int i = 0; i < count; i++) {
MEM_SAFE_FREE(gpm[i].data);
@@ -734,8 +716,8 @@ static void layerCopyValue_mloopcol(const void *source,
const int mixmode,
const float mixfactor)
{
- const MLoopCol *m1 = source;
- MLoopCol *m2 = dest;
+ const MLoopCol *m1 = static_cast<const MLoopCol *>(source);
+ MLoopCol *m2 = static_cast<MLoopCol *>(dest);
unsigned char tmp_col[4];
if (ELEM(mixmode,
@@ -789,7 +771,8 @@ static void layerCopyValue_mloopcol(const void *source,
static bool layerEqual_mloopcol(const void *data1, const void *data2)
{
- const MLoopCol *m1 = data1, *m2 = data2;
+ const MLoopCol *m1 = static_cast<const MLoopCol *>(data1);
+ const MLoopCol *m2 = static_cast<const MLoopCol *>(data2);
float r, g, b, a;
r = m1->r - m2->r;
@@ -802,7 +785,7 @@ static bool layerEqual_mloopcol(const void *data1, const void *data2)
static void layerMultiply_mloopcol(void *data, float fac)
{
- MLoopCol *m = data;
+ MLoopCol *m = static_cast<MLoopCol *>(data);
m->r = (float)m->r * fac;
m->g = (float)m->g * fac;
@@ -812,8 +795,8 @@ static void layerMultiply_mloopcol(void *data, float fac)
static void layerAdd_mloopcol(void *data1, const void *data2)
{
- MLoopCol *m = data1;
- const MLoopCol *m2 = data2;
+ MLoopCol *m = static_cast<MLoopCol *>(data1);
+ const MLoopCol *m2 = static_cast<const MLoopCol *>(data2);
m->r += m2->r;
m->g += m2->g;
@@ -823,8 +806,9 @@ static void layerAdd_mloopcol(void *data1, const void *data2)
static void layerDoMinMax_mloopcol(const void *data, void *vmin, void *vmax)
{
- const MLoopCol *m = data;
- MLoopCol *min = vmin, *max = vmax;
+ const MLoopCol *m = static_cast<const MLoopCol *>(data);
+ MLoopCol *min = static_cast<MLoopCol *>(vmin);
+ MLoopCol *max = static_cast<MLoopCol *>(vmax);
if (m->r < min->r) {
min->r = m->r;
@@ -854,7 +838,8 @@ static void layerDoMinMax_mloopcol(const void *data, void *vmin, void *vmax)
static void layerInitMinMax_mloopcol(void *vmin, void *vmax)
{
- MLoopCol *min = vmin, *max = vmax;
+ MLoopCol *min = static_cast<MLoopCol *>(vmin);
+ MLoopCol *max = static_cast<MLoopCol *>(vmax);
min->r = 255;
min->g = 255;
@@ -882,7 +867,7 @@ static void layerInterp_mloopcol(const void **sources,
int count,
void *dest)
{
- MLoopCol *mc = dest;
+ MLoopCol *mc = static_cast<MLoopCol *>(dest);
struct {
float a;
float r;
@@ -892,7 +877,7 @@ static void layerInterp_mloopcol(const void **sources,
for (int i = 0; i < count; i++) {
const float interp_weight = weights[i];
- const MLoopCol *src = sources[i];
+ const MLoopCol *src = static_cast<const MLoopCol *>(sources[i]);
col.r += src->r * interp_weight;
col.g += src->g * interp_weight;
col.b += src->b * interp_weight;
@@ -909,7 +894,7 @@ static void layerInterp_mloopcol(const void **sources,
mc->a = round_fl_to_uchar_clamp(col.a);
}
-static int layerMaxNum_mloopcol(void)
+static int layerMaxNum_mloopcol()
{
return MAX_MCOL;
}
@@ -919,8 +904,8 @@ static void layerCopyValue_mloopuv(const void *source,
const int mixmode,
const float mixfactor)
{
- const MLoopUV *luv1 = source;
- MLoopUV *luv2 = dest;
+ const MLoopUV *luv1 = static_cast<const MLoopUV *>(source);
+ MLoopUV *luv2 = static_cast<MLoopUV *>(dest);
/* We only support a limited subset of advanced mixing here -
* namely the mixfactor interpolation. */
@@ -935,37 +920,40 @@ static void layerCopyValue_mloopuv(const void *source,
static bool layerEqual_mloopuv(const void *data1, const void *data2)
{
- const MLoopUV *luv1 = data1, *luv2 = data2;
+ const MLoopUV *luv1 = static_cast<const MLoopUV *>(data1);
+ const MLoopUV *luv2 = static_cast<const MLoopUV *>(data2);
return len_squared_v2v2(luv1->uv, luv2->uv) < 0.00001f;
}
static void layerMultiply_mloopuv(void *data, float fac)
{
- MLoopUV *luv = data;
+ MLoopUV *luv = static_cast<MLoopUV *>(data);
mul_v2_fl(luv->uv, fac);
}
static void layerInitMinMax_mloopuv(void *vmin, void *vmax)
{
- MLoopUV *min = vmin, *max = vmax;
+ MLoopUV *min = static_cast<MLoopUV *>(vmin);
+ MLoopUV *max = static_cast<MLoopUV *>(vmax);
INIT_MINMAX2(min->uv, max->uv);
}
static void layerDoMinMax_mloopuv(const void *data, void *vmin, void *vmax)
{
- const MLoopUV *luv = data;
- MLoopUV *min = vmin, *max = vmax;
+ const MLoopUV *luv = static_cast<const MLoopUV *>(data);
+ MLoopUV *min = static_cast<MLoopUV *>(vmin);
+ MLoopUV *max = static_cast<MLoopUV *>(vmax);
minmax_v2v2_v2(min->uv, max->uv, luv->uv);
}
static void layerAdd_mloopuv(void *data1, const void *data2)
{
- MLoopUV *l1 = data1;
- const MLoopUV *l2 = data2;
+ MLoopUV *l1 = static_cast<MLoopUV *>(data1);
+ const MLoopUV *l2 = static_cast<const MLoopUV *>(data2);
add_v2_v2(l1->uv, l2->uv);
}
@@ -983,7 +971,7 @@ static void layerInterp_mloopuv(const void **sources,
for (int i = 0; i < count; i++) {
const float interp_weight = weights[i];
- const MLoopUV *src = sources[i];
+ const MLoopUV *src = static_cast<const MLoopUV *>(sources[i]);
madd_v2_v2fl(uv, src->uv, interp_weight);
if (interp_weight > 0.0f) {
flag |= src->flag;
@@ -997,7 +985,7 @@ static void layerInterp_mloopuv(const void **sources,
static bool layerValidate_mloopuv(void *data, const uint totitems, const bool do_fixes)
{
- MLoopUV *uv = data;
+ MLoopUV *uv = static_cast<MLoopUV *>(data);
bool has_errors = false;
for (int i = 0; i < totitems; i++, uv++) {
@@ -1018,45 +1006,48 @@ static void layerCopyValue_mloop_origspace(const void *source,
const int UNUSED(mixmode),
const float UNUSED(mixfactor))
{
- const OrigSpaceLoop *luv1 = source;
- OrigSpaceLoop *luv2 = dest;
+ const OrigSpaceLoop *luv1 = static_cast<const OrigSpaceLoop *>(source);
+ OrigSpaceLoop *luv2 = static_cast<OrigSpaceLoop *>(dest);
copy_v2_v2(luv2->uv, luv1->uv);
}
static bool layerEqual_mloop_origspace(const void *data1, const void *data2)
{
- const OrigSpaceLoop *luv1 = data1, *luv2 = data2;
+ const OrigSpaceLoop *luv1 = static_cast<const OrigSpaceLoop *>(data1);
+ const OrigSpaceLoop *luv2 = static_cast<const OrigSpaceLoop *>(data2);
return len_squared_v2v2(luv1->uv, luv2->uv) < 0.00001f;
}
static void layerMultiply_mloop_origspace(void *data, float fac)
{
- OrigSpaceLoop *luv = data;
+ OrigSpaceLoop *luv = static_cast<OrigSpaceLoop *>(data);
mul_v2_fl(luv->uv, fac);
}
static void layerInitMinMax_mloop_origspace(void *vmin, void *vmax)
{
- OrigSpaceLoop *min = vmin, *max = vmax;
+ OrigSpaceLoop *min = static_cast<OrigSpaceLoop *>(vmin);
+ OrigSpaceLoop *max = static_cast<OrigSpaceLoop *>(vmax);
INIT_MINMAX2(min->uv, max->uv);
}
static void layerDoMinMax_mloop_origspace(const void *data, void *vmin, void *vmax)
{
- const OrigSpaceLoop *luv = data;
- OrigSpaceLoop *min = vmin, *max = vmax;
+ const OrigSpaceLoop *luv = static_cast<const OrigSpaceLoop *>(data);
+ OrigSpaceLoop *min = static_cast<OrigSpaceLoop *>(vmin);
+ OrigSpaceLoop *max = static_cast<OrigSpaceLoop *>(vmax);
minmax_v2v2_v2(min->uv, max->uv, luv->uv);
}
static void layerAdd_mloop_origspace(void *data1, const void *data2)
{
- OrigSpaceLoop *l1 = data1;
- const OrigSpaceLoop *l2 = data2;
+ OrigSpaceLoop *l1 = static_cast<OrigSpaceLoop *>(data1);
+ const OrigSpaceLoop *l2 = static_cast<const OrigSpaceLoop *>(data2);
add_v2_v2(l1->uv, l2->uv);
}
@@ -1072,7 +1063,7 @@ static void layerInterp_mloop_origspace(const void **sources,
for (int i = 0; i < count; i++) {
const float interp_weight = weights[i];
- const OrigSpaceLoop *src = sources[i];
+ const OrigSpaceLoop *src = static_cast<const OrigSpaceLoop *>(sources[i]);
madd_v2_v2fl(uv, src->uv, interp_weight);
}
@@ -1084,7 +1075,7 @@ static void layerInterp_mloop_origspace(const void **sources,
static void layerInterp_mcol(
const void **sources, const float *weights, const float *sub_weights, int count, void *dest)
{
- MCol *mc = dest;
+ MCol *mc = static_cast<MCol *>(dest);
struct {
float a;
float r;
@@ -1098,7 +1089,7 @@ static void layerInterp_mcol(
for (int j = 0; j < 4; j++) {
if (sub_weights) {
- const MCol *src = sources[i];
+ const MCol *src = static_cast<const MCol *>(sources[i]);
for (int k = 0; k < 4; k++, sub_weight++, src++) {
const float w = (*sub_weight) * interp_weight;
col[j].a += src->a * w;
@@ -1108,7 +1099,7 @@ static void layerInterp_mcol(
}
}
else {
- const MCol *src = sources[i];
+ const MCol *src = static_cast<const MCol *>(sources[i]);
col[j].a += src[j].a * interp_weight;
col[j].r += src[j].r * interp_weight;
col[j].g += src[j].g * interp_weight;
@@ -1131,7 +1122,7 @@ static void layerInterp_mcol(
static void layerSwap_mcol(void *data, const int *corner_indices)
{
- MCol *mcol = data;
+ MCol *mcol = static_cast<MCol *>(data);
MCol col[4];
for (int j = 0; j < 4; j++) {
@@ -1205,7 +1196,7 @@ static void layerInterp_shapekey(const void **sources,
static void layerDefault_mvert_skin(void *data, int count)
{
- MVertSkin *vs = data;
+ MVertSkin *vs = static_cast<MVertSkin *>(data);
for (int i = 0; i < count; i++) {
copy_v3_fl(vs[i].radius, 0.25f);
@@ -1229,20 +1220,20 @@ static void layerInterp_mvert_skin(const void **sources,
for (int i = 0; i < count; i++) {
const float interp_weight = weights[i];
- const MVertSkin *vs_src = sources[i];
+ const MVertSkin *vs_src = static_cast<const MVertSkin *>(sources[i]);
madd_v3_v3fl(radius, vs_src->radius, interp_weight);
}
/* Delay writing to the destination in case dest is in sources. */
- MVertSkin *vs_dst = dest;
+ MVertSkin *vs_dst = static_cast<MVertSkin *>(dest);
copy_v3_v3(vs_dst->radius, radius);
vs_dst->flag &= ~MVERT_SKIN_ROOT;
}
static void layerSwap_flnor(void *data, const int *corner_indices)
{
- short(*flnors)[4][3] = data;
+ short(*flnors)[4][3] = static_cast<short(*)[4][3]>(data);
short nors[4][3];
int i = 4;
@@ -1266,8 +1257,8 @@ static void layerCopyValue_propcol(const void *source,
const int mixmode,
const float mixfactor)
{
- const MPropCol *m1 = source;
- MPropCol *m2 = dest;
+ const MPropCol *m1 = static_cast<const MPropCol *>(source);
+ MPropCol *m2 = static_cast<MPropCol *>(dest);
float tmp_col[4];
if (ELEM(mixmode,
@@ -1311,7 +1302,8 @@ static void layerCopyValue_propcol(const void *source,
static bool layerEqual_propcol(const void *data1, const void *data2)
{
- const MPropCol *m1 = data1, *m2 = data2;
+ const MPropCol *m1 = static_cast<const MPropCol *>(data1);
+ const MPropCol *m2 = static_cast<const MPropCol *>(data2);
float tot = 0;
for (int i = 0; i < 4; i++) {
@@ -1324,27 +1316,29 @@ static bool layerEqual_propcol(const void *data1, const void *data2)
static void layerMultiply_propcol(void *data, float fac)
{
- MPropCol *m = data;
+ MPropCol *m = static_cast<MPropCol *>(data);
mul_v4_fl(m->color, fac);
}
static void layerAdd_propcol(void *data1, const void *data2)
{
- MPropCol *m = data1;
- const MPropCol *m2 = data2;
+ MPropCol *m = static_cast<MPropCol *>(data1);
+ const MPropCol *m2 = static_cast<const MPropCol *>(data2);
add_v4_v4(m->color, m2->color);
}
static void layerDoMinMax_propcol(const void *data, void *vmin, void *vmax)
{
- const MPropCol *m = data;
- MPropCol *min = vmin, *max = vmax;
+ const MPropCol *m = static_cast<const MPropCol *>(data);
+ MPropCol *min = static_cast<MPropCol *>(vmin);
+ MPropCol *max = static_cast<MPropCol *>(vmax);
minmax_v4v4_v4(min->color, max->color, m->color);
}
static void layerInitMinMax_propcol(void *vmin, void *vmax)
{
- MPropCol *min = vmin, *max = vmax;
+ MPropCol *min = static_cast<MPropCol *>(vmin);
+ MPropCol *max = static_cast<MPropCol *>(vmax);
copy_v4_fl(min->color, FLT_MAX);
copy_v4_fl(max->color, FLT_MIN);
@@ -1366,17 +1360,17 @@ static void layerInterp_propcol(const void **sources,
int count,
void *dest)
{
- MPropCol *mc = dest;
+ MPropCol *mc = static_cast<MPropCol *>(dest);
float col[4] = {0.0f, 0.0f, 0.0f, 0.0f};
for (int i = 0; i < count; i++) {
const float interp_weight = weights[i];
- const MPropCol *src = sources[i];
+ const MPropCol *src = static_cast<const MPropCol *>(sources[i]);
madd_v4_v4fl(col, src->color, interp_weight);
}
copy_v4_v4(mc->color, col);
}
-static int layerMaxNum_propcol(void)
+static int layerMaxNum_propcol()
{
return MAX_MCOL;
}
@@ -1390,7 +1384,7 @@ static void layerInterp_propfloat3(const void **sources,
vec3f result = {0.0f, 0.0f, 0.0f};
for (int i = 0; i < count; i++) {
const float interp_weight = weights[i];
- const vec3f *src = sources[i];
+ const vec3f *src = static_cast<const vec3f *>(sources[i]);
madd_v3_v3fl(&result.x, &src->x, interp_weight);
}
copy_v3_v3((float *)dest, &result.x);
@@ -1398,7 +1392,7 @@ static void layerInterp_propfloat3(const void **sources,
static void layerMultiply_propfloat3(void *data, float fac)
{
- vec3f *vec = data;
+ vec3f *vec = static_cast<vec3f *>(data);
vec->x *= fac;
vec->y *= fac;
vec->z *= fac;
@@ -1406,8 +1400,8 @@ static void layerMultiply_propfloat3(void *data, float fac)
static void layerAdd_propfloat3(void *data1, const void *data2)
{
- vec3f *vec1 = data1;
- const vec3f *vec2 = data2;
+ vec3f *vec1 = static_cast<vec3f *>(data1);
+ const vec3f *vec2 = static_cast<const vec3f *>(data2);
vec1->x += vec2->x;
vec1->y += vec2->y;
vec1->z += vec2->z;
@@ -1415,7 +1409,7 @@ static void layerAdd_propfloat3(void *data1, const void *data2)
static bool layerValidate_propfloat3(void *data, const uint totitems, const bool do_fixes)
{
- float *values = data;
+ float *values = static_cast<float *>(data);
bool has_errors = false;
for (int i = 0; i < totitems * 3; i++) {
if (!isfinite(values[i])) {
@@ -1437,7 +1431,7 @@ static void layerInterp_propfloat2(const void **sources,
vec2f result = {0.0f, 0.0f};
for (int i = 0; i < count; i++) {
const float interp_weight = weights[i];
- const vec2f *src = sources[i];
+ const vec2f *src = static_cast<const vec2f *>(sources[i]);
madd_v2_v2fl(&result.x, &src->x, interp_weight);
}
copy_v2_v2((float *)dest, &result.x);
@@ -1445,22 +1439,22 @@ static void layerInterp_propfloat2(const void **sources,
static void layerMultiply_propfloat2(void *data, float fac)
{
- vec2f *vec = data;
+ vec2f *vec = static_cast<vec2f *>(data);
vec->x *= fac;
vec->y *= fac;
}
static void layerAdd_propfloat2(void *data1, const void *data2)
{
- vec2f *vec1 = data1;
- const vec2f *vec2 = data2;
+ vec2f *vec1 = static_cast<vec2f *>(data1);
+ const vec2f *vec2 = static_cast<const vec2f *>(data2);
vec1->x += vec2->x;
vec1->y += vec2->y;
}
static bool layerValidate_propfloat2(void *data, const uint totitems, const bool do_fixes)
{
- float *values = data;
+ float *values = static_cast<float *>(data);
bool has_errors = false;
for (int i = 0; i < totitems * 2; i++) {
if (!isfinite(values[i])) {
@@ -1475,136 +1469,130 @@ static bool layerValidate_propfloat2(void *data, const uint totitems, const bool
static const LayerTypeInfo LAYERTYPEINFO[CD_NUMTYPES] = {
/* 0: CD_MVERT */
- {sizeof(MVert), "MVert", 1, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(MVert), "MVert", 1, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 1: CD_MSTICKY */ /* DEPRECATED */
- {sizeof(float[2]), "", 1, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(float[2]), "", 1, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 2: CD_MDEFORMVERT */
{sizeof(MDeformVert),
"MDeformVert",
1,
- NULL,
+ nullptr,
layerCopy_mdeformvert,
layerFree_mdeformvert,
layerInterp_mdeformvert,
- NULL,
- NULL},
+ nullptr,
+ nullptr},
/* 3: CD_MEDGE */
- {sizeof(MEdge), "MEdge", 1, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(MEdge), "MEdge", 1, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 4: CD_MFACE */
- {sizeof(MFace), "MFace", 1, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(MFace), "MFace", 1, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 5: CD_MTFACE */
- {sizeof(MTFace),
- "MTFace",
- 1,
- N_("UVMap"),
- layerCopy_tface,
- NULL,
- layerInterp_tface,
- layerSwap_tface,
- layerDefault_tface,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- layerMaxNum_tface},
+ {sizeof(MTFace),
+ "MTFace",
+ 1,
+ N_("UVMap"),
+ layerCopy_tface,
+ nullptr,
+ layerInterp_tface,
+ layerSwap_tface,
+ layerDefault_tface,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ layerMaxNum_tface},
/* 6: CD_MCOL */
/* 4 MCol structs per face */
{sizeof(MCol[4]),
"MCol",
4,
N_("Col"),
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerInterp_mcol,
layerSwap_mcol,
layerDefault_mcol,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
layerMaxNum_mloopcol},
/* 7: CD_ORIGINDEX */
- {sizeof(int), "", 0, NULL, NULL, NULL, NULL, NULL, layerDefault_origindex},
+ {sizeof(int), "", 0, nullptr, nullptr, nullptr, nullptr, nullptr, layerDefault_origindex},
/* 8: CD_NORMAL */
/* 3 floats per normal vector */
{sizeof(float[3]),
"vec3f",
1,
- NULL,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
+ nullptr,
layerInterp_normal,
- NULL,
- NULL,
- layerValidate_normal,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
layerCopyValue_normal},
/* 9: CD_FACEMAP */
- {sizeof(int), "", 0, NULL, NULL, NULL, NULL, NULL, layerDefault_fmap, NULL},
+ {sizeof(int), "", 0, nullptr, nullptr, nullptr, nullptr, nullptr, layerDefault_fmap, nullptr},
/* 10: CD_PROP_FLOAT */
{sizeof(MFloatProperty),
"MFloatProperty",
1,
N_("Float"),
layerCopy_propFloat,
- NULL,
+ nullptr,
layerInterp_propFloat,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerValidate_propFloat},
/* 11: CD_PROP_INT32 */
- {sizeof(MIntProperty), "MIntProperty", 1, N_("Int"), layerCopy_propInt, NULL, NULL, NULL},
+ {sizeof(MIntProperty),
+ "MIntProperty",
+ 1,
+ N_("Int"),
+ layerCopy_propInt,
+ nullptr,
+ nullptr,
+ nullptr},
/* 12: CD_PROP_STRING */
{sizeof(MStringProperty),
"MStringProperty",
1,
N_("String"),
layerCopy_propString,
- NULL,
- NULL,
- NULL},
+ nullptr,
+ nullptr,
+ nullptr},
/* 13: CD_ORIGSPACE */
{sizeof(OrigSpaceFace),
"OrigSpaceFace",
1,
N_("UVMap"),
layerCopy_origspace_face,
- NULL,
+ nullptr,
layerInterp_origspace_face,
layerSwap_origspace_face,
layerDefault_origspace_face},
/* 14: CD_ORCO */
- {sizeof(float[3]), "", 0, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(float[3]), "", 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 15: CD_MTEXPOLY */ /* DEPRECATED */
/* NOTE: when we expose the UV Map / TexFace split to the user,
* change this back to face Texture. */
- {sizeof(int), "", 0, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(int), "", 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 16: CD_MLOOPUV */
{sizeof(MLoopUV),
"MLoopUV",
1,
N_("UVMap"),
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerInterp_mloopuv,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerValidate_mloopuv,
layerEqual_mloopuv,
layerMultiply_mloopuv,
@@ -1612,50 +1600,50 @@ static const LayerTypeInfo LAYERTYPEINFO[CD_NUMTYPES] = {
layerAdd_mloopuv,
layerDoMinMax_mloopuv,
layerCopyValue_mloopuv,
- NULL,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
+ nullptr,
layerMaxNum_tface},
/* 17: CD_MLOOPCOL */
{sizeof(MLoopCol),
"MLoopCol",
1,
N_("Col"),
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerInterp_mloopcol,
- NULL,
+ nullptr,
layerDefault_mloopcol,
- NULL,
+ nullptr,
layerEqual_mloopcol,
layerMultiply_mloopcol,
layerInitMinMax_mloopcol,
layerAdd_mloopcol,
layerDoMinMax_mloopcol,
layerCopyValue_mloopcol,
- NULL,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
+ nullptr,
layerMaxNum_mloopcol},
/* 18: CD_TANGENT */
- {sizeof(float[4][4]), "", 0, N_("Tangent"), NULL, NULL, NULL, NULL, NULL},
+ {sizeof(float[4][4]), "", 0, N_("Tangent"), nullptr, nullptr, nullptr, nullptr, nullptr},
/* 19: CD_MDISPS */
{sizeof(MDisps),
"MDisps",
1,
- NULL,
+ nullptr,
layerCopy_mdisps,
layerFree_mdisps,
- NULL,
+ nullptr,
layerSwap_mdisps,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
layerRead_mdisps,
layerWrite_mdisps,
layerFilesize_mdisps},
@@ -1664,52 +1652,60 @@ static const LayerTypeInfo LAYERTYPEINFO[CD_NUMTYPES] = {
"MCol",
4,
N_("PreviewCol"),
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerInterp_mcol,
layerSwap_mcol,
layerDefault_mcol},
/* 21: CD_ID_MCOL */ /* DEPRECATED */
- {sizeof(MCol[4]), "", 0, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(MCol[4]), "", 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 22: CD_TEXTURE_MCOL */
{sizeof(MCol[4]),
"MCol",
4,
N_("TexturedCol"),
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerInterp_mcol,
layerSwap_mcol,
layerDefault_mcol},
/* 23: CD_CLOTH_ORCO */
- {sizeof(float[3]), "", 0, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(float[3]), "", 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 24: CD_RECAST */
- {sizeof(MRecast), "MRecast", 1, N_("Recast"), NULL, NULL, NULL, NULL},
-
- /* BMESH ONLY */
+ {sizeof(MRecast), "MRecast", 1, N_("Recast"), nullptr, nullptr, nullptr, nullptr},
/* 25: CD_MPOLY */
- {sizeof(MPoly), "MPoly", 1, N_("NGon Face"), NULL, NULL, NULL, NULL, NULL},
+ {sizeof(MPoly), "MPoly", 1, N_("NGon Face"), nullptr, nullptr, nullptr, nullptr, nullptr},
/* 26: CD_MLOOP */
- {sizeof(MLoop), "MLoop", 1, N_("NGon Face-Vertex"), NULL, NULL, NULL, NULL, NULL},
+ {sizeof(MLoop),
+ "MLoop",
+ 1,
+ N_("NGon Face-Vertex"),
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr},
/* 27: CD_SHAPE_KEYINDEX */
- {sizeof(int), "", 0, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(int), "", 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 28: CD_SHAPEKEY */
- {sizeof(float[3]), "", 0, N_("ShapeKey"), NULL, NULL, layerInterp_shapekey},
+ {sizeof(float[3]), "", 0, N_("ShapeKey"), nullptr, nullptr, layerInterp_shapekey},
/* 29: CD_BWEIGHT */
- {sizeof(float), "", 0, N_("BevelWeight"), NULL, NULL, layerInterp_bweight},
+ {sizeof(float), "", 0, N_("BevelWeight"), nullptr, nullptr, layerInterp_bweight},
/* 30: CD_CREASE */
- {sizeof(float), "", 0, N_("SubSurfCrease"), NULL, NULL, layerInterp_bweight},
+ /* NOTE: we do not interpolate crease data as it should be either inherited for subdivided
+ * edges, or for vertex creases, only present on the original vertex. */
+ {sizeof(float), "", 0, N_("SubSurfCrease"), nullptr, nullptr, nullptr},
/* 31: CD_ORIGSPACE_MLOOP */
{sizeof(OrigSpaceLoop),
"OrigSpaceLoop",
1,
N_("OS Loop"),
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerInterp_mloop_origspace,
- NULL,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
+ nullptr,
layerEqual_mloop_origspace,
layerMultiply_mloop_origspace,
layerInitMinMax_mloop_origspace,
@@ -1721,12 +1717,12 @@ static const LayerTypeInfo LAYERTYPEINFO[CD_NUMTYPES] = {
"MLoopCol",
1,
N_("PreviewLoopCol"),
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerInterp_mloopcol,
- NULL,
+ nullptr,
layerDefault_mloopcol,
- NULL,
+ nullptr,
layerEqual_mloopcol,
layerMultiply_mloopcol,
layerInitMinMax_mloopcol,
@@ -1737,125 +1733,138 @@ static const LayerTypeInfo LAYERTYPEINFO[CD_NUMTYPES] = {
{sizeof(void *),
"",
1,
- NULL,
+ nullptr,
layerCopy_bmesh_elem_py_ptr,
layerFree_bmesh_elem_py_ptr,
- NULL,
- NULL,
- NULL},
-
- /* END BMESH ONLY */
-
+ nullptr,
+ nullptr,
+ nullptr},
/* 34: CD_PAINT_MASK */
- {sizeof(float), "", 0, NULL, NULL, NULL, layerInterp_paint_mask, NULL, NULL},
+ {sizeof(float), "", 0, nullptr, nullptr, nullptr, layerInterp_paint_mask, nullptr, nullptr},
/* 35: CD_GRID_PAINT_MASK */
{sizeof(GridPaintMask),
"GridPaintMask",
1,
- NULL,
+ nullptr,
layerCopy_grid_paint_mask,
layerFree_grid_paint_mask,
- NULL,
- NULL,
- NULL},
+ nullptr,
+ nullptr,
+ nullptr},
/* 36: CD_MVERT_SKIN */
{sizeof(MVertSkin),
"MVertSkin",
1,
- NULL,
+ nullptr,
layerCopy_mvert_skin,
- NULL,
+ nullptr,
layerInterp_mvert_skin,
- NULL,
+ nullptr,
layerDefault_mvert_skin},
/* 37: CD_FREESTYLE_EDGE */
- {sizeof(FreestyleEdge), "FreestyleEdge", 1, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(FreestyleEdge),
+ "FreestyleEdge",
+ 1,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr},
/* 38: CD_FREESTYLE_FACE */
- {sizeof(FreestyleFace), "FreestyleFace", 1, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(FreestyleFace),
+ "FreestyleFace",
+ 1,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr},
/* 39: CD_MLOOPTANGENT */
- {sizeof(float[4]), "", 0, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(float[4]), "", 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 40: CD_TESSLOOPNORMAL */
- {sizeof(short[4][3]), "", 0, NULL, NULL, NULL, NULL, layerSwap_flnor, NULL},
+ {sizeof(short[4][3]), "", 0, nullptr, nullptr, nullptr, nullptr, layerSwap_flnor, nullptr},
/* 41: CD_CUSTOMLOOPNORMAL */
- {sizeof(short[2]), "vec2s", 1, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(short[2]), "vec2s", 1, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 42: CD_SCULPT_FACE_SETS */
- {sizeof(int), "", 0, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(int), "", 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 43: CD_LOCATION */
- {sizeof(float[3]), "vec3f", 1, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(float[3]), "vec3f", 1, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 44: CD_RADIUS */
- {sizeof(float), "MFloatProperty", 1, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(float), "MFloatProperty", 1, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 45: CD_HAIRCURVE */
- {sizeof(HairCurve), "HairCurve", 1, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(HairCurve), "HairCurve", 1, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 46: CD_HAIRMAPPING */
- {sizeof(HairMapping), "HairMapping", 1, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(HairMapping), "HairMapping", 1, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
/* 47: CD_PROP_COLOR */
{sizeof(MPropCol),
"MPropCol",
1,
N_("Color"),
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerInterp_propcol,
- NULL,
+ nullptr,
layerDefault_propcol,
- NULL,
+ nullptr,
layerEqual_propcol,
layerMultiply_propcol,
layerInitMinMax_propcol,
layerAdd_propcol,
layerDoMinMax_propcol,
layerCopyValue_propcol,
- NULL,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
+ nullptr,
layerMaxNum_propcol},
/* 48: CD_PROP_FLOAT3 */
{sizeof(float[3]),
"vec3f",
1,
N_("Float3"),
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerInterp_propfloat3,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerValidate_propfloat3,
- NULL,
+ nullptr,
layerMultiply_propfloat3,
- NULL,
+ nullptr,
layerAdd_propfloat3},
/* 49: CD_PROP_FLOAT2 */
{sizeof(float[2]),
"vec2f",
1,
N_("Float2"),
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerInterp_propfloat2,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
layerValidate_propfloat2,
- NULL,
+ nullptr,
layerMultiply_propfloat2,
- NULL,
+ nullptr,
layerAdd_propfloat2},
/* 50: CD_PROP_BOOL */
{sizeof(bool),
"bool",
1,
N_("Boolean"),
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL},
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr},
/* 51: CD_HAIRLENGTH */
- {sizeof(float), "float", 1, NULL, NULL, NULL, NULL, NULL, NULL},
+ {sizeof(float), "float", 1, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr},
};
static const char *LAYERTYPENAMES[CD_NUMTYPES] = {
@@ -1916,95 +1925,106 @@ static const char *LAYERTYPENAMES[CD_NUMTYPES] = {
};
const CustomData_MeshMasks CD_MASK_BAREMESH = {
- .vmask = CD_MASK_MVERT | CD_MASK_BWEIGHT,
- .emask = CD_MASK_MEDGE | CD_MASK_BWEIGHT,
- .fmask = 0,
- .lmask = CD_MASK_MLOOP,
- .pmask = CD_MASK_MPOLY | CD_MASK_FACEMAP,
+ /* vmask */ CD_MASK_MVERT | CD_MASK_BWEIGHT,
+ /* emask */ CD_MASK_MEDGE | CD_MASK_BWEIGHT,
+ /* fmask */ 0,
+ /* pmask */ CD_MASK_MPOLY | CD_MASK_FACEMAP,
+ /* lmask */ CD_MASK_MLOOP,
};
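The switch from `.vmask = ...` to positional values with `/* vmask */` comments, here and in the mask constants that follow, is forced by the language change: designated initializers are C99 and only reached C++ in C++20, which this code does not yet target. Positional initialization also has to follow the declaration order of CustomData_MeshMasks (vmask, emask, fmask, pmask, lmask), which is why pmask now appears before lmask. A minimal sketch with a hypothetical struct:

struct DemoMasks {
  unsigned long long vmask, emask, fmask, pmask, lmask;
};

/* C99 form, no longer valid here:  const DemoMasks demo = {.vmask = 1, .lmask = 2}; */
const DemoMasks demo = {
    /* vmask */ 1,
    /* emask */ 0,
    /* fmask */ 0,
    /* pmask */ 0,
    /* lmask */ 2,
};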
const CustomData_MeshMasks CD_MASK_BAREMESH_ORIGINDEX = {
- .vmask = CD_MASK_MVERT | CD_MASK_BWEIGHT | CD_MASK_ORIGINDEX,
- .emask = CD_MASK_MEDGE | CD_MASK_BWEIGHT | CD_MASK_ORIGINDEX,
- .fmask = 0,
- .lmask = CD_MASK_MLOOP,
- .pmask = CD_MASK_MPOLY | CD_MASK_FACEMAP | CD_MASK_ORIGINDEX,
+ /* vmask */ CD_MASK_MVERT | CD_MASK_BWEIGHT | CD_MASK_ORIGINDEX,
+ /* emask */ CD_MASK_MEDGE | CD_MASK_BWEIGHT | CD_MASK_ORIGINDEX,
+ /* fmask */ 0,
+ /* pmask */ CD_MASK_MPOLY | CD_MASK_FACEMAP | CD_MASK_ORIGINDEX,
+ /* lmask */ CD_MASK_MLOOP,
};
const CustomData_MeshMasks CD_MASK_MESH = {
- .vmask = (CD_MASK_MVERT | CD_MASK_MDEFORMVERT | CD_MASK_MVERT_SKIN | CD_MASK_PAINT_MASK |
- CD_MASK_PROP_ALL | CD_MASK_PROP_COLOR),
- .emask = (CD_MASK_MEDGE | CD_MASK_FREESTYLE_EDGE | CD_MASK_PROP_ALL),
- .fmask = 0,
- .lmask = (CD_MASK_MLOOP | CD_MASK_MDISPS | CD_MASK_MLOOPUV | CD_MASK_MLOOPCOL |
- CD_MASK_CUSTOMLOOPNORMAL | CD_MASK_GRID_PAINT_MASK | CD_MASK_PROP_ALL),
- .pmask = (CD_MASK_MPOLY | CD_MASK_FACEMAP | CD_MASK_FREESTYLE_FACE | CD_MASK_PROP_ALL |
- CD_MASK_SCULPT_FACE_SETS),
+ /* vmask */ (CD_MASK_MVERT | CD_MASK_MDEFORMVERT | CD_MASK_MVERT_SKIN | CD_MASK_PAINT_MASK |
+ CD_MASK_PROP_ALL | CD_MASK_PROP_COLOR | CD_MASK_CREASE),
+ /* emask */ (CD_MASK_MEDGE | CD_MASK_FREESTYLE_EDGE | CD_MASK_PROP_ALL),
+ /* fmask */ 0,
+ /* pmask */
+ (CD_MASK_MPOLY | CD_MASK_FACEMAP | CD_MASK_FREESTYLE_FACE | CD_MASK_PROP_ALL |
+ CD_MASK_SCULPT_FACE_SETS),
+ /* lmask */
+ (CD_MASK_MLOOP | CD_MASK_MDISPS | CD_MASK_MLOOPUV | CD_MASK_MLOOPCOL |
+ CD_MASK_CUSTOMLOOPNORMAL | CD_MASK_GRID_PAINT_MASK | CD_MASK_PROP_ALL),
};
const CustomData_MeshMasks CD_MASK_EDITMESH = {
- .vmask = (CD_MASK_MDEFORMVERT | CD_MASK_PAINT_MASK | CD_MASK_MVERT_SKIN | CD_MASK_SHAPEKEY |
- CD_MASK_SHAPE_KEYINDEX | CD_MASK_PROP_ALL | CD_MASK_PROP_COLOR),
- .emask = (CD_MASK_PROP_ALL),
- .fmask = 0,
- .lmask = (CD_MASK_MDISPS | CD_MASK_MLOOPUV | CD_MASK_MLOOPCOL | CD_MASK_CUSTOMLOOPNORMAL |
- CD_MASK_GRID_PAINT_MASK | CD_MASK_PROP_ALL),
- .pmask = (CD_MASK_FACEMAP | CD_MASK_PROP_ALL | CD_MASK_SCULPT_FACE_SETS),
+ /* vmask */ (CD_MASK_MDEFORMVERT | CD_MASK_PAINT_MASK | CD_MASK_MVERT_SKIN | CD_MASK_SHAPEKEY |
+ CD_MASK_SHAPE_KEYINDEX | CD_MASK_PROP_ALL | CD_MASK_PROP_COLOR | CD_MASK_CREASE),
+ /* emask */ (CD_MASK_PROP_ALL),
+ /* fmask */ 0,
+ /* pmask */ (CD_MASK_FACEMAP | CD_MASK_PROP_ALL | CD_MASK_SCULPT_FACE_SETS),
+ /* lmask */
+ (CD_MASK_MDISPS | CD_MASK_MLOOPUV | CD_MASK_MLOOPCOL | CD_MASK_CUSTOMLOOPNORMAL |
+ CD_MASK_GRID_PAINT_MASK | CD_MASK_PROP_ALL),
};
const CustomData_MeshMasks CD_MASK_DERIVEDMESH = {
- .vmask = (CD_MASK_ORIGINDEX | CD_MASK_MDEFORMVERT | CD_MASK_SHAPEKEY | CD_MASK_MVERT_SKIN |
- CD_MASK_PAINT_MASK | CD_MASK_ORCO | CD_MASK_CLOTH_ORCO | CD_MASK_PROP_ALL |
- CD_MASK_PROP_COLOR),
- .emask = (CD_MASK_ORIGINDEX | CD_MASK_FREESTYLE_EDGE | CD_MASK_PROP_ALL),
- .fmask = (CD_MASK_ORIGINDEX | CD_MASK_ORIGSPACE | CD_MASK_PREVIEW_MCOL | CD_MASK_TANGENT),
- .lmask = (CD_MASK_MLOOPUV | CD_MASK_MLOOPCOL | CD_MASK_CUSTOMLOOPNORMAL |
- CD_MASK_PREVIEW_MLOOPCOL | CD_MASK_ORIGSPACE_MLOOP |
- CD_MASK_PROP_ALL), /* XXX MISSING CD_MASK_MLOOPTANGENT ? */
- .pmask = (CD_MASK_ORIGINDEX | CD_MASK_FREESTYLE_FACE | CD_MASK_FACEMAP | CD_MASK_PROP_ALL |
- CD_MASK_SCULPT_FACE_SETS),
+ /* vmask */ (CD_MASK_ORIGINDEX | CD_MASK_MDEFORMVERT | CD_MASK_SHAPEKEY | CD_MASK_MVERT_SKIN |
+ CD_MASK_PAINT_MASK | CD_MASK_ORCO | CD_MASK_CLOTH_ORCO | CD_MASK_PROP_ALL |
+ CD_MASK_PROP_COLOR | CD_MASK_CREASE),
+ /* emask */ (CD_MASK_ORIGINDEX | CD_MASK_FREESTYLE_EDGE | CD_MASK_PROP_ALL),
+ /* fmask */ (CD_MASK_ORIGINDEX | CD_MASK_ORIGSPACE | CD_MASK_PREVIEW_MCOL | CD_MASK_TANGENT),
+ /* pmask */
+ (CD_MASK_ORIGINDEX | CD_MASK_FREESTYLE_FACE | CD_MASK_FACEMAP | CD_MASK_PROP_ALL |
+ CD_MASK_SCULPT_FACE_SETS),
+ /* lmask */
+ (CD_MASK_MLOOPUV | CD_MASK_MLOOPCOL | CD_MASK_CUSTOMLOOPNORMAL | CD_MASK_PREVIEW_MLOOPCOL |
+ CD_MASK_ORIGSPACE_MLOOP | CD_MASK_PROP_ALL), /* XXX MISSING CD_MASK_MLOOPTANGENT ? */
};
const CustomData_MeshMasks CD_MASK_BMESH = {
- .vmask = (CD_MASK_MDEFORMVERT | CD_MASK_BWEIGHT | CD_MASK_MVERT_SKIN | CD_MASK_SHAPEKEY |
- CD_MASK_SHAPE_KEYINDEX | CD_MASK_PAINT_MASK | CD_MASK_PROP_ALL | CD_MASK_PROP_COLOR),
- .emask = (CD_MASK_BWEIGHT | CD_MASK_CREASE | CD_MASK_FREESTYLE_EDGE | CD_MASK_PROP_ALL),
- .fmask = 0,
- .lmask = (CD_MASK_MDISPS | CD_MASK_MLOOPUV | CD_MASK_MLOOPCOL | CD_MASK_CUSTOMLOOPNORMAL |
- CD_MASK_GRID_PAINT_MASK | CD_MASK_PROP_ALL),
- .pmask = (CD_MASK_FREESTYLE_FACE | CD_MASK_FACEMAP | CD_MASK_PROP_ALL |
- CD_MASK_SCULPT_FACE_SETS),
+ /* vmask */ (CD_MASK_MDEFORMVERT | CD_MASK_BWEIGHT | CD_MASK_MVERT_SKIN | CD_MASK_SHAPEKEY |
+ CD_MASK_SHAPE_KEYINDEX | CD_MASK_PAINT_MASK | CD_MASK_PROP_ALL |
+ CD_MASK_PROP_COLOR | CD_MASK_CREASE),
+ /* emask */ (CD_MASK_BWEIGHT | CD_MASK_CREASE | CD_MASK_FREESTYLE_EDGE | CD_MASK_PROP_ALL),
+ /* fmask */ 0,
+ /* pmask */
+ (CD_MASK_FREESTYLE_FACE | CD_MASK_FACEMAP | CD_MASK_PROP_ALL | CD_MASK_SCULPT_FACE_SETS),
+ /* lmask */
+ (CD_MASK_MDISPS | CD_MASK_MLOOPUV | CD_MASK_MLOOPCOL | CD_MASK_CUSTOMLOOPNORMAL |
+ CD_MASK_GRID_PAINT_MASK | CD_MASK_PROP_ALL),
};
/**
 * Covers values copied by #mesh_loops_to_tessdata.
 */
const CustomData_MeshMasks CD_MASK_FACECORNERS = {
- .vmask = 0,
- .emask = 0,
- .fmask = (CD_MASK_MTFACE | CD_MASK_MCOL | CD_MASK_PREVIEW_MCOL | CD_MASK_ORIGSPACE |
- CD_MASK_TESSLOOPNORMAL | CD_MASK_TANGENT),
- .lmask = (CD_MASK_MLOOPUV | CD_MASK_MLOOPCOL | CD_MASK_PREVIEW_MLOOPCOL |
- CD_MASK_ORIGSPACE_MLOOP | CD_MASK_NORMAL | CD_MASK_MLOOPTANGENT),
- .pmask = 0,
+ /* vmask */ 0,
+ /* emask */ 0,
+ /* fmask */
+ (CD_MASK_MTFACE | CD_MASK_MCOL | CD_MASK_PREVIEW_MCOL | CD_MASK_ORIGSPACE |
+ CD_MASK_TESSLOOPNORMAL | CD_MASK_TANGENT),
+ /* pmask */ 0,
+ /* lmask */
+ (CD_MASK_MLOOPUV | CD_MASK_MLOOPCOL | CD_MASK_PREVIEW_MLOOPCOL | CD_MASK_ORIGSPACE_MLOOP |
+ CD_MASK_NORMAL | CD_MASK_MLOOPTANGENT),
};
const CustomData_MeshMasks CD_MASK_EVERYTHING = {
- .vmask = (CD_MASK_MVERT | CD_MASK_BM_ELEM_PYPTR | CD_MASK_ORIGINDEX | CD_MASK_NORMAL |
- CD_MASK_MDEFORMVERT | CD_MASK_BWEIGHT | CD_MASK_MVERT_SKIN | CD_MASK_ORCO |
- CD_MASK_CLOTH_ORCO | CD_MASK_SHAPEKEY | CD_MASK_SHAPE_KEYINDEX | CD_MASK_PAINT_MASK |
- CD_MASK_PROP_ALL | CD_MASK_PROP_COLOR),
- .emask = (CD_MASK_MEDGE | CD_MASK_BM_ELEM_PYPTR | CD_MASK_ORIGINDEX | CD_MASK_BWEIGHT |
- CD_MASK_CREASE | CD_MASK_FREESTYLE_EDGE | CD_MASK_PROP_ALL),
- .fmask = (CD_MASK_MFACE | CD_MASK_ORIGINDEX | CD_MASK_NORMAL | CD_MASK_MTFACE | CD_MASK_MCOL |
- CD_MASK_ORIGSPACE | CD_MASK_TANGENT | CD_MASK_TESSLOOPNORMAL | CD_MASK_PREVIEW_MCOL |
- CD_MASK_PROP_ALL),
- .lmask = (CD_MASK_MLOOP | CD_MASK_BM_ELEM_PYPTR | CD_MASK_MDISPS | CD_MASK_NORMAL |
- CD_MASK_MLOOPUV | CD_MASK_MLOOPCOL | CD_MASK_CUSTOMLOOPNORMAL |
- CD_MASK_MLOOPTANGENT | CD_MASK_PREVIEW_MLOOPCOL | CD_MASK_ORIGSPACE_MLOOP |
- CD_MASK_GRID_PAINT_MASK | CD_MASK_PROP_ALL),
- .pmask = (CD_MASK_MPOLY | CD_MASK_BM_ELEM_PYPTR | CD_MASK_ORIGINDEX | CD_MASK_NORMAL |
- CD_MASK_FACEMAP | CD_MASK_FREESTYLE_FACE | CD_MASK_PROP_ALL |
- CD_MASK_SCULPT_FACE_SETS),
+ /* vmask */ (CD_MASK_MVERT | CD_MASK_BM_ELEM_PYPTR | CD_MASK_ORIGINDEX | CD_MASK_NORMAL |
+ CD_MASK_MDEFORMVERT | CD_MASK_BWEIGHT | CD_MASK_MVERT_SKIN | CD_MASK_ORCO |
+ CD_MASK_CLOTH_ORCO | CD_MASK_SHAPEKEY | CD_MASK_SHAPE_KEYINDEX |
+ CD_MASK_PAINT_MASK | CD_MASK_PROP_ALL | CD_MASK_PROP_COLOR | CD_MASK_CREASE),
+ /* emask */
+ (CD_MASK_MEDGE | CD_MASK_BM_ELEM_PYPTR | CD_MASK_ORIGINDEX | CD_MASK_BWEIGHT | CD_MASK_CREASE |
+ CD_MASK_FREESTYLE_EDGE | CD_MASK_PROP_ALL),
+ /* fmask */
+ (CD_MASK_MFACE | CD_MASK_ORIGINDEX | CD_MASK_NORMAL | CD_MASK_MTFACE | CD_MASK_MCOL |
+ CD_MASK_ORIGSPACE | CD_MASK_TANGENT | CD_MASK_TESSLOOPNORMAL | CD_MASK_PREVIEW_MCOL |
+ CD_MASK_PROP_ALL),
+ /* pmask */
+ (CD_MASK_MPOLY | CD_MASK_BM_ELEM_PYPTR | CD_MASK_ORIGINDEX | CD_MASK_NORMAL | CD_MASK_FACEMAP |
+ CD_MASK_FREESTYLE_FACE | CD_MASK_PROP_ALL | CD_MASK_SCULPT_FACE_SETS),
+ /* lmask */
+ (CD_MASK_MLOOP | CD_MASK_BM_ELEM_PYPTR | CD_MASK_MDISPS | CD_MASK_NORMAL | CD_MASK_MLOOPUV |
+ CD_MASK_MLOOPCOL | CD_MASK_CUSTOMLOOPNORMAL | CD_MASK_MLOOPTANGENT |
+ CD_MASK_PREVIEW_MLOOPCOL | CD_MASK_ORIGSPACE_MLOOP | CD_MASK_GRID_PAINT_MASK |
+ CD_MASK_PROP_ALL),
};
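
A note on the initializer hunks above: the C designated initializers (.vmask = ...) become positional values tagged with /* field */ comments, and pmask now precedes lmask. Presumably this is because the file is compiled as C++ from here on, where designated initializers are only standard since C++20 and must follow member declaration order anyway; the comment-annotated positional form is portable to any C++ level. A minimal sketch with an illustrative struct (not Blender's actual definition):

    struct MeshMasksSketch {
      unsigned long long vmask, emask, fmask, pmask, lmask;
    };

    /* C could write order-independent designated initializers:
     *   const MeshMasksSketch a = {.vmask = 1, .lmask = 4, .pmask = 2};
     * In C++ the values are listed positionally, in declaration order,
     * keeping the field name as a comment for readability: */
    const MeshMasksSketch b = {
        /* vmask */ 1,
        /* emask */ 0,
        /* fmask */ 0,
        /* pmask */ 2,
        /* lmask */ 4,
    };
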
static const LayerTypeInfo *layerType_getInfo(int type)
{
if (type < 0 || type >= CD_NUMTYPES) {
- return NULL;
+ return nullptr;
}
return &LAYERTYPEINFO[type];
@@ -2013,7 +2033,7 @@ static const LayerTypeInfo *layerType_getInfo(int type)
static const char *layerType_getName(int type)
{
if (type < 0 || type >= CD_NUMTYPES) {
- return NULL;
+ return nullptr;
}
return LAYERTYPENAMES[type];
@@ -2147,7 +2167,7 @@ bool CustomData_merge(const struct CustomData *source,
data = layer->data;
break;
default:
- data = NULL;
+ data = nullptr;
break;
}
@@ -2169,7 +2189,7 @@ bool CustomData_merge(const struct CustomData *source,
newlayer->flag |= flag & (CD_FLAG_EXTERNAL | CD_FLAG_IN_MEMORY);
changed = true;
- if (layer->anonymous_id != NULL) {
+ if (layer->anonymous_id != nullptr) {
BKE_anonymous_attribute_id_increment_weak(layer->anonymous_id);
newlayer->anonymous_id = layer->anonymous_id;
}
@@ -2189,7 +2209,9 @@ void CustomData_realloc(CustomData *data, int totelem)
continue;
}
typeInfo = layerType_getInfo(layer->type);
- layer->data = MEM_reallocN(layer->data, (size_t)totelem * typeInfo->size);
+ /* Use calloc to avoid the need to manually initialize new data in layers.
+ * Useful for types like #MDeformVert which contain a pointer. */
+ layer->data = MEM_recallocN(layer->data, (size_t)totelem * typeInfo->size);
}
}
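
The switch from MEM_reallocN to MEM_recallocN above matters because realloc-style growth leaves the new tail uninitialized, while a recalloc-style call zeroes it, as the added comment notes. A hedged sketch in plain C++ (not Blender's MEM API, whose allocator tracks the old size internally) of what that guarantees:

    #include <cstdlib>
    #include <cstring>

    static void *recalloc_sketch(void *ptr, size_t old_size, size_t new_size)
    {
      void *p = std::realloc(ptr, new_size);
      if (p != nullptr && new_size > old_size) {
        /* realloc leaves the grown region uninitialized; zeroing it means
         * elements like MDeformVert start with a null weight pointer and a
         * zero count instead of garbage. */
        std::memset(static_cast<char *>(p) + old_size, 0, new_size - old_size);
      }
      return p;
    }
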
@@ -2202,7 +2224,7 @@ void CustomData_copy(const struct CustomData *source,
CustomData_reset(dest);
if (source->external) {
- dest->external = MEM_dupallocN(source->external);
+ dest->external = static_cast<CustomDataExternal *>(MEM_dupallocN(source->external));
}
CustomData_merge(source, dest, mask, alloctype, totelem);
@@ -2212,9 +2234,9 @@ static void customData_free_layer__internal(CustomDataLayer *layer, int totelem)
{
const LayerTypeInfo *typeInfo;
- if (layer->anonymous_id != NULL) {
+ if (layer->anonymous_id != nullptr) {
BKE_anonymous_attribute_id_decrement_weak(layer->anonymous_id);
- layer->anonymous_id = NULL;
+ layer->anonymous_id = nullptr;
}
if (!(layer->flag & CD_FLAG_NOFREE) && layer->data) {
typeInfo = layerType_getInfo(layer->type);
@@ -2233,7 +2255,7 @@ static void CustomData_external_free(CustomData *data)
{
if (data->external) {
MEM_freeN(data->external);
- data->external = NULL;
+ data->external = nullptr;
}
}
@@ -2499,8 +2521,8 @@ void CustomData_clear_layer_flag(struct CustomData *data, int type, int flag)
static bool customData_resize(CustomData *data, int amount)
{
- CustomDataLayer *tmp = MEM_calloc_arrayN(
- (data->maxlayer + amount), sizeof(*tmp), "CustomData->layers");
+ CustomDataLayer *tmp = static_cast<CustomDataLayer *>(
+ MEM_calloc_arrayN((data->maxlayer + amount), sizeof(*tmp), __func__));
if (!tmp) {
return false;
}
@@ -2524,7 +2546,7 @@ static CustomDataLayer *customData_add_layer__internal(CustomData *data,
{
const LayerTypeInfo *typeInfo = layerType_getInfo(type);
int flag = 0, index = data->totlayer;
- void *newlayerdata = NULL;
+ void *newlayerdata = nullptr;
/* Passing a layer-data to copy from with an alloctype that won't copy is
* most likely a bug */
@@ -2546,7 +2568,7 @@ static CustomDataLayer *customData_add_layer__internal(CustomData *data,
}
if (!newlayerdata) {
- return NULL;
+ return nullptr;
}
}
@@ -2574,7 +2596,7 @@ static CustomDataLayer *customData_add_layer__internal(CustomData *data,
if (newlayerdata != layerdata) {
MEM_freeN(newlayerdata);
}
- return NULL;
+ return nullptr;
}
}
@@ -2640,7 +2662,7 @@ void *CustomData_add_layer(
return layer->data;
}
- return NULL;
+ return nullptr;
}
void *CustomData_add_layer_named(CustomData *data,
@@ -2658,7 +2680,7 @@ void *CustomData_add_layer_named(CustomData *data,
return layer->data;
}
- return NULL;
+ return nullptr;
}
void *CustomData_add_layer_anonymous(struct CustomData *data,
@@ -2673,8 +2695,8 @@ void *CustomData_add_layer_anonymous(struct CustomData *data,
data, type, alloctype, layerdata, totelem, name);
CustomData_update_typemap(data);
- if (layer == NULL) {
- return NULL;
+ if (layer == nullptr) {
+ return nullptr;
}
BKE_anonymous_attribute_id_increment_weak(anonymous_id);
@@ -2787,7 +2809,7 @@ static void *customData_duplicate_referenced_layer_index(CustomData *data,
const int totelem)
{
if (layer_index == -1) {
- return NULL;
+ return nullptr;
}
CustomDataLayer *layer = &data->layers[layer_index];
@@ -2856,7 +2878,7 @@ void *CustomData_duplicate_referenced_layer_anonymous(CustomData *data,
}
}
BLI_assert_unreachable();
- return NULL;
+ return nullptr;
}
void CustomData_duplicate_referenced_layers(CustomData *data, int totelem)
@@ -2952,7 +2974,7 @@ void CustomData_copy_data_layer(const CustomData *source,
const size_t dst_offset = (size_t)dst_index * typeInfo->size;
if (!count || !src_data || !dst_data) {
- if (count && !(src_data == NULL && dst_data == NULL)) {
+ if (count && !(src_data == nullptr && dst_data == nullptr)) {
CLOG_WARN(&LOG,
"null data for %s type (%p --> %p), skipping",
layerType_getName(source->layers[src_layer_index].type),
@@ -3079,15 +3101,16 @@ void CustomData_interp(const CustomData *source,
/* Slow fallback in case we're interpolating a ridiculous number of elements. */
if (count > SOURCE_BUF_SIZE) {
- sources = MEM_malloc_arrayN(count, sizeof(*sources), __func__);
+ sources = static_cast<const void **>(MEM_malloc_arrayN(count, sizeof(*sources), __func__));
}
/* If no weights are given, generate default ones to produce an average result. */
float default_weights_buf[SOURCE_BUF_SIZE];
- float *default_weights = NULL;
- if (weights == NULL) {
+ float *default_weights = nullptr;
+ if (weights == nullptr) {
default_weights = (count > SOURCE_BUF_SIZE) ?
- MEM_mallocN(sizeof(*weights) * (size_t)count, __func__) :
+ static_cast<float *>(
+ MEM_mallocN(sizeof(*weights) * (size_t)count, __func__)) :
default_weights_buf;
copy_vn_fl(default_weights, count, 1.0f / count);
weights = default_weights;
@@ -3139,7 +3162,7 @@ void CustomData_interp(const CustomData *source,
if (count > SOURCE_BUF_SIZE) {
MEM_freeN((void *)sources);
}
- if (!ELEM(default_weights, NULL, default_weights_buf)) {
+ if (!ELEM(default_weights, nullptr, default_weights_buf)) {
MEM_freeN(default_weights);
}
}
@@ -3191,7 +3214,7 @@ void *CustomData_get(const CustomData *data, int index, int type)
/* get the layer index of the active layer of type */
int layer_index = CustomData_get_active_layer_index(data, type);
if (layer_index == -1) {
- return NULL;
+ return nullptr;
}
/* get the offset of the desired element */
@@ -3207,7 +3230,7 @@ void *CustomData_get_n(const CustomData *data, int type, int index, int n)
/* get the layer index of the first layer of type */
int layer_index = data->typemap[type];
if (layer_index == -1) {
- return NULL;
+ return nullptr;
}
const size_t offset = (size_t)index * layerType_getInfo(type)->size;
@@ -3219,7 +3242,7 @@ void *CustomData_get_layer(const CustomData *data, int type)
/* get the layer index of the active layer of type */
int layer_index = CustomData_get_active_layer_index(data, type);
if (layer_index == -1) {
- return NULL;
+ return nullptr;
}
return data->layers[layer_index].data;
@@ -3230,7 +3253,7 @@ void *CustomData_get_layer_n(const CustomData *data, int type, int n)
/* get the layer index of the active layer of type */
int layer_index = CustomData_get_layer_index_n(data, type, n);
if (layer_index == -1) {
- return NULL;
+ return nullptr;
}
return data->layers[layer_index].data;
@@ -3240,7 +3263,7 @@ void *CustomData_get_layer_named(const struct CustomData *data, int type, const
{
int layer_index = CustomData_get_named_layer_index(data, type, name);
if (layer_index == -1) {
- return NULL;
+ return nullptr;
}
return data->layers[layer_index].data;
@@ -3286,7 +3309,7 @@ const char *CustomData_get_layer_name(const CustomData *data, int type, int n)
{
const int layer_index = CustomData_get_layer_index_n(data, type, n);
- return (layer_index == -1) ? NULL : data->layers[layer_index].name;
+ return (layer_index == -1) ? nullptr : data->layers[layer_index].name;
}
void *CustomData_set_layer(const CustomData *data, int type, void *ptr)
@@ -3295,7 +3318,7 @@ void *CustomData_set_layer(const CustomData *data, int type, void *ptr)
int layer_index = CustomData_get_active_layer_index(data, type);
if (layer_index == -1) {
- return NULL;
+ return nullptr;
}
data->layers[layer_index].data = ptr;
@@ -3308,7 +3331,7 @@ void *CustomData_set_layer_n(const struct CustomData *data, int type, int n, voi
/* get the layer index of the first layer of type */
int layer_index = CustomData_get_layer_index_n(data, type, n);
if (layer_index == -1) {
- return NULL;
+ return nullptr;
}
data->layers[layer_index].data = ptr;
@@ -3340,19 +3363,19 @@ void CustomData_to_bmeshpoly(CustomData *fdata, CustomData *ldata, int totloop)
for (int i = 0; i < fdata->totlayer; i++) {
if (fdata->layers[i].type == CD_MTFACE) {
CustomData_add_layer_named(
- ldata, CD_MLOOPUV, CD_CALLOC, NULL, totloop, fdata->layers[i].name);
+ ldata, CD_MLOOPUV, CD_CALLOC, nullptr, totloop, fdata->layers[i].name);
}
else if (fdata->layers[i].type == CD_MCOL) {
CustomData_add_layer_named(
- ldata, CD_MLOOPCOL, CD_CALLOC, NULL, totloop, fdata->layers[i].name);
+ ldata, CD_MLOOPCOL, CD_CALLOC, nullptr, totloop, fdata->layers[i].name);
}
else if (fdata->layers[i].type == CD_MDISPS) {
CustomData_add_layer_named(
- ldata, CD_MDISPS, CD_CALLOC, NULL, totloop, fdata->layers[i].name);
+ ldata, CD_MDISPS, CD_CALLOC, nullptr, totloop, fdata->layers[i].name);
}
else if (fdata->layers[i].type == CD_TESSLOOPNORMAL) {
CustomData_add_layer_named(
- ldata, CD_NORMAL, CD_CALLOC, NULL, totloop, fdata->layers[i].name);
+ ldata, CD_NORMAL, CD_CALLOC, nullptr, totloop, fdata->layers[i].name);
}
}
}
@@ -3364,25 +3387,27 @@ void CustomData_from_bmeshpoly(CustomData *fdata, CustomData *ldata, int total)
for (int i = 0; i < ldata->totlayer; i++) {
if (ldata->layers[i].type == CD_MLOOPUV) {
- CustomData_add_layer_named(fdata, CD_MTFACE, CD_CALLOC, NULL, total, ldata->layers[i].name);
+ CustomData_add_layer_named(
+ fdata, CD_MTFACE, CD_CALLOC, nullptr, total, ldata->layers[i].name);
}
if (ldata->layers[i].type == CD_MLOOPCOL) {
- CustomData_add_layer_named(fdata, CD_MCOL, CD_CALLOC, NULL, total, ldata->layers[i].name);
+ CustomData_add_layer_named(fdata, CD_MCOL, CD_CALLOC, nullptr, total, ldata->layers[i].name);
}
else if (ldata->layers[i].type == CD_PREVIEW_MLOOPCOL) {
CustomData_add_layer_named(
- fdata, CD_PREVIEW_MCOL, CD_CALLOC, NULL, total, ldata->layers[i].name);
+ fdata, CD_PREVIEW_MCOL, CD_CALLOC, nullptr, total, ldata->layers[i].name);
}
else if (ldata->layers[i].type == CD_ORIGSPACE_MLOOP) {
CustomData_add_layer_named(
- fdata, CD_ORIGSPACE, CD_CALLOC, NULL, total, ldata->layers[i].name);
+ fdata, CD_ORIGSPACE, CD_CALLOC, nullptr, total, ldata->layers[i].name);
}
else if (ldata->layers[i].type == CD_NORMAL) {
CustomData_add_layer_named(
- fdata, CD_TESSLOOPNORMAL, CD_CALLOC, NULL, total, ldata->layers[i].name);
+ fdata, CD_TESSLOOPNORMAL, CD_CALLOC, nullptr, total, ldata->layers[i].name);
}
else if (ldata->layers[i].type == CD_TANGENT) {
- CustomData_add_layer_named(fdata, CD_TANGENT, CD_CALLOC, NULL, total, ldata->layers[i].name);
+ CustomData_add_layer_named(
+ fdata, CD_TANGENT, CD_CALLOC, nullptr, total, ldata->layers[i].name);
}
}
@@ -3495,7 +3520,7 @@ void CustomData_bmesh_init_pool(CustomData *data, int totelem, const char htype)
int chunksize;
/* Dispose old pools before calling here to avoid leaks */
- BLI_assert(data->pool == NULL);
+ BLI_assert(data->pool == nullptr);
switch (htype) {
case BM_VERT:
@@ -3538,7 +3563,7 @@ bool CustomData_bmesh_merge(const CustomData *source,
* the new allocation */
CustomData destold = *dest;
if (destold.layers) {
- destold.layers = MEM_dupallocN(destold.layers);
+ destold.layers = static_cast<CustomDataLayer *>(MEM_dupallocN(destold.layers));
}
if (CustomData_merge(source, dest, mask, alloctype, 0) == false) {
@@ -3574,7 +3599,7 @@ bool CustomData_bmesh_merge(const CustomData *source,
break;
}
- dest->pool = NULL;
+ dest->pool = nullptr;
CustomData_bmesh_init_pool(dest, totelem, htype);
if (iter_type != BM_LOOPS_OF_FACE) {
@@ -3582,7 +3607,7 @@ bool CustomData_bmesh_merge(const CustomData *source,
BMIter iter;
/* Ensure all current elements follow new customdata layout. */
BM_ITER_MESH (h, &iter, bm, iter_type) {
- void *tmp = NULL;
+ void *tmp = nullptr;
CustomData_bmesh_copy_data(&destold, dest, h->data, &tmp);
CustomData_bmesh_free_block(&destold, &h->data);
h->data = tmp;
@@ -3597,7 +3622,7 @@ bool CustomData_bmesh_merge(const CustomData *source,
/* Ensure all current elements follow new customdata layout. */
BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
- void *tmp = NULL;
+ void *tmp = nullptr;
CustomData_bmesh_copy_data(&destold, dest, l->head.data, &tmp);
CustomData_bmesh_free_block(&destold, &l->head.data);
l->head.data = tmp;
@@ -3616,7 +3641,7 @@ bool CustomData_bmesh_merge(const CustomData *source,
void CustomData_bmesh_free_block(CustomData *data, void **block)
{
- if (*block == NULL) {
+ if (*block == nullptr) {
return;
}
@@ -3635,12 +3660,12 @@ void CustomData_bmesh_free_block(CustomData *data, void **block)
BLI_mempool_free(data->pool, *block);
}
- *block = NULL;
+ *block = nullptr;
}
void CustomData_bmesh_free_block_data(CustomData *data, void *block)
{
- if (block == NULL) {
+ if (block == nullptr) {
return;
}
for (int i = 0; i < data->totlayer; i++) {
@@ -3667,7 +3692,7 @@ static void CustomData_bmesh_alloc_block(CustomData *data, void **block)
*block = BLI_mempool_alloc(data->pool);
}
else {
- *block = NULL;
+ *block = nullptr;
}
}
@@ -3675,7 +3700,7 @@ void CustomData_bmesh_free_block_data_exclude_by_type(CustomData *data,
void *block,
const CustomDataMask mask_exclude)
{
- if (block == NULL) {
+ if (block == nullptr) {
return;
}
for (int i = 0; i < data->totlayer; i++) {
@@ -3707,7 +3732,7 @@ static void CustomData_bmesh_set_default_n(CustomData *data, void **block, int n
void CustomData_bmesh_set_default(CustomData *data, void **block)
{
- if (*block == NULL) {
+ if (*block == nullptr) {
CustomData_bmesh_alloc_block(data, block);
}
@@ -3726,7 +3751,7 @@ void CustomData_bmesh_copy_data_exclude_by_type(const CustomData *source,
* would cause too much duplicate code, so add a check instead. */
const bool no_mask = (mask_exclude == 0);
- if (*dest_block == NULL) {
+ if (*dest_block == nullptr) {
CustomData_bmesh_alloc_block(dest, dest_block);
if (*dest_block) {
memset(*dest_block, 0, dest->totsize);
@@ -3792,7 +3817,7 @@ void *CustomData_bmesh_get(const CustomData *data, void *block, int type)
/* get the layer index of the first layer of type */
int layer_index = CustomData_get_active_layer_index(data, type);
if (layer_index == -1) {
- return NULL;
+ return nullptr;
}
return POINTER_OFFSET(block, data->layers[layer_index].offset);
@@ -3803,7 +3828,7 @@ void *CustomData_bmesh_get_n(const CustomData *data, void *block, int type, int
/* get the layer index of the first layer of type */
int layer_index = CustomData_get_layer_index(data, type);
if (layer_index == -1) {
- return NULL;
+ return nullptr;
}
return POINTER_OFFSET(block, data->layers[layer_index + n].offset);
@@ -3812,7 +3837,7 @@ void *CustomData_bmesh_get_n(const CustomData *data, void *block, int type, int
void *CustomData_bmesh_get_layer_n(const CustomData *data, void *block, int n)
{
if (n < 0 || n >= data->totlayer) {
- return NULL;
+ return nullptr;
}
return POINTER_OFFSET(block, data->layers[n].offset);
@@ -4028,7 +4053,7 @@ void CustomData_bmesh_interp_n(CustomData *data,
void *dst_block_ofs,
int n)
{
- BLI_assert(weights != NULL);
+ BLI_assert(weights != nullptr);
BLI_assert(count > 0);
CustomDataLayer *layer = &data->layers[n];
@@ -4053,15 +4078,15 @@ void CustomData_bmesh_interp(CustomData *data,
/* Slow fallback in case we're interpolating a ridiculous number of elements. */
if (count > SOURCE_BUF_SIZE) {
- sources = MEM_malloc_arrayN(count, sizeof(*sources), __func__);
+ sources = (const void **)MEM_malloc_arrayN(count, sizeof(*sources), __func__);
}
/* If no weights are given, generate default ones to produce an average result. */
float default_weights_buf[SOURCE_BUF_SIZE];
- float *default_weights = NULL;
- if (weights == NULL) {
+ float *default_weights = nullptr;
+ if (weights == nullptr) {
default_weights = (count > SOURCE_BUF_SIZE) ?
- MEM_mallocN(sizeof(*weights) * (size_t)count, __func__) :
+ (float *)MEM_mallocN(sizeof(*weights) * (size_t)count, __func__) :
default_weights_buf;
copy_vn_fl(default_weights, count, 1.0f / count);
weights = default_weights;
@@ -4083,7 +4108,7 @@ void CustomData_bmesh_interp(CustomData *data,
if (count > SOURCE_BUF_SIZE) {
MEM_freeN((void *)sources);
}
- if (!ELEM(default_weights, NULL, default_weights_buf)) {
+ if (!ELEM(default_weights, nullptr, default_weights_buf)) {
MEM_freeN(default_weights);
}
}
@@ -4094,7 +4119,7 @@ void CustomData_to_bmesh_block(const CustomData *source,
void **dest_block,
bool use_default_init)
{
- if (*dest_block == NULL) {
+ if (*dest_block == nullptr) {
CustomData_bmesh_alloc_block(dest, dest_block);
}
@@ -4216,22 +4241,22 @@ void CustomData_blend_write_prepare(CustomData *data,
for (i = 0, j = 0; i < totlayer; i++) {
CustomDataLayer *layer = &data->layers[i];
/* Layers with this flag set are not written to file. */
- if ((layer->flag & CD_FLAG_NOCOPY) || layer->anonymous_id != NULL) {
+ if ((layer->flag & CD_FLAG_NOCOPY) || layer->anonymous_id != nullptr) {
data->totlayer--;
// CLOG_WARN(&LOG, "skipping layer %p (%s)", layer, layer->name);
}
else {
if (UNLIKELY((size_t)j >= write_layers_size)) {
if (write_layers == write_layers_buff) {
- write_layers = MEM_malloc_arrayN(
+ write_layers = (CustomDataLayer *)MEM_malloc_arrayN(
(write_layers_size + chunk_size), sizeof(*write_layers), __func__);
if (write_layers_buff) {
memcpy(write_layers, write_layers_buff, sizeof(*write_layers) * write_layers_size);
}
}
else {
- write_layers = MEM_reallocN(write_layers,
- sizeof(*write_layers) * (write_layers_size + chunk_size));
+ write_layers = (CustomDataLayer *)MEM_reallocN(
+ write_layers, sizeof(*write_layers) * (write_layers_size + chunk_size));
}
write_layers_size += chunk_size;
}
@@ -4258,14 +4283,14 @@ const char *CustomData_layertype_name(int type)
bool CustomData_layertype_is_singleton(int type)
{
const LayerTypeInfo *typeInfo = layerType_getInfo(type);
- return typeInfo->defaultname == NULL;
+ return typeInfo->defaultname == nullptr;
}
bool CustomData_layertype_is_dynamic(int type)
{
const LayerTypeInfo *typeInfo = layerType_getInfo(type);
- return (typeInfo->free != NULL);
+ return (typeInfo->free != nullptr);
}
int CustomData_layertype_layers_max(const int type)
@@ -4273,10 +4298,10 @@ int CustomData_layertype_layers_max(const int type)
const LayerTypeInfo *typeInfo = layerType_getInfo(type);
/* Same test as for singleton above. */
- if (typeInfo->defaultname == NULL) {
+ if (typeInfo->defaultname == nullptr) {
return 1;
}
- if (typeInfo->layers_max == NULL) {
+ if (typeInfo->layers_max == nullptr) {
return -1;
}
@@ -4306,13 +4331,15 @@ static bool cd_layer_find_dupe(CustomData *data, const char *name, int type, int
return false;
}
+struct CustomDataUniqueCheckData {
+ CustomData *data;
+ int type;
+ int index;
+};
+
static bool customdata_unique_check(void *arg, const char *name)
{
- struct {
- CustomData *data;
- int type;
- int index;
- } *data_arg = arg;
+ CustomDataUniqueCheckData *data_arg = static_cast<CustomDataUniqueCheckData *>(arg);
return cd_layer_find_dupe(data_arg->data, name, data_arg->type, data_arg->index);
}
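
Naming the callback context struct (CustomDataUniqueCheckData) is the usual C-to-C++ adjustment: the anonymous local struct that both sides previously cast the void *arg to needs a single named type so static_cast can refer to it. A self-contained sketch of the same pattern with hypothetical names:

    #include <cstring>

    struct UniqueCheckCtx {
      const char **names;
      int names_num;
      int skip_index;
    };

    static bool name_is_taken(void *arg, const char *name)
    {
      const UniqueCheckCtx *ctx = static_cast<const UniqueCheckCtx *>(arg);
      for (int i = 0; i < ctx->names_num; i++) {
        if (i != ctx->skip_index && std::strcmp(ctx->names[i], name) == 0) {
          return true;
        }
      }
      return false;
    }
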
@@ -4321,14 +4348,7 @@ void CustomData_set_layer_unique_name(CustomData *data, int index)
CustomDataLayer *nlayer = &data->layers[index];
const LayerTypeInfo *typeInfo = layerType_getInfo(nlayer->type);
- struct {
- CustomData *data;
- int type;
- int index;
- } data_arg;
- data_arg.data = data;
- data_arg.type = nlayer->type;
- data_arg.index = index;
+ CustomDataUniqueCheckData data_arg{data, nlayer->type, index};
if (!typeInfo->defaultname) {
return;
@@ -4341,7 +4361,7 @@ void CustomData_set_layer_unique_name(CustomData *data, int index)
}
BLI_uniquename_cb(
- customdata_unique_check, &data_arg, NULL, '.', nlayer->name, sizeof(nlayer->name));
+ customdata_unique_check, &data_arg, nullptr, '.', nlayer->name, sizeof(nlayer->name));
}
void CustomData_validate_layer_name(const CustomData *data,
@@ -4393,7 +4413,12 @@ bool CustomData_verify_versions(struct CustomData *data, int index)
/* 0 structnum is used in writing code to tag layer types that should not be written. */
else if (typeInfo->structnum == 0 &&
             /* XXX Not sure why these types are exceptions, maybe that should be fixed? */
- !ELEM(layer->type, CD_PAINT_MASK, CD_FACEMAP, CD_MTEXPOLY, CD_SCULPT_FACE_SETS)) {
+ !ELEM(layer->type,
+ CD_PAINT_MASK,
+ CD_FACEMAP,
+ CD_MTEXPOLY,
+ CD_SCULPT_FACE_SETS,
+ CD_CREASE)) {
keeplayer = false;
CLOG_WARN(&LOG, ".blend file read: removing a data layer that should not have been written");
}
@@ -4413,7 +4438,7 @@ bool CustomData_layer_validate(CustomDataLayer *layer, const uint totitems, cons
{
const LayerTypeInfo *typeInfo = layerType_getInfo(layer->type);
- if (typeInfo->validate != NULL) {
+ if (typeInfo->validate != nullptr) {
return typeInfo->validate(layer->data, totitems, do_fixes);
}
@@ -4665,7 +4690,7 @@ void CustomData_external_add(
}
if (!external) {
- external = MEM_callocN(sizeof(CustomDataExternal), "CustomDataExternal");
+ external = MEM_cnew<CustomDataExternal>(__func__);
data->external = external;
}
BLI_strncpy(external->filename, filename, sizeof(external->filename));
@@ -4764,7 +4789,7 @@ static void customdata_data_transfer_interp_generic(const CustomDataTransferLaye
const int count,
const float mix_factor)
{
- BLI_assert(weights != NULL);
+ BLI_assert(weights != nullptr);
BLI_assert(count > 0);
  /* Fake interpolation: we actually copy the highest-weighted source to dest.
@@ -4779,8 +4804,8 @@ static void customdata_data_transfer_interp_generic(const CustomDataTransferLaye
size_t data_size;
const uint64_t data_flag = laymap->data_flag;
- cd_interp interp_cd = NULL;
- cd_copy copy_cd = NULL;
+ cd_interp interp_cd = nullptr;
+ cd_copy copy_cd = nullptr;
if (!sources) {
/* Not supported here, abort. */
@@ -4834,7 +4859,7 @@ static void customdata_data_transfer_interp_generic(const CustomDataTransferLaye
BLI_assert(best_src_idx >= 0);
if (interp_cd) {
- interp_cd(sources, weights, NULL, count, tmp_dst);
+ interp_cd(sources, weights, nullptr, count, tmp_dst);
}
else if (data_flag) {
copy_bit_flag(tmp_dst, sources[best_src_idx], data_size, data_flag);
@@ -4879,13 +4904,13 @@ void customdata_data_transfer_interp_normal_normals(const CustomDataTransferLaye
const int count,
const float mix_factor)
{
- BLI_assert(weights != NULL);
+ BLI_assert(weights != nullptr);
BLI_assert(count > 0);
const int data_type = laymap->data_type;
const int mix_mode = laymap->mix_mode;
- SpaceTransform *space_transform = laymap->interp_data;
+ SpaceTransform *space_transform = static_cast<SpaceTransform *>(laymap->interp_data);
const LayerTypeInfo *type_info = layerType_getInfo(data_type);
cd_interp interp_cd = type_info->interp;
@@ -4899,7 +4924,7 @@ void customdata_data_transfer_interp_normal_normals(const CustomDataTransferLaye
return;
}
- interp_cd(sources, weights, NULL, count, tmp_dst);
+ interp_cd(sources, weights, nullptr, count, tmp_dst);
if (space_transform) {
/* tmp_dst is in source space so far, bring it back in destination space. */
BLI_space_transform_invert_normal(space_transform, tmp_dst);
@@ -4922,18 +4947,19 @@ void CustomData_data_transfer(const MeshPairRemap *me_remap,
size_t data_size;
size_t data_offset;
- cd_datatransfer_interp interp = NULL;
+ cd_datatransfer_interp interp = nullptr;
size_t tmp_buff_size = 32;
- const void **tmp_data_src = NULL;
+ const void **tmp_data_src = nullptr;
- /* NOTE: NULL data_src may happen and be valid (see vgroups...). */
+ /* NOTE: null data_src may happen and be valid (see vgroups...). */
if (!data_dst) {
return;
}
if (data_src) {
- tmp_data_src = MEM_malloc_arrayN(tmp_buff_size, sizeof(*tmp_data_src), __func__);
+ tmp_data_src = (const void **)MEM_malloc_arrayN(
+ tmp_buff_size, sizeof(*tmp_data_src), __func__);
}
if (data_type & CD_FAKE) {
@@ -4965,7 +4991,8 @@ void CustomData_data_transfer(const MeshPairRemap *me_remap,
if (tmp_data_src) {
if (UNLIKELY(sources_num > tmp_buff_size)) {
tmp_buff_size = (size_t)sources_num;
- tmp_data_src = MEM_reallocN((void *)tmp_data_src, sizeof(*tmp_data_src) * tmp_buff_size);
+ tmp_data_src = (const void **)MEM_reallocN((void *)tmp_data_src,
+ sizeof(*tmp_data_src) * tmp_buff_size);
}
for (int j = 0; j < sources_num; j++) {
@@ -5037,28 +5064,33 @@ void CustomData_blend_write(BlendWriter *writer,
if (layer->type == CD_MDEFORMVERT) {
      /* Layer types that allocate their own memory need special handling. */
- BKE_defvert_blend_write(writer, count, layer->data);
+ BKE_defvert_blend_write(writer, count, static_cast<struct MDeformVert *>(layer->data));
}
else if (layer->type == CD_MDISPS) {
- write_mdisps(writer, count, layer->data, layer->flag & CD_FLAG_EXTERNAL);
+ write_mdisps(
+ writer, count, static_cast<MDisps *>(layer->data), layer->flag & CD_FLAG_EXTERNAL);
}
else if (layer->type == CD_PAINT_MASK) {
- const float *layer_data = layer->data;
+ const float *layer_data = static_cast<const float *>(layer->data);
BLO_write_raw(writer, sizeof(*layer_data) * count, layer_data);
}
else if (layer->type == CD_SCULPT_FACE_SETS) {
- const float *layer_data = layer->data;
+ const float *layer_data = static_cast<const float *>(layer->data);
BLO_write_raw(writer, sizeof(*layer_data) * count, layer_data);
}
else if (layer->type == CD_GRID_PAINT_MASK) {
- write_grid_paint_mask(writer, count, layer->data);
+ write_grid_paint_mask(writer, count, static_cast<GridPaintMask *>(layer->data));
}
else if (layer->type == CD_FACEMAP) {
- const int *layer_data = layer->data;
+ const int *layer_data = static_cast<const int *>(layer->data);
BLO_write_raw(writer, sizeof(*layer_data) * count, layer_data);
}
else if (layer->type == CD_PROP_BOOL) {
- const bool *layer_data = layer->data;
+ const bool *layer_data = static_cast<const bool *>(layer->data);
+ BLO_write_raw(writer, sizeof(*layer_data) * count, layer_data);
+ }
+ else if (layer->type == CD_CREASE) {
+ const float *layer_data = static_cast<const float *>(layer->data);
BLO_write_raw(writer, sizeof(*layer_data) * count, layer_data);
}
else {
@@ -5131,7 +5163,7 @@ void CustomData_blend_read(BlendDataReader *reader, CustomData *data, int count)
  /* Annoying workaround for bug T31079: loading legacy files with
   * no polygons _but_ with stale custom-data. */
- if (UNLIKELY(count == 0 && data->layers == NULL && data->totlayer != 0)) {
+ if (UNLIKELY(count == 0 && data->layers == nullptr && data->totlayer != 0)) {
CustomData_reset(data);
return;
}
@@ -5150,7 +5182,7 @@ void CustomData_blend_read(BlendDataReader *reader, CustomData *data, int count)
if (CustomData_verify_versions(data, i)) {
BLO_read_data_address(reader, &layer->data);
- if (layer->data == NULL && count > 0 && layer->type == CD_PROP_BOOL) {
+ if (layer->data == nullptr && count > 0 && layer->type == CD_PROP_BOOL) {
/* Usually this should never happen, except when a custom data layer has not been written
* to a file correctly. */
CLOG_WARN(&LOG, "Reallocating custom data layer that was not saved correctly.");
@@ -5161,10 +5193,11 @@ void CustomData_blend_read(BlendDataReader *reader, CustomData *data, int count)
}
}
if (layer->type == CD_MDISPS) {
- blend_read_mdisps(reader, count, layer->data, layer->flag & CD_FLAG_EXTERNAL);
+ blend_read_mdisps(
+ reader, count, static_cast<MDisps *>(layer->data), layer->flag & CD_FLAG_EXTERNAL);
}
else if (layer->type == CD_GRID_PAINT_MASK) {
- blend_read_paint_mask(reader, count, layer->data);
+ blend_read_paint_mask(reader, count, static_cast<GridPaintMask *>(layer->data));
}
i++;
}
@@ -5172,3 +5205,33 @@ void CustomData_blend_read(BlendDataReader *reader, CustomData *data, int count)
CustomData_update_typemap(data);
}
+
+#ifndef NDEBUG
+
+void CustomData_debug_info_from_layers(const CustomData *data, const char *indent, DynStr *dynstr)
+{
+ for (int type = 0; type < CD_NUMTYPES; type++) {
+ if (CustomData_has_layer(data, type)) {
+ /* NOTE: doesn't account for multiple layers. */
+ const char *name = CustomData_layertype_name(type);
+ const int size = CustomData_sizeof(type);
+ const void *pt = CustomData_get_layer(data, type);
+ const int pt_size = pt ? (int)(MEM_allocN_len(pt) / size) : 0;
+ const char *structname;
+ int structnum;
+ CustomData_file_write_info(type, &structname, &structnum);
+ BLI_dynstr_appendf(
+ dynstr,
+ "%sdict(name='%s', struct='%s', type=%d, ptr='%p', elem=%d, length=%d),\n",
+ indent,
+ name,
+ structname,
+ type,
+ (const void *)pt,
+ size,
+ pt_size);
+ }
+ }
+}
+
+#endif /* NDEBUG */
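
For the debug helper added above, a hedged usage sketch; `mesh` is a hypothetical Mesh pointer and the BLI_dynstr calls are assumed to follow their usual create/append/get/free pattern:

    #ifndef NDEBUG
    DynStr *ds = BLI_dynstr_new();
    CustomData_debug_info_from_layers(&mesh->vdata, "  ", ds);
    char *info = BLI_dynstr_get_cstring(ds); /* Caller is expected to free this. */
    printf("%s", info);
    MEM_freeN(info);
    BLI_dynstr_free(ds);
    #endif
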
diff --git a/source/blender/blenkernel/intern/data_transfer.c b/source/blender/blenkernel/intern/data_transfer.c
index f036f1ced87..0ad7efb6347 100644
--- a/source/blender/blenkernel/intern/data_transfer.c
+++ b/source/blender/blenkernel/intern/data_transfer.c
@@ -273,7 +273,6 @@ static void data_transfer_dtdata_type_preprocess(Mesh *me_src,
const int num_polys_dst = me_dst->totpoly;
MLoop *loops_dst = me_dst->mloop;
const int num_loops_dst = me_dst->totloop;
- CustomData *pdata_dst = &me_dst->pdata;
CustomData *ldata_dst = &me_dst->ldata;
const bool use_split_nors_dst = (me_dst->flag & ME_AUTOSMOOTH) != 0;
@@ -284,26 +283,9 @@ static void data_transfer_dtdata_type_preprocess(Mesh *me_src,
BLI_assert(CustomData_get_layer(&me_src->pdata, CD_NORMAL) != NULL);
(void)me_src;
- float(*poly_nors_dst)[3];
float(*loop_nors_dst)[3];
short(*custom_nors_dst)[2] = CustomData_get_layer(ldata_dst, CD_CUSTOMLOOPNORMAL);
- /* Cache poly nors into a temp CDLayer. */
- poly_nors_dst = CustomData_get_layer(pdata_dst, CD_NORMAL);
- const bool do_poly_nors_dst = (poly_nors_dst == NULL);
- if (do_poly_nors_dst) {
- poly_nors_dst = CustomData_add_layer(pdata_dst, CD_NORMAL, CD_CALLOC, NULL, num_polys_dst);
- CustomData_set_layer_flag(pdata_dst, CD_NORMAL, CD_FLAG_TEMPORARY);
- }
- if (dirty_nors_dst || do_poly_nors_dst) {
- BKE_mesh_calc_normals_poly(verts_dst,
- num_verts_dst,
- loops_dst,
- num_loops_dst,
- polys_dst,
- num_polys_dst,
- poly_nors_dst);
- }
/* Cache loop nors into a temp CDLayer. */
loop_nors_dst = CustomData_get_layer(ldata_dst, CD_NORMAL);
const bool do_loop_nors_dst = (loop_nors_dst == NULL);
@@ -313,6 +295,7 @@ static void data_transfer_dtdata_type_preprocess(Mesh *me_src,
}
if (dirty_nors_dst || do_loop_nors_dst) {
BKE_mesh_normals_loop_split(verts_dst,
+ BKE_mesh_vertex_normals_ensure(me_dst),
num_verts_dst,
edges_dst,
num_edges_dst,
@@ -320,7 +303,7 @@ static void data_transfer_dtdata_type_preprocess(Mesh *me_src,
loop_nors_dst,
num_loops_dst,
polys_dst,
- (const float(*)[3])poly_nors_dst,
+ BKE_mesh_poly_normals_ensure(me_dst),
num_polys_dst,
use_split_nors_dst,
split_angle_dst,
@@ -368,6 +351,7 @@ static void data_transfer_dtdata_type_postprocess(Object *UNUSED(ob_src),
/* Note loop_nors_dst contains our custom normals as transferred from source... */
BKE_mesh_normals_loop_custom_set(verts_dst,
+ BKE_mesh_vertex_normals_ensure(me_dst),
num_verts_dst,
edges_dst,
num_edges_dst,
@@ -1651,7 +1635,6 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
const int num_polys_dst = me_dst->totpoly;
MLoop *loops_dst = me_dst->mloop;
const int num_loops_dst = me_dst->totloop;
- CustomData *pdata_dst = &me_dst->pdata;
CustomData *ldata_dst = &me_dst->ldata;
MeshRemapIslandsCalc island_callback = data_transfer_get_loop_islands_generator(cddata_type);
@@ -1685,6 +1668,7 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
space_transform,
max_distance,
ray_radius,
+ me_dst,
verts_dst,
num_verts_dst,
edges_dst,
@@ -1694,7 +1678,6 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
polys_dst,
num_polys_dst,
ldata_dst,
- pdata_dst,
(me_dst->flag & ME_AUTOSMOOTH) != 0,
me_dst->smoothresh,
dirty_nors_dst,
@@ -1745,7 +1728,6 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
const int num_polys_dst = me_dst->totpoly;
MLoop *loops_dst = me_dst->mloop;
const int num_loops_dst = me_dst->totloop;
- CustomData *pdata_dst = &me_dst->pdata;
if (!geom_map_init[PDATA]) {
const int num_polys_src = me_src->totpoly;
@@ -1776,14 +1758,11 @@ bool BKE_object_data_transfer_ex(struct Depsgraph *depsgraph,
space_transform,
max_distance,
ray_radius,
+ me_dst,
verts_dst,
- num_verts_dst,
loops_dst,
- num_loops_dst,
polys_dst,
num_polys_dst,
- pdata_dst,
- dirty_nors_dst,
me_src,
&geom_map[PDATA]);
geom_map_init[PDATA] = true;
diff --git a/source/blender/blenkernel/intern/data_transfer_intern.h b/source/blender/blenkernel/intern/data_transfer_intern.h
index e40b4946f52..b5b3db31fbf 100644
--- a/source/blender/blenkernel/intern/data_transfer_intern.h
+++ b/source/blender/blenkernel/intern/data_transfer_intern.h
@@ -25,47 +25,48 @@
#include "BKE_customdata.h" /* For cd_datatransfer_interp */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
struct CustomData;
struct CustomDataTransferLayerMap;
struct ListBase;
-float data_transfer_interp_float_do(const int mix_mode,
- const float val_dst,
- const float val_src,
- const float mix_factor);
+float data_transfer_interp_float_do(int mix_mode, float val_dst, float val_src, float mix_factor);
void data_transfer_layersmapping_add_item(struct ListBase *r_map,
- const int data_type,
- const int mix_mode,
- const float mix_factor,
+ int data_type,
+ int mix_mode,
+ float mix_factor,
const float *mix_weights,
const void *data_src,
void *data_dst,
- const int data_src_n,
- const int data_dst_n,
- const size_t elem_size,
- const size_t data_size,
- const size_t data_offset,
- const uint64_t data_flag,
+ int data_src_n,
+ int data_dst_n,
+ size_t elem_size,
+ size_t data_size,
+ size_t data_offset,
+ uint64_t data_flag,
cd_datatransfer_interp interp,
void *interp_data);
/* Type-specific. */
bool data_transfer_layersmapping_vgroups(struct ListBase *r_map,
- const int mix_mode,
- const float mix_factor,
+ int mix_mode,
+ float mix_factor,
const float *mix_weights,
- const int num_elem_dst,
- const bool use_create,
- const bool use_delete,
+ int num_elem_dst,
+ bool use_create,
+ bool use_delete,
struct Object *ob_src,
struct Object *ob_dst,
struct CustomData *cd_src,
struct CustomData *cd_dst,
- const bool use_dupref_dst,
- const int fromlayers,
- const int tolayers);
+ bool use_dupref_dst,
+ int fromlayers,
+ int tolayers);
/* Defined in customdata.c */
@@ -76,5 +77,9 @@ void customdata_data_transfer_interp_normal_normals(const CustomDataTransferLaye
void *data_dst,
const void **sources,
const float *weights,
- const int count,
- const float mix_factor);
+ int count,
+ float mix_factor);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/source/blender/blenkernel/intern/displist.cc b/source/blender/blenkernel/intern/displist.cc
index edf043de63f..78177095a77 100644
--- a/source/blender/blenkernel/intern/displist.cc
+++ b/source/blender/blenkernel/intern/displist.cc
@@ -316,7 +316,7 @@ static void curve_to_displist(const Curve *cu,
* and resolution > 1. */
const bool use_cyclic_sample = is_cyclic && (samples_len != 2);
- DispList *dl = (DispList *)MEM_callocN(sizeof(DispList), __func__);
+ DispList *dl = MEM_cnew<DispList>(__func__);
/* Add one to the length because of 'BKE_curve_forward_diff_bezier'. */
dl->verts = (float *)MEM_mallocN(sizeof(float[3]) * (samples_len + 1), __func__);
BLI_addtail(r_dispbase, dl);
@@ -371,7 +371,7 @@ static void curve_to_displist(const Curve *cu,
}
else if (nu->type == CU_NURBS) {
const int len = (resolution * SEGMENTSU(nu));
- DispList *dl = (DispList *)MEM_callocN(sizeof(DispList), __func__);
+ DispList *dl = MEM_cnew<DispList>(__func__);
dl->verts = (float *)MEM_mallocN(len * sizeof(float[3]), __func__);
BLI_addtail(r_dispbase, dl);
dl->parts = 1;
@@ -384,7 +384,7 @@ static void curve_to_displist(const Curve *cu,
}
else if (nu->type == CU_POLY) {
const int len = nu->pntsu;
- DispList *dl = (DispList *)MEM_callocN(sizeof(DispList), __func__);
+ DispList *dl = MEM_cnew<DispList>(__func__);
dl->verts = (float *)MEM_mallocN(len * sizeof(float[3]), __func__);
BLI_addtail(r_dispbase, dl);
dl->parts = 1;
@@ -475,7 +475,7 @@ void BKE_displist_fill(const ListBase *dispbase,
const int triangles_len = BLI_scanfill_calc_ex(&sf_ctx, scanfill_flag, normal_proj);
if (totvert != 0 && triangles_len != 0) {
- DispList *dlnew = (DispList *)MEM_callocN(sizeof(DispList), __func__);
+ DispList *dlnew = MEM_cnew<DispList>(__func__);
dlnew->type = DL_INDEX3;
dlnew->flag = (dl_flag_accum & (DL_BACK_CURVE | DL_FRONT_CURVE));
dlnew->rt = (dl_rt_accum & CU_SMOOTH);
@@ -530,7 +530,7 @@ static void bevels_to_filledpoly(const Curve *cu, ListBase *dispbase)
if (dl->type == DL_SURF) {
if ((dl->flag & DL_CYCL_V) && (dl->flag & DL_CYCL_U) == 0) {
if ((cu->flag & CU_BACK) && (dl->flag & DL_BACK_CURVE)) {
- DispList *dlnew = (DispList *)MEM_callocN(sizeof(DispList), __func__);
+ DispList *dlnew = MEM_cnew<DispList>(__func__);
BLI_addtail(&front, dlnew);
dlnew->verts = (float *)MEM_mallocN(sizeof(float[3]) * dl->parts, __func__);
dlnew->nr = dl->parts;
@@ -549,7 +549,7 @@ static void bevels_to_filledpoly(const Curve *cu, ListBase *dispbase)
}
}
if ((cu->flag & CU_FRONT) && (dl->flag & DL_FRONT_CURVE)) {
- DispList *dlnew = (DispList *)MEM_callocN(sizeof(DispList), __func__);
+ DispList *dlnew = MEM_cnew<DispList>(__func__);
BLI_addtail(&back, dlnew);
dlnew->verts = (float *)MEM_mallocN(sizeof(float[3]) * dl->parts, __func__);
dlnew->nr = dl->parts;
@@ -665,7 +665,7 @@ void BKE_displist_make_mball(Depsgraph *depsgraph, Scene *scene, Object *ob)
BKE_displist_free(&(ob->runtime.curve_cache->disp));
}
else {
- ob->runtime.curve_cache = (CurveCache *)MEM_callocN(sizeof(CurveCache), __func__);
+ ob->runtime.curve_cache = MEM_cnew<CurveCache>(__func__);
}
BKE_mball_polygonize(depsgraph, scene, ob, &ob->runtime.curve_cache->disp);
@@ -904,7 +904,7 @@ static GeometrySet curve_calc_modifiers_post(Depsgraph *depsgraph,
int totvert;
float(*vertex_coords)[3] = BKE_mesh_vert_coords_alloc(mesh, &totvert);
if (mti->dependsOnNormals != nullptr && mti->dependsOnNormals(md)) {
- BKE_mesh_ensure_normals(mesh);
+ BKE_mesh_vertex_normals_ensure(mesh);
}
mti->deformVerts(md, &mectx_deform, mesh, vertex_coords, totvert);
BKE_mesh_vert_coords_apply(mesh, vertex_coords);
@@ -912,7 +912,7 @@ static GeometrySet curve_calc_modifiers_post(Depsgraph *depsgraph,
}
else {
if (mti->dependsOnNormals != nullptr && mti->dependsOnNormals(md)) {
- BKE_mesh_ensure_normals(mesh);
+ BKE_mesh_vertex_normals_ensure(mesh);
}
Mesh *output_mesh = mti->modifyMesh(md, &mectx_apply, mesh);
if (mesh != output_mesh) {
@@ -924,7 +924,7 @@ static GeometrySet curve_calc_modifiers_post(Depsgraph *depsgraph,
if (geometry_set.has_mesh()) {
Mesh *final_mesh = geometry_set.get_mesh_for_write();
- BKE_mesh_calc_normals(final_mesh);
+ BKE_mesh_ensure_normals_for_display(final_mesh);
BLI_strncpy(final_mesh->id.name, cu->id.name, sizeof(final_mesh->id.name));
*((short *)final_mesh->id.name) = ID_ME;
@@ -996,7 +996,7 @@ static void evaluate_surface_object(Depsgraph *depsgraph,
if (nu->pntsv == 1) {
const int len = SEGMENTSU(nu) * resolu;
- DispList *dl = (DispList *)MEM_callocN(sizeof(DispList), __func__);
+ DispList *dl = MEM_cnew<DispList>(__func__);
dl->verts = (float *)MEM_mallocN(len * sizeof(float[3]), __func__);
BLI_addtail(r_dispbase, dl);
@@ -1019,7 +1019,7 @@ static void evaluate_surface_object(Depsgraph *depsgraph,
else {
const int len = (nu->pntsu * resolu) * (nu->pntsv * resolv);
- DispList *dl = (DispList *)MEM_callocN(sizeof(DispList), __func__);
+ DispList *dl = MEM_cnew<DispList>(__func__);
dl->verts = (float *)MEM_mallocN(len * sizeof(float[3]), __func__);
BLI_addtail(r_dispbase, dl);
@@ -1122,7 +1122,7 @@ static void fillBevelCap(const Nurb *nu,
const float *prev_fp,
ListBase *dispbase)
{
- DispList *dl = (DispList *)MEM_callocN(sizeof(DispList), __func__);
+ DispList *dl = MEM_cnew<DispList>(__func__);
dl->verts = (float *)MEM_mallocN(sizeof(float[3]) * dlb->nr, __func__);
memcpy(dl->verts, prev_fp, sizeof(float[3]) * dlb->nr);
@@ -1321,7 +1321,7 @@ static GeometrySet evaluate_curve_type_object(Depsgraph *depsgraph,
/* exception handling; curve without bevel or extrude, with width correction */
if (BLI_listbase_is_empty(&dlbev)) {
- DispList *dl = (DispList *)MEM_callocN(sizeof(DispList), "makeDispListbev");
+ DispList *dl = MEM_cnew<DispList>("makeDispListbev");
dl->verts = (float *)MEM_mallocN(sizeof(float[3]) * bl->nr, "dlverts");
BLI_addtail(r_dispbase, dl);
@@ -1371,7 +1371,7 @@ static GeometrySet evaluate_curve_type_object(Depsgraph *depsgraph,
LISTBASE_FOREACH (DispList *, dlb, &dlbev) {
      /* For each part of the bevel, use a separate DispList block. */
- DispList *dl = (DispList *)MEM_callocN(sizeof(DispList), __func__);
+ DispList *dl = MEM_cnew<DispList>(__func__);
dl->verts = data = (float *)MEM_mallocN(sizeof(float[3]) * dlb->nr * steps, __func__);
BLI_addtail(r_dispbase, dl);
@@ -1495,7 +1495,7 @@ void BKE_displist_make_curveTypes(Depsgraph *depsgraph,
BKE_object_free_derived_caches(ob);
cow_curve.curve_eval = nullptr;
- ob->runtime.curve_cache = (CurveCache *)MEM_callocN(sizeof(CurveCache), __func__);
+ ob->runtime.curve_cache = MEM_cnew<CurveCache>(__func__);
ListBase *dispbase = &ob->runtime.curve_cache->disp;
if (ob->type == OB_SURF) {
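
The repeated MEM_cnew<DispList>(__func__) substitutions above replace the casted MEM_callocN(sizeof(DispList), ...) calls with a typed, zero-initializing helper. A rough standalone sketch of what such a helper amounts to (not Blender's actual template, which presumably routes through MEM_callocN and is meant for trivially-constructible C structs):

    #include <cstdlib>

    template<typename T> static T *cnew_sketch(const char * /*alloc_name*/)
    {
      /* Zeroed storage of the right type, no cast needed at the call site,
       * e.g. DispList *dl = cnew_sketch<DispList>(__func__); */
      return static_cast<T *>(std::calloc(1, sizeof(T)));
    }
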
diff --git a/source/blender/blenkernel/intern/dynamicpaint.c b/source/blender/blenkernel/intern/dynamicpaint.c
index ce92a34de47..64e0427a810 100644
--- a/source/blender/blenkernel/intern/dynamicpaint.c
+++ b/source/blender/blenkernel/intern/dynamicpaint.c
@@ -1789,6 +1789,7 @@ typedef struct DynamicPaintModifierApplyData {
Object *ob;
MVert *mvert;
+ const float (*vert_normals)[3];
const MLoop *mloop;
const MPoly *mpoly;
@@ -1806,14 +1807,11 @@ static void dynamic_paint_apply_surface_displace_cb(void *__restrict userdata,
const DynamicPaintSurface *surface = data->surface;
MVert *mvert = data->mvert;
- float normal[3];
const float *value = (float *)surface->data->type_data;
const float val = value[i] * surface->disp_factor;
- normal_short_to_float_v3(normal, mvert[i].no);
-
  /* Same as 'mvert[i].co[0] -= data->vert_normals[i][0] * val', etc. */
- madd_v3_v3fl(mvert[i].co, normal, -val);
+ madd_v3_v3fl(mvert[i].co, data->vert_normals[i], -val);
}
/* apply displacing vertex surface to the derived mesh */
@@ -1832,6 +1830,7 @@ static void dynamicPaint_applySurfaceDisplace(DynamicPaintSurface *surface, Mesh
DynamicPaintModifierApplyData data = {
.surface = surface,
.mvert = mvert,
+ .vert_normals = BKE_mesh_vertex_normals_ensure(result),
};
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
@@ -1898,10 +1897,8 @@ static void dynamic_paint_apply_surface_wave_cb(void *__restrict userdata,
PaintWavePoint *wPoint = (PaintWavePoint *)data->surface->data->type_data;
MVert *mvert = data->mvert;
- float normal[3];
- normal_short_to_float_v3(normal, mvert[i].no);
- madd_v3_v3fl(mvert[i].co, normal, wPoint[i].height);
+ madd_v3_v3fl(mvert[i].co, data->vert_normals[i], wPoint[i].height);
}
/*
@@ -2030,6 +2027,7 @@ static Mesh *dynamicPaint_Modifier_apply(DynamicPaintModifierData *pmd, Object *
DynamicPaintModifierApplyData data = {
.surface = surface,
.mvert = mvert,
+ .vert_normals = BKE_mesh_vertex_normals_ensure(result),
};
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
@@ -4287,6 +4285,7 @@ static bool dynamicPaint_paintMesh(Depsgraph *depsgraph,
mesh = BKE_mesh_copy_for_eval(brush_mesh, false);
mvert = mesh->mvert;
+ const float(*vert_normals)[3] = BKE_mesh_vertex_normals_ensure(mesh);
mlooptri = BKE_mesh_runtime_looptri_ensure(mesh);
mloop = mesh->mloop;
numOfVerts = mesh->totvert;
@@ -4301,7 +4300,7 @@ static bool dynamicPaint_paintMesh(Depsgraph *depsgraph,
        /* For the proximity-project option, calculate the average normal. */
if (brush->flags & MOD_DPAINT_PROX_PROJECT && brush->collision != MOD_DPAINT_COL_VOLUME) {
float nor[3];
- normal_short_to_float_v3(nor, mvert[ii].no);
+ copy_v3_v3(nor, vert_normals[ii]);
mul_mat3_m4_v3(brushOb->obmat, nor);
normalize_v3(nor);
@@ -5909,6 +5908,7 @@ typedef struct DynamicPaintGenerateBakeData {
Object *ob;
const MVert *mvert;
+ const float (*vert_normals)[3];
const Vec3f *canvas_verts;
const bool do_velocity_data;
@@ -5928,7 +5928,6 @@ static void dynamic_paint_generate_bake_data_cb(void *__restrict userdata,
Object *ob = data->ob;
- const MVert *mvert = data->mvert;
const Vec3f *canvas_verts = data->canvas_verts;
const bool do_velocity_data = data->do_velocity_data;
@@ -5962,9 +5961,9 @@ static void dynamic_paint_generate_bake_data_cb(void *__restrict userdata,
}
/* Calculate current pixel surface normal */
- normal_short_to_float_v3(n1, mvert[tPoint->v1].no);
- normal_short_to_float_v3(n2, mvert[tPoint->v2].no);
- normal_short_to_float_v3(n3, mvert[tPoint->v3].no);
+ copy_v3_v3(n1, data->vert_normals[tPoint->v1]);
+ copy_v3_v3(n2, data->vert_normals[tPoint->v2]);
+ copy_v3_v3(n3, data->vert_normals[tPoint->v3]);
interp_v3_v3v3v3(
temp_nor, n1, n2, n3, f_data->barycentricWeights[index * bData->s_num[index]].v);
@@ -6006,7 +6005,7 @@ static void dynamic_paint_generate_bake_data_cb(void *__restrict userdata,
}
/* normal */
- normal_short_to_float_v3(temp_nor, mvert[index].no);
+ copy_v3_v3(temp_nor, data->vert_normals[index]);
if (ELEM(surface->type, MOD_DPAINT_SURFACE_T_DISPLACE, MOD_DPAINT_SURFACE_T_WAVE)) {
/* Prepare surface normal directional scale to easily convert
* brush intersection amount between global and local space */
@@ -6145,6 +6144,7 @@ static bool dynamicPaint_generateBakeData(DynamicPaintSurface *surface,
.surface = surface,
.ob = ob,
.mvert = mvert,
+ .vert_normals = BKE_mesh_vertex_normals_ensure(mesh),
.canvas_verts = canvas_verts,
.do_velocity_data = do_velocity_data,
.new_bdata = new_bdata,
diff --git a/source/blender/blenkernel/intern/editmesh.c b/source/blender/blenkernel/intern/editmesh.c
index 805d3cdb5e3..0774a1a3d88 100644
--- a/source/blender/blenkernel/intern/editmesh.c
+++ b/source/blender/blenkernel/intern/editmesh.c
@@ -39,6 +39,8 @@
#include "BKE_mesh_wrapper.h"
#include "BKE_object.h"
+#include "DEG_depsgraph_query.h"
+
BMEditMesh *BKE_editmesh_create(BMesh *bm)
{
BMEditMesh *em = MEM_callocN(sizeof(BMEditMesh), __func__);
@@ -51,9 +53,6 @@ BMEditMesh *BKE_editmesh_copy(BMEditMesh *em)
BMEditMesh *em_copy = MEM_callocN(sizeof(BMEditMesh), __func__);
*em_copy = *em;
- em_copy->mesh_eval_cage = em_copy->mesh_eval_final = NULL;
- em_copy->bb_cage = NULL;
-
em_copy->bm = BM_mesh_copy(em->bm);
/* The tessellation is NOT calculated on the copy here,
@@ -194,22 +193,8 @@ void BKE_editmesh_looptri_and_normals_calc_with_partial(BMEditMesh *em,
});
}
-void BKE_editmesh_free_derived_caches(BMEditMesh *em)
-{
- if (em->mesh_eval_cage) {
- BKE_id_free(NULL, em->mesh_eval_cage);
- }
- if (em->mesh_eval_final && em->mesh_eval_final != em->mesh_eval_cage) {
- BKE_id_free(NULL, em->mesh_eval_final);
- }
- em->mesh_eval_cage = em->mesh_eval_final = NULL;
-
- MEM_SAFE_FREE(em->bb_cage);
-}
-
void BKE_editmesh_free_data(BMEditMesh *em)
{
- BKE_editmesh_free_derived_caches(em);
if (em->looptris) {
MEM_freeN(em->looptris);
@@ -229,8 +214,7 @@ struct CageUserData {
static void cage_mapped_verts_callback(void *userData,
int index,
const float co[3],
- const float UNUSED(no_f[3]),
- const short UNUSED(no_s[3]))
+ const float UNUSED(no[3]))
{
struct CageUserData *data = userData;
@@ -284,13 +268,15 @@ const float (*BKE_editmesh_vert_coords_when_deformed(struct Depsgraph *depsgraph
*r_is_alloc = false;
Mesh *me = ob->data;
+ Object *object_eval = DEG_get_evaluated_object(depsgraph, ob);
+ Mesh *editmesh_eval_final = BKE_object_get_editmesh_eval_final(object_eval);
if ((me->runtime.edit_data != NULL) && (me->runtime.edit_data->vertexCos != NULL)) {
/* Deformed, and we have deformed coords already. */
coords = me->runtime.edit_data->vertexCos;
}
- else if ((em->mesh_eval_final != NULL) &&
- (em->mesh_eval_final->runtime.wrapper_type == ME_WRAPPER_TYPE_BMESH)) {
+ else if ((editmesh_eval_final != NULL) &&
+ (editmesh_eval_final->runtime.wrapper_type == ME_WRAPPER_TYPE_BMESH)) {
/* If this is an edit-mesh type, leave NULL as we can use the vertex coords. */
}
else {
@@ -335,18 +321,18 @@ void BKE_editmesh_ensure_autosmooth(BMEditMesh *em, Mesh *me)
}
}
-BoundBox *BKE_editmesh_cage_boundbox_get(BMEditMesh *em)
+BoundBox *BKE_editmesh_cage_boundbox_get(struct Object *object, BMEditMesh *UNUSED(em))
{
- if (em->bb_cage == NULL) {
+ if (object->runtime.editmesh_bb_cage == NULL) {
float min[3], max[3];
INIT_MINMAX(min, max);
- if (em->mesh_eval_cage) {
- BKE_mesh_wrapper_minmax(em->mesh_eval_cage, min, max);
+ if (object->runtime.editmesh_eval_cage) {
+ BKE_mesh_wrapper_minmax(object->runtime.editmesh_eval_cage, min, max);
}
- em->bb_cage = MEM_callocN(sizeof(BoundBox), "BMEditMesh.bb_cage");
- BKE_boundbox_init_from_minmax(em->bb_cage, min, max);
+ object->runtime.editmesh_bb_cage = MEM_callocN(sizeof(BoundBox), "BMEditMesh.bb_cage");
+ BKE_boundbox_init_from_minmax(object->runtime.editmesh_bb_cage, min, max);
}
- return em->bb_cage;
+ return object->runtime.editmesh_bb_cage;
}
diff --git a/source/blender/blenkernel/intern/effect.c b/source/blender/blenkernel/intern/effect.c
index 8229228976c..bbf9e9edfd2 100644
--- a/source/blender/blenkernel/intern/effect.c
+++ b/source/blender/blenkernel/intern/effect.c
@@ -59,6 +59,7 @@
#include "BKE_fluid.h"
#include "BKE_global.h"
#include "BKE_layer.h"
+#include "BKE_mesh.h"
#include "BKE_modifier.h"
#include "BKE_object.h"
#include "BKE_particle.h"
@@ -715,9 +716,10 @@ bool get_effector_data(EffectorCache *eff,
else if (eff->pd && eff->pd->shape == PFIELD_SHAPE_POINTS) {
/* TODO: hair and points object support */
const Mesh *me_eval = BKE_object_get_evaluated_mesh(eff->ob);
+ const float(*vert_normals)[3] = BKE_mesh_vertex_normals_ensure(me_eval);
if (me_eval != NULL) {
copy_v3_v3(efd->loc, me_eval->mvert[*efd->index].co);
- normal_short_to_float_v3(efd->nor, me_eval->mvert[*efd->index].no);
+ copy_v3_v3(efd->nor, vert_normals[*efd->index]);
mul_m4_v3(eff->ob->obmat, efd->loc);
mul_mat3_m4_v3(eff->ob->obmat, efd->nor);
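
The normal-related hunks in this commit follow one pattern: the per-vertex short normals (mvert[i].no read through normal_short_to_float_v3()) give way to a float array obtained from BKE_mesh_vertex_normals_ensure(). Since that call has to dereference the mesh to compute or return its cached normals, it is safest to request the array only after the mesh has been null-checked; a minimal call-pattern sketch reusing the names from the hunk above (illustrative, not a drop-in replacement):

    const Mesh *me_eval = BKE_object_get_evaluated_mesh(eff->ob);
    if (me_eval != NULL) {
      /* Fetch the float vertex-normal layer once, then index it directly. */
      const float(*vert_normals)[3] = BKE_mesh_vertex_normals_ensure(me_eval);
      copy_v3_v3(efd->loc, me_eval->mvert[*efd->index].co);
      copy_v3_v3(efd->nor, vert_normals[*efd->index]);
      mul_m4_v3(eff->ob->obmat, efd->loc);
      mul_mat3_m4_v3(eff->ob->obmat, efd->nor);
    }
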
diff --git a/source/blender/blenkernel/intern/fcurve.c b/source/blender/blenkernel/intern/fcurve.c
index 5bbfc0913a1..f7a547543af 100644
--- a/source/blender/blenkernel/intern/fcurve.c
+++ b/source/blender/blenkernel/intern/fcurve.c
@@ -412,7 +412,7 @@ FCurve *BKE_fcurve_find_by_rna_context_ui(bContext *C,
char *path = NULL;
if (!adt && C) {
- path = BKE_animdata_driver_path_hack(C, &tptr, prop, NULL);
+ path = RNA_path_from_ID_to_property(&tptr, prop);
adt = BKE_animdata_from_id(tptr.owner_id);
step--;
}
@@ -463,7 +463,7 @@ FCurve *BKE_fcurve_find_by_rna_context_ui(bContext *C,
}
if (step) {
- char *tpath = BKE_animdata_driver_path_hack(C, &tptr, prop, path);
+ char *tpath = path ? path : RNA_path_from_ID_to_property(&tptr, prop);
if (tpath && tpath != path) {
MEM_freeN(path);
path = tpath;
diff --git a/source/blender/blenkernel/intern/fcurve_driver.c b/source/blender/blenkernel/intern/fcurve_driver.c
index 5496519e53b..ce30f80ba65 100644
--- a/source/blender/blenkernel/intern/fcurve_driver.c
+++ b/source/blender/blenkernel/intern/fcurve_driver.c
@@ -29,6 +29,7 @@
#include "BLI_alloca.h"
#include "BLI_expr_pylike_eval.h"
+#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_string_utils.h"
#include "BLI_threads.h"
@@ -864,6 +865,12 @@ void driver_variable_name_validate(DriverVar *dvar)
}
}
+void driver_variable_unique_name(DriverVar *dvar)
+{
+ ListBase variables = BLI_listbase_from_link((Link *)dvar);
+ BLI_uniquename(&variables, dvar, dvar->name, '_', offsetof(DriverVar, name), sizeof(dvar->name));
+}
+
DriverVar *driver_add_new_variable(ChannelDriver *driver)
{
DriverVar *dvar;
diff --git a/source/blender/blenkernel/intern/fluid.c b/source/blender/blenkernel/intern/fluid.c
index 39122b33683..0c9e352da12 100644
--- a/source/blender/blenkernel/intern/fluid.c
+++ b/source/blender/blenkernel/intern/fluid.c
@@ -1030,7 +1030,6 @@ static void obstacles_from_mesh(Object *coll_ob,
CustomData_set_layer(&me->vdata, CD_MVERT, me->mvert);
}
- BKE_mesh_ensure_normals(me);
mvert = me->mvert;
mloop = me->mloop;
looptri = BKE_mesh_runtime_looptri_ensure(me);
@@ -1053,9 +1052,11 @@ static void obstacles_from_mesh(Object *coll_ob,
}
}
- /* Transform mesh vertices to domain grid space for fast lookups */
+ /* Transform mesh vertices to domain grid space for fast lookups.
+ * This is valid because the mesh is copied above. */
+ BKE_mesh_vertex_normals_ensure(me);
+ float(*vert_normals)[3] = BKE_mesh_vertex_normals_for_write(me);
for (i = 0; i < numverts; i++) {
- float n[3];
float co[3];
/* Vertex position. */
@@ -1063,11 +1064,9 @@ static void obstacles_from_mesh(Object *coll_ob,
manta_pos_to_cell(fds, mvert[i].co);
/* Vertex normal. */
- normal_short_to_float_v3(n, mvert[i].no);
- mul_mat3_m4_v3(coll_ob->obmat, n);
- mul_mat3_m4_v3(fds->imat, n);
- normalize_v3(n);
- normal_float_to_short_v3(mvert[i].no, n);
+ mul_mat3_m4_v3(coll_ob->obmat, vert_normals[i]);
+ mul_mat3_m4_v3(fds->imat, vert_normals[i]);
+ normalize_v3(vert_normals[i]);
/* Vertex velocity. */
add_v3fl_v3fl_v3i(co, mvert[i].co, fds->shift);
@@ -1826,6 +1825,7 @@ static void update_distances(int index,
static void sample_mesh(FluidFlowSettings *ffs,
const MVert *mvert,
+ const float (*vert_normals)[3],
const MLoop *mloop,
const MLoopTri *mlooptri,
const MLoopUV *mloopuv,
@@ -1906,7 +1906,7 @@ static void sample_mesh(FluidFlowSettings *ffs,
tree_data->tree, ray_start, &nearest, tree_data->nearest_callback, tree_data) != -1) {
float weights[3];
int v1, v2, v3, f_index = nearest.index;
- float n1[3], n2[3], n3[3], hit_normal[3];
+ float hit_normal[3];
/* Calculate barycentric weights for nearest point. */
v1 = mloop[mlooptri[f_index].tri[0]].v;
@@ -1969,10 +1969,8 @@ static void sample_mesh(FluidFlowSettings *ffs,
/* Apply normal directional velocity. */
if (ffs->vel_normal) {
/* Interpolate vertex normal vectors to get nearest point normal. */
- normal_short_to_float_v3(n1, mvert[v1].no);
- normal_short_to_float_v3(n2, mvert[v2].no);
- normal_short_to_float_v3(n3, mvert[v3].no);
- interp_v3_v3v3v3(hit_normal, n1, n2, n3, weights);
+ interp_v3_v3v3v3(
+ hit_normal, vert_normals[v1], vert_normals[v2], vert_normals[v3], weights);
normalize_v3(hit_normal);
/* Apply normal directional velocity. */
@@ -2022,6 +2020,7 @@ typedef struct EmitFromDMData {
FluidFlowSettings *ffs;
const MVert *mvert;
+ const float (*vert_normals)[3];
const MLoop *mloop;
const MLoopTri *mlooptri;
const MLoopUV *mloopuv;
@@ -2056,6 +2055,7 @@ static void emit_from_mesh_task_cb(void *__restrict userdata,
(data->ffs->behavior == FLUID_FLOW_BEHAVIOR_INFLOW)) {
sample_mesh(data->ffs,
data->mvert,
+ data->vert_normals,
data->mloop,
data->mlooptri,
data->mloopuv,
@@ -2117,7 +2117,6 @@ static void emit_from_mesh(
CustomData_set_layer(&me->vdata, CD_MVERT, me->mvert);
}
- BKE_mesh_ensure_normals(me);
mvert = me->mvert;
mloop = me->mloop;
mlooptri = BKE_mesh_runtime_looptri_ensure(me);
@@ -2140,20 +2139,19 @@ static void emit_from_mesh(
}
}
- /* Transform mesh vertices to domain grid space for fast lookups */
+ /* Transform mesh vertices to domain grid space for fast lookups.
+ * This is valid because the mesh is copied above. */
+ BKE_mesh_vertex_normals_ensure(me);
+ float(*vert_normals)[3] = BKE_mesh_vertex_normals_for_write(me);
for (i = 0; i < numverts; i++) {
- float n[3];
-
/* Vertex position. */
mul_m4_v3(flow_ob->obmat, mvert[i].co);
manta_pos_to_cell(fds, mvert[i].co);
/* Vertex normal. */
- normal_short_to_float_v3(n, mvert[i].no);
- mul_mat3_m4_v3(flow_ob->obmat, n);
- mul_mat3_m4_v3(fds->imat, n);
- normalize_v3(n);
- normal_float_to_short_v3(mvert[i].no, n);
+ mul_mat3_m4_v3(flow_ob->obmat, vert_normals[i]);
+ mul_mat3_m4_v3(fds->imat, vert_normals[i]);
+ normalize_v3(vert_normals[i]);
/* Vertex velocity. */
if (ffs->flags & FLUID_FLOW_INITVELOCITY) {
@@ -2193,6 +2191,7 @@ static void emit_from_mesh(
.fds = fds,
.ffs = ffs,
.mvert = mvert,
+ .vert_normals = vert_normals,
.mloop = mloop,
.mlooptri = mlooptri,
.mloopuv = mloopuv,
@@ -3265,8 +3264,6 @@ static Mesh *create_liquid_geometry(FluidDomainSettings *fds,
MVert *mverts;
MPoly *mpolys;
MLoop *mloops;
- short *normals, *no_s;
- float no[3];
float min[3];
float max[3];
float size[3];
@@ -3285,26 +3282,23 @@ static Mesh *create_liquid_geometry(FluidDomainSettings *fds,
const char mp_flag = mp_example.flag;
int i;
- int num_verts, num_normals, num_faces;
+ int num_verts, num_faces;
if (!fds->fluid) {
return NULL;
}
num_verts = manta_liquid_get_num_verts(fds->fluid);
- num_normals = manta_liquid_get_num_normals(fds->fluid);
num_faces = manta_liquid_get_num_triangles(fds->fluid);
# ifdef DEBUG_PRINT
/* Debugging: Print number of vertices, normals, and faces. */
- printf("num_verts: %d, num_normals: %d, num_faces: %d\n", num_verts, num_normals, num_faces);
+ printf("num_verts: %d, num_faces: %d\n", num_verts, num_faces);
# endif
if (!num_verts || !num_faces) {
return NULL;
}
- /* Normals are per vertex, so these must match. */
- BLI_assert(num_verts == num_normals);
me = BKE_mesh_new_nomain(num_verts, 0, 0, num_faces * 3, num_faces);
if (!me) {
@@ -3334,9 +3328,6 @@ static Mesh *create_liquid_geometry(FluidDomainSettings *fds,
co_offset[1] = (fds->p0[1] + fds->p1[1]) / 2.0f;
co_offset[2] = (fds->p0[2] + fds->p1[2]) / 2.0f;
- /* Normals. */
- normals = MEM_callocN(sizeof(short[3]) * num_normals, "Fluidmesh_tmp_normals");
-
/* Velocities. */
/* If needed, vertex velocities will be read too. */
bool use_speedvectors = fds->flags & FLUID_DOMAIN_USE_SPEED_VECTORS;
@@ -3350,7 +3341,7 @@ static Mesh *create_liquid_geometry(FluidDomainSettings *fds,
}
/* Loop for vertices and normals. */
- for (i = 0, no_s = normals; i < num_verts && i < num_normals; i++, mverts++, no_s += 3) {
+ for (i = 0; i < num_verts; i++, mverts++) {
/* Vertices (data is normalized cube around domain origin). */
mverts->co[0] = manta_liquid_get_vertex_x_at(fds->fluid, i);
@@ -3376,12 +3367,6 @@ static Mesh *create_liquid_geometry(FluidDomainSettings *fds,
mverts->co[2]);
# endif
- /* Normals (data is normalized cube around domain origin). */
- no[0] = manta_liquid_get_normal_x_at(fds->fluid, i);
- no[1] = manta_liquid_get_normal_y_at(fds->fluid, i);
- no[2] = manta_liquid_get_normal_z_at(fds->fluid, i);
-
- normal_float_to_short_v3(no_s, no);
# ifdef DEBUG_PRINT
/* Debugging: Print coordinates of normals. */
printf("no_s[0]: %d, no_s[1]: %d, no_s[2]: %d\n", no_s[0], no_s[1], no_s[2]);
@@ -3425,11 +3410,7 @@ static Mesh *create_liquid_geometry(FluidDomainSettings *fds,
# endif
}
- BKE_mesh_ensure_normals(me);
BKE_mesh_calc_edges(me, false, false);
- BKE_mesh_vert_normals_apply(me, (short(*)[3])normals);
-
- MEM_freeN(normals);
return me;
}
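
The fluid.c hunks replace the short-normal round trip with in-place edits of the float vertex-normal layer; the added comments stress that this is only safe because the mesh is a local copy. A sketch of that write-side pattern, with obmat standing in for whichever matrices apply:

    BKE_mesh_vertex_normals_ensure(me); /* Make sure the layer exists and is up to date. */
    float(*vert_normals)[3] = BKE_mesh_vertex_normals_for_write(me);
    for (int i = 0; i < me->totvert; i++) {
      mul_mat3_m4_v3(obmat, vert_normals[i]); /* Rotate into the target space. */
      normalize_v3(vert_normals[i]);          /* Re-normalize after the transform. */
    }
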
diff --git a/source/blender/blenkernel/intern/geometry_component_curve.cc b/source/blender/blenkernel/intern/geometry_component_curve.cc
index 1e24b29038d..16edbc36f9c 100644
--- a/source/blender/blenkernel/intern/geometry_component_curve.cc
+++ b/source/blender/blenkernel/intern/geometry_component_curve.cc
@@ -14,6 +14,8 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#include "BLI_task.hh"
+
#include "DNA_ID_enums.h"
#include "DNA_curve_types.h"
@@ -389,6 +391,98 @@ static const CurveEval *get_curve_from_component_for_read(const GeometryComponen
namespace blender::bke {
/* -------------------------------------------------------------------- */
+/** \name Curve Normals Access
+ * \{ */
+
+static void calculate_bezier_normals(const BezierSpline &spline, MutableSpan<float3> normals)
+{
+ Span<int> offsets = spline.control_point_offsets();
+ Span<float3> evaluated_normals = spline.evaluated_normals();
+ for (const int i : IndexRange(spline.size())) {
+ normals[i] = evaluated_normals[offsets[i]];
+ }
+}
+
+static void calculate_poly_normals(const PolySpline &spline, MutableSpan<float3> normals)
+{
+ normals.copy_from(spline.evaluated_normals());
+}
+
+/**
+ * Because NURBS control points are not necessarily on the path, the normal at the control points
+ * is not well defined, so create a temporary poly spline to find the normals. This requires extra
+ * copying currently, but may be more efficient in the future if attributes have some form of CoW.
+ */
+static void calculate_nurbs_normals(const NURBSpline &spline, MutableSpan<float3> normals)
+{
+ PolySpline poly_spline;
+ poly_spline.resize(spline.size());
+ poly_spline.positions().copy_from(spline.positions());
+ poly_spline.tilts().copy_from(spline.tilts());
+ normals.copy_from(poly_spline.evaluated_normals());
+}
+
+static Array<float3> curve_normal_point_domain(const CurveEval &curve)
+{
+ Span<SplinePtr> splines = curve.splines();
+ Array<int> offsets = curve.control_point_offsets();
+ const int total_size = offsets.last();
+ Array<float3> normals(total_size);
+
+ threading::parallel_for(splines.index_range(), 128, [&](IndexRange range) {
+ for (const int i : range) {
+ const Spline &spline = *splines[i];
+ MutableSpan spline_normals{normals.as_mutable_span().slice(offsets[i], spline.size())};
+ switch (splines[i]->type()) {
+ case Spline::Type::Bezier:
+ calculate_bezier_normals(static_cast<const BezierSpline &>(spline), spline_normals);
+ break;
+ case Spline::Type::Poly:
+ calculate_poly_normals(static_cast<const PolySpline &>(spline), spline_normals);
+ break;
+ case Spline::Type::NURBS:
+ calculate_nurbs_normals(static_cast<const NURBSpline &>(spline), spline_normals);
+ break;
+ }
+ }
+ });
+ return normals;
+}
+
+VArray<float3> curve_normals_varray(const CurveComponent &component, const AttributeDomain domain)
+{
+ const CurveEval *curve = component.get_for_read();
+ if (curve == nullptr) {
+ return nullptr;
+ }
+
+ if (domain == ATTR_DOMAIN_POINT) {
+ const Span<SplinePtr> splines = curve->splines();
+
+ /* Use a reference to evaluated normals if possible to avoid an allocation and a copy.
+ * This is only possible when there is only one poly spline. */
+ if (splines.size() == 1 && splines.first()->type() == Spline::Type::Poly) {
+ const PolySpline &spline = static_cast<PolySpline &>(*splines.first());
+ return VArray<float3>::ForSpan(spline.evaluated_normals());
+ }
+
+ Array<float3> normals = curve_normal_point_domain(*curve);
+ return VArray<float3>::ForContainer(std::move(normals));
+ }
+
+ if (domain == ATTR_DOMAIN_CURVE) {
+ Array<float3> point_normals = curve_normal_point_domain(*curve);
+ VArray<float3> varray = VArray<float3>::ForContainer(std::move(point_normals));
+ return component.attribute_try_adapt_domain<float3>(
+ std::move(varray), ATTR_DOMAIN_POINT, ATTR_DOMAIN_CURVE);
+ }
+
+ return nullptr;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
/** \name Builtin Spline Attributes
*
* Attributes with a value for every spline, stored contiguously or in every spline separately.
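
curve_normals_varray above exposes normals on either the point or the curve domain, reusing the evaluated normals directly when the curve is a single poly spline. A hypothetical read of per-point normals through this entry point, assuming curve_component is a CurveComponent at hand:

    VArray<float3> normals = curve_normals_varray(curve_component, ATTR_DOMAIN_POINT);
    if (normals) { /* Empty when the component does not hold a curve. */
      for (int64_t i = 0; i < normals.size(); i++) {
        const float3 n = normals[i];
        /* ... use n ... */
      }
    }
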
diff --git a/source/blender/blenkernel/intern/geometry_component_instances.cc b/source/blender/blenkernel/intern/geometry_component_instances.cc
index 62d66f13e9f..b411c793298 100644
--- a/source/blender/blenkernel/intern/geometry_component_instances.cc
+++ b/source/blender/blenkernel/intern/geometry_component_instances.cc
@@ -17,6 +17,7 @@
#include <mutex>
#include "BLI_float4x4.hh"
+#include "BLI_index_mask.hh"
#include "BLI_map.hh"
#include "BLI_rand.hh"
#include "BLI_set.hh"
@@ -26,12 +27,17 @@
#include "DNA_collection_types.h"
+#include "BKE_attribute_access.hh"
+#include "BKE_attribute_math.hh"
#include "BKE_geometry_set.hh"
#include "BKE_geometry_set_instances.hh"
#include "attribute_access_intern.hh"
+#include "FN_cpp_type_make.hh"
+
using blender::float4x4;
+using blender::IndexMask;
using blender::Map;
using blender::MutableSpan;
using blender::Set;
@@ -39,6 +45,8 @@ using blender::Span;
using blender::VectorSet;
using blender::fn::GSpan;
+MAKE_CPP_TYPE(InstanceReference, InstanceReference, CPPTypeFlags::None)
+
/* -------------------------------------------------------------------- */
/** \name Geometry Component Implementation
* \{ */
@@ -128,6 +136,62 @@ blender::Span<InstanceReference> InstancesComponent::references() const
return references_;
}
+template<typename T>
+static void copy_data_based_on_mask(Span<T> src, MutableSpan<T> dst, IndexMask mask)
+{
+ BLI_assert(src.data() != dst.data());
+ using namespace blender;
+ threading::parallel_for(mask.index_range(), 1024, [&](IndexRange range) {
+ for (const int i : range) {
+ dst[i] = src[mask[i]];
+ }
+ });
+}
+
+void InstancesComponent::remove_instances(const IndexMask mask)
+{
+ using namespace blender;
+ if (mask.is_range() && mask.as_range().start() == 0) {
+ /* Deleting from the end of the array can be much faster since no data has to be shifted. */
+ this->resize(mask.size());
+ this->remove_unused_references();
+ return;
+ }
+
+ Vector<int> new_handles(mask.size());
+ copy_data_based_on_mask<int>(this->instance_reference_handles(), new_handles, mask);
+ instance_reference_handles_ = std::move(new_handles);
+ Vector<float4x4> new_transforms(mask.size());
+ copy_data_based_on_mask<float4x4>(this->instance_transforms(), new_transforms, mask);
+ instance_transforms_ = std::move(new_transforms);
+
+ const bke::CustomDataAttributes &src_attributes = attributes_;
+
+ bke::CustomDataAttributes dst_attributes;
+ dst_attributes.reallocate(mask.size());
+
+ src_attributes.foreach_attribute(
+ [&](const bke::AttributeIDRef &id, const AttributeMetaData &meta_data) {
+ if (!id.should_be_kept()) {
+ return true;
+ }
+
+ GSpan src = *src_attributes.get_for_read(id);
+ dst_attributes.create(id, meta_data.data_type);
+ fn::GMutableSpan dst = *dst_attributes.get_for_write(id);
+
+ attribute_math::convert_to_static_type(src.type(), [&](auto dummy) {
+ using T = decltype(dummy);
+ copy_data_based_on_mask<T>(src.typed<T>(), dst.typed<T>(), mask);
+ });
+ return true;
+ },
+ ATTR_DOMAIN_INSTANCE);
+
+ attributes_ = std::move(dst_attributes);
+ this->remove_unused_references();
+}
+
void InstancesComponent::remove_unused_references()
{
using namespace blender;
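
Note that despite the name, the copy loops in remove_instances read the source at mask[i] and resize to mask.size(), so the IndexMask lists the instances that are kept; attributes are filtered the same way. A hedged sketch, with the kept indices invented for illustration:

    InstancesComponent &instances = geometry_set.get_component_for_write<InstancesComponent>();
    Vector<int64_t> indices_to_keep = {0, 2, 5}; /* Must be sorted ascending, as IndexMask expects. */
    instances.remove_instances(IndexMask(indices_to_keep));
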
diff --git a/source/blender/blenkernel/intern/geometry_component_mesh.cc b/source/blender/blenkernel/intern/geometry_component_mesh.cc
index cc15e6d7b84..2509448d8aa 100644
--- a/source/blender/blenkernel/intern/geometry_component_mesh.cc
+++ b/source/blender/blenkernel/intern/geometry_component_mesh.cc
@@ -15,6 +15,7 @@
*/
#include "BLI_listbase.h"
+#include "BLI_task.hh"
#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"
@@ -123,6 +124,61 @@ void MeshComponent::ensure_owns_direct_data()
/** \} */
/* -------------------------------------------------------------------- */
+/** \name Mesh Normals Field Input
+ * \{ */
+
+namespace blender::bke {
+
+VArray<float3> mesh_normals_varray(const MeshComponent &mesh_component,
+ const Mesh &mesh,
+ const IndexMask mask,
+ const AttributeDomain domain)
+{
+ switch (domain) {
+ case ATTR_DOMAIN_FACE: {
+ return VArray<float3>::ForSpan(
+ {(float3 *)BKE_mesh_poly_normals_ensure(&mesh), mesh.totpoly});
+ }
+ case ATTR_DOMAIN_POINT: {
+ return VArray<float3>::ForSpan(
+ {(float3 *)BKE_mesh_vertex_normals_ensure(&mesh), mesh.totvert});
+ }
+ case ATTR_DOMAIN_EDGE: {
+ /* In this case, start with vertex normals and convert to the edge domain, since the
+ * conversion from edges to vertices is very simple. Use "manual" domain interpolation
+ * instead of the GeometryComponent API to avoid calculating unnecessary values and to
+ * allow normalizing the result more simply. */
+ Span<float3> vert_normals{(float3 *)BKE_mesh_vertex_normals_ensure(&mesh), mesh.totvert};
+ Array<float3> edge_normals(mask.min_array_size());
+ Span<MEdge> edges{mesh.medge, mesh.totedge};
+ for (const int i : mask) {
+ const MEdge &edge = edges[i];
+ edge_normals[i] = math::normalize(
+ math::interpolate(vert_normals[edge.v1], vert_normals[edge.v2], 0.5f));
+ }
+
+ return VArray<float3>::ForContainer(std::move(edge_normals));
+ }
+ case ATTR_DOMAIN_CORNER: {
+ /* The normals on corners are just the mesh's face normals, so start with the face normal
+ * array and copy the face normal for each of its corners. In this case using the mesh
+ * component's generic domain interpolation is fine, the data will still be normalized,
+ * since the face normal is just copied to every corner. */
+ return mesh_component.attribute_try_adapt_domain(
+ VArray<float3>::ForSpan({(float3 *)BKE_mesh_poly_normals_ensure(&mesh), mesh.totpoly}),
+ ATTR_DOMAIN_FACE,
+ ATTR_DOMAIN_CORNER);
+ }
+ default:
+ return {};
+ }
+}
+
+} // namespace blender::bke
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
/** \name Attribute Access
* \{ */
@@ -211,89 +267,54 @@ static GVArray adapt_mesh_domain_corner_to_point(const Mesh &mesh, const GVArray
/**
* Each corner's value is simply a copy of the value at its vertex.
- *
- * \note Theoretically this interpolation does not need to compute all values at once.
- * However, doing that makes the implementation simpler, and this can be optimized in the future if
- * only some values are required.
*/
-template<typename T>
-static void adapt_mesh_domain_point_to_corner_impl(const Mesh &mesh,
- const VArray<T> &old_values,
- MutableSpan<T> r_values)
-{
- BLI_assert(r_values.size() == mesh.totloop);
-
- for (const int loop_index : IndexRange(mesh.totloop)) {
- const int vertex_index = mesh.mloop[loop_index].v;
- r_values[loop_index] = old_values[vertex_index];
- }
-}
-
static GVArray adapt_mesh_domain_point_to_corner(const Mesh &mesh, const GVArray &varray)
{
GVArray new_varray;
attribute_math::convert_to_static_type(varray.type(), [&](auto dummy) {
using T = decltype(dummy);
- Array<T> values(mesh.totloop);
- adapt_mesh_domain_point_to_corner_impl<T>(mesh, varray.typed<T>(), values);
- new_varray = VArray<T>::ForContainer(std::move(values));
+ new_varray = VArray<T>::ForFunc(mesh.totloop,
+ [mesh, varray = varray.typed<T>()](const int64_t loop_index) {
+ const int vertex_index = mesh.mloop[loop_index].v;
+ return varray[vertex_index];
+ });
});
return new_varray;
}
-/**
- * \note Theoretically this interpolation does not need to compute all values at once.
- * However, doing that makes the implementation simpler, and this can be optimized in the future if
- * only some values are required.
- */
-template<typename T>
-static void adapt_mesh_domain_corner_to_face_impl(const Mesh &mesh,
- const VArray<T> &old_values,
- MutableSpan<T> r_values)
-{
- BLI_assert(r_values.size() == mesh.totpoly);
- attribute_math::DefaultMixer<T> mixer(r_values);
-
- for (const int poly_index : IndexRange(mesh.totpoly)) {
- const MPoly &poly = mesh.mpoly[poly_index];
- for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
- const T value = old_values[loop_index];
- mixer.mix_in(poly_index, value);
- }
- }
-
- mixer.finalize();
-}
-
-/* A face is selected if all of its corners were selected. */
-template<>
-void adapt_mesh_domain_corner_to_face_impl(const Mesh &mesh,
- const VArray<bool> &old_values,
- MutableSpan<bool> r_values)
-{
- BLI_assert(r_values.size() == mesh.totpoly);
-
- r_values.fill(true);
- for (const int poly_index : IndexRange(mesh.totpoly)) {
- const MPoly &poly = mesh.mpoly[poly_index];
- for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
- if (!old_values[loop_index]) {
- r_values[poly_index] = false;
- break;
- }
- }
- }
-}
-
static GVArray adapt_mesh_domain_corner_to_face(const Mesh &mesh, const GVArray &varray)
{
GVArray new_varray;
attribute_math::convert_to_static_type(varray.type(), [&](auto dummy) {
using T = decltype(dummy);
if constexpr (!std::is_void_v<attribute_math::DefaultMixer<T>>) {
- Array<T> values(mesh.totpoly);
- adapt_mesh_domain_corner_to_face_impl<T>(mesh, varray.typed<T>(), values);
- new_varray = VArray<T>::ForContainer(std::move(values));
+ if constexpr (std::is_same_v<T, bool>) {
+ new_varray = VArray<T>::ForFunc(
+ mesh.totpoly, [mesh, varray = varray.typed<bool>()](const int face_index) {
+ /* A face is selected if all of its corners were selected. */
+ const MPoly &poly = mesh.mpoly[face_index];
+ for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
+ if (!varray[loop_index]) {
+ return false;
+ }
+ }
+ return true;
+ });
+ }
+ else {
+ new_varray = VArray<T>::ForFunc(
+ mesh.totpoly, [mesh, varray = varray.typed<T>()](const int face_index) {
+ T return_value;
+ attribute_math::DefaultMixer<T> mixer({&return_value, 1});
+ const MPoly &poly = mesh.mpoly[face_index];
+ for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
+ const T value = varray[loop_index];
+ mixer.mix_in(0, value);
+ }
+ mixer.finalize();
+ return return_value;
+ });
+ }
}
});
return new_varray;
@@ -351,11 +372,13 @@ void adapt_mesh_domain_corner_to_edge_impl(const Mesh &mesh,
}
/* Deselect loose edges without corners that are still selected from the 'true' default. */
- for (const int edge_index : IndexRange(mesh.totedge)) {
- if (loose_edges[edge_index]) {
- r_values[edge_index] = false;
+ threading::parallel_for(IndexRange(mesh.totedge), 2048, [&](const IndexRange range) {
+ for (const int edge_index : range) {
+ if (loose_edges[edge_index]) {
+ r_values[edge_index] = false;
+ }
}
- }
+ });
}
static GVArray adapt_mesh_domain_corner_to_edge(const Mesh &mesh, const GVArray &varray)
@@ -436,11 +459,13 @@ void adapt_mesh_domain_face_to_corner_impl(const Mesh &mesh,
{
BLI_assert(r_values.size() == mesh.totloop);
- for (const int poly_index : IndexRange(mesh.totpoly)) {
- const MPoly &poly = mesh.mpoly[poly_index];
- MutableSpan<T> poly_corner_values = r_values.slice(poly.loopstart, poly.totloop);
- poly_corner_values.fill(old_values[poly_index]);
- }
+ threading::parallel_for(IndexRange(mesh.totpoly), 1024, [&](const IndexRange range) {
+ for (const int poly_index : range) {
+ const MPoly &poly = mesh.mpoly[poly_index];
+ MutableSpan<T> poly_corner_values = r_values.slice(poly.loopstart, poly.totloop);
+ poly_corner_values.fill(old_values[poly_index]);
+ }
+ });
}
static GVArray adapt_mesh_domain_face_to_corner(const Mesh &mesh, const GVArray &varray)
@@ -511,111 +536,72 @@ static GVArray adapt_mesh_domain_face_to_edge(const Mesh &mesh, const GVArray &v
return new_varray;
}
-/**
- * \note Theoretically this interpolation does not need to compute all values at once.
- * However, doing that makes the implementation simpler, and this can be optimized in the future if
- * only some values are required.
- */
-template<typename T>
-static void adapt_mesh_domain_point_to_face_impl(const Mesh &mesh,
- const VArray<T> &old_values,
- MutableSpan<T> r_values)
-{
- BLI_assert(r_values.size() == mesh.totpoly);
- attribute_math::DefaultMixer<T> mixer(r_values);
-
- for (const int poly_index : IndexRange(mesh.totpoly)) {
- const MPoly &poly = mesh.mpoly[poly_index];
- for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
- MLoop &loop = mesh.mloop[loop_index];
- const int point_index = loop.v;
- mixer.mix_in(poly_index, old_values[point_index]);
- }
- }
- mixer.finalize();
-}
-
-/* A face is selected if all of its vertices were selected too. */
-template<>
-void adapt_mesh_domain_point_to_face_impl(const Mesh &mesh,
- const VArray<bool> &old_values,
- MutableSpan<bool> r_values)
-{
- BLI_assert(r_values.size() == mesh.totpoly);
-
- r_values.fill(true);
- for (const int poly_index : IndexRange(mesh.totpoly)) {
- const MPoly &poly = mesh.mpoly[poly_index];
- for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
- MLoop &loop = mesh.mloop[loop_index];
- const int vert_index = loop.v;
- if (!old_values[vert_index]) {
- r_values[poly_index] = false;
- break;
- }
- }
- }
-}
-
static GVArray adapt_mesh_domain_point_to_face(const Mesh &mesh, const GVArray &varray)
{
GVArray new_varray;
attribute_math::convert_to_static_type(varray.type(), [&](auto dummy) {
using T = decltype(dummy);
if constexpr (!std::is_void_v<attribute_math::DefaultMixer<T>>) {
- Array<T> values(mesh.totpoly);
- adapt_mesh_domain_point_to_face_impl<T>(mesh, varray.typed<T>(), values);
- new_varray = VArray<T>::ForContainer(std::move(values));
+ if constexpr (std::is_same_v<T, bool>) {
+ new_varray = VArray<T>::ForFunc(
+ mesh.totpoly, [mesh, varray = varray.typed<bool>()](const int face_index) {
+ /* A face is selected if all of its vertices were selected. */
+ const MPoly &poly = mesh.mpoly[face_index];
+ for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
+ const MLoop &loop = mesh.mloop[loop_index];
+ if (!varray[loop.v]) {
+ return false;
+ }
+ }
+ return true;
+ });
+ }
+ else {
+ new_varray = VArray<T>::ForFunc(
+ mesh.totpoly, [mesh, varray = varray.typed<T>()](const int face_index) {
+ T return_value;
+ attribute_math::DefaultMixer<T> mixer({&return_value, 1});
+ const MPoly &poly = mesh.mpoly[face_index];
+ for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
+ const MLoop &loop = mesh.mloop[loop_index];
+ const T value = varray[loop.v];
+ mixer.mix_in(0, value);
+ }
+ mixer.finalize();
+ return return_value;
+ });
+ }
}
});
return new_varray;
}
-/**
- * \note Theoretically this interpolation does not need to compute all values at once.
- * However, doing that makes the implementation simpler, and this can be optimized in the future if
- * only some values are required.
- */
-template<typename T>
-static void adapt_mesh_domain_point_to_edge_impl(const Mesh &mesh,
- const VArray<T> &old_values,
- MutableSpan<T> r_values)
-{
- BLI_assert(r_values.size() == mesh.totedge);
- attribute_math::DefaultMixer<T> mixer(r_values);
-
- for (const int edge_index : IndexRange(mesh.totedge)) {
- const MEdge &edge = mesh.medge[edge_index];
- mixer.mix_in(edge_index, old_values[edge.v1]);
- mixer.mix_in(edge_index, old_values[edge.v2]);
- }
-
- mixer.finalize();
-}
-
-/* An edge is selected if both of its vertices were selected. */
-template<>
-void adapt_mesh_domain_point_to_edge_impl(const Mesh &mesh,
- const VArray<bool> &old_values,
- MutableSpan<bool> r_values)
-{
- BLI_assert(r_values.size() == mesh.totedge);
-
- for (const int edge_index : IndexRange(mesh.totedge)) {
- const MEdge &edge = mesh.medge[edge_index];
- r_values[edge_index] = old_values[edge.v1] && old_values[edge.v2];
- }
-}
-
static GVArray adapt_mesh_domain_point_to_edge(const Mesh &mesh, const GVArray &varray)
{
GVArray new_varray;
attribute_math::convert_to_static_type(varray.type(), [&](auto dummy) {
using T = decltype(dummy);
if constexpr (!std::is_void_v<attribute_math::DefaultMixer<T>>) {
- Array<T> values(mesh.totedge);
- adapt_mesh_domain_point_to_edge_impl<T>(mesh, varray.typed<T>(), values);
- new_varray = VArray<T>::ForContainer(std::move(values));
+ if constexpr (std::is_same_v<T, bool>) {
+ /* An edge is selected if both of its vertices were selected. */
+ new_varray = VArray<bool>::ForFunc(
+ mesh.totedge, [mesh, varray = varray.typed<bool>()](const int edge_index) {
+ const MEdge &edge = mesh.medge[edge_index];
+ return varray[edge.v1] && varray[edge.v2];
+ });
+ }
+ else {
+ new_varray = VArray<T>::ForFunc(
+ mesh.totedge, [mesh, varray = varray.typed<T>()](const int edge_index) {
+ T return_value;
+ attribute_math::DefaultMixer<T> mixer({&return_value, 1});
+ const MEdge &edge = mesh.medge[edge_index];
+ mixer.mix_in(0, varray[edge.v1]);
+ mixer.mix_in(0, varray[edge.v2]);
+ mixer.finalize();
+ return return_value;
+ });
+ }
}
});
return new_varray;
@@ -732,61 +718,41 @@ static GVArray adapt_mesh_domain_edge_to_point(const Mesh &mesh, const GVArray &
return new_varray;
}
-/**
- * \note Theoretically this interpolation does not need to compute all values at once.
- * However, doing that makes the implementation simpler, and this can be optimized in the future if
- * only some values are required.
- */
-template<typename T>
-static void adapt_mesh_domain_edge_to_face_impl(const Mesh &mesh,
- const VArray<T> &old_values,
- MutableSpan<T> r_values)
-{
- BLI_assert(r_values.size() == mesh.totpoly);
- attribute_math::DefaultMixer<T> mixer(r_values);
-
- for (const int poly_index : IndexRange(mesh.totpoly)) {
- const MPoly &poly = mesh.mpoly[poly_index];
- for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
- const MLoop &loop = mesh.mloop[loop_index];
- mixer.mix_in(poly_index, old_values[loop.e]);
- }
- }
-
- mixer.finalize();
-}
-
-/* A face is selected if all of its edges are selected. */
-template<>
-void adapt_mesh_domain_edge_to_face_impl(const Mesh &mesh,
- const VArray<bool> &old_values,
- MutableSpan<bool> r_values)
-{
- BLI_assert(r_values.size() == mesh.totpoly);
-
- r_values.fill(true);
- for (const int poly_index : IndexRange(mesh.totpoly)) {
- const MPoly &poly = mesh.mpoly[poly_index];
- for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
- const MLoop &loop = mesh.mloop[loop_index];
- const int edge_index = loop.e;
- if (!old_values[edge_index]) {
- r_values[poly_index] = false;
- break;
- }
- }
- }
-}
-
static GVArray adapt_mesh_domain_edge_to_face(const Mesh &mesh, const GVArray &varray)
{
GVArray new_varray;
attribute_math::convert_to_static_type(varray.type(), [&](auto dummy) {
using T = decltype(dummy);
if constexpr (!std::is_void_v<attribute_math::DefaultMixer<T>>) {
- Array<T> values(mesh.totpoly);
- adapt_mesh_domain_edge_to_face_impl<T>(mesh, varray.typed<T>(), values);
- new_varray = VArray<T>::ForContainer(std::move(values));
+ if constexpr (std::is_same_v<T, bool>) {
+ /* A face is selected if all of its edges are selected. */
+ new_varray = VArray<bool>::ForFunc(
+ mesh.totpoly, [mesh, varray = varray.typed<T>()](const int face_index) {
+ const MPoly &poly = mesh.mpoly[face_index];
+ for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
+ const MLoop &loop = mesh.mloop[loop_index];
+ if (!varray[loop.e]) {
+ return false;
+ }
+ }
+ return true;
+ });
+ }
+ else {
+ new_varray = VArray<T>::ForFunc(
+ mesh.totpoly, [mesh, varray = varray.typed<T>()](const int face_index) {
+ T return_value;
+ attribute_math::DefaultMixer<T> mixer({&return_value, 1});
+ const MPoly &poly = mesh.mpoly[face_index];
+ for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
+ const MLoop &loop = mesh.mloop[loop_index];
+ const T value = varray[loop.e];
+ mixer.mix_in(0, value);
+ }
+ mixer.finalize();
+ return return_value;
+ });
+ }
}
});
return new_varray;
@@ -1155,25 +1121,10 @@ class NormalAttributeProvider final : public BuiltinAttributeProvider {
{
const MeshComponent &mesh_component = static_cast<const MeshComponent &>(component);
const Mesh *mesh = mesh_component.get_for_read();
- if (mesh == nullptr) {
+ if (mesh == nullptr || mesh->totpoly == 0) {
return {};
}
-
- /* Use existing normals if possible. */
- if (!(mesh->runtime.cd_dirty_poly & CD_MASK_NORMAL) &&
- CustomData_has_layer(&mesh->pdata, CD_NORMAL)) {
- const void *data = CustomData_get_layer(&mesh->pdata, CD_NORMAL);
-
- return VArray<float3>::ForSpan(Span<float3>((const float3 *)data, mesh->totpoly));
- }
-
- Array<float3> normals(mesh->totpoly);
- for (const int i : IndexRange(mesh->totpoly)) {
- const MPoly *poly = &mesh->mpoly[i];
- BKE_mesh_calc_poly_normal(poly, &mesh->mloop[poly->loopstart], mesh->mvert, normals[i]);
- }
-
- return VArray<float3>::ForContainer(std::move(normals));
+ return VArray<float3>::ForSpan({(float3 *)BKE_mesh_poly_normals_ensure(mesh), mesh->totpoly});
}
WriteAttributeLookup try_get_for_write(GeometryComponent &UNUSED(component)) const final
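
The geometry_component_mesh.cc hunks replace the eager interpolation helpers with lazy adapters built on VArray<T>::ForFunc, so values are only computed for the elements that are actually read. The same pattern in isolation, assuming src is some existing VArray<float>:

    VArray<float> doubled = VArray<float>::ForFunc(
        src.size(), [src](const int64_t i) { return src[i] * 2.0f; });
    /* Nothing is evaluated until an element is accessed, e.g. doubled[7]. */
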
diff --git a/source/blender/blenkernel/intern/geometry_set.cc b/source/blender/blenkernel/intern/geometry_set.cc
index ef5609ec9a8..c1e386c626b 100644
--- a/source/blender/blenkernel/intern/geometry_set.cc
+++ b/source/blender/blenkernel/intern/geometry_set.cc
@@ -17,6 +17,8 @@
#include "BLI_map.hh"
#include "BLI_task.hh"
+#include "BLT_translation.h"
+
#include "BKE_attribute.h"
#include "BKE_attribute_access.hh"
#include "BKE_geometry_set.hh"
@@ -183,25 +185,27 @@ Vector<const GeometryComponent *> GeometrySet::get_components_for_read() const
return components;
}
-void GeometrySet::compute_boundbox_without_instances(float3 *r_min, float3 *r_max) const
+bool GeometrySet::compute_boundbox_without_instances(float3 *r_min, float3 *r_max) const
{
+ bool have_minmax = false;
const PointCloud *pointcloud = this->get_pointcloud_for_read();
if (pointcloud != nullptr) {
- BKE_pointcloud_minmax(pointcloud, *r_min, *r_max);
+ have_minmax |= BKE_pointcloud_minmax(pointcloud, *r_min, *r_max);
}
const Mesh *mesh = this->get_mesh_for_read();
if (mesh != nullptr) {
- BKE_mesh_wrapper_minmax(mesh, *r_min, *r_max);
+ have_minmax |= BKE_mesh_wrapper_minmax(mesh, *r_min, *r_max);
}
const Volume *volume = this->get_volume_for_read();
if (volume != nullptr) {
- BKE_volume_min_max(volume, *r_min, *r_max);
+ have_minmax |= BKE_volume_min_max(volume, *r_min, *r_max);
}
const CurveEval *curve = this->get_curve_for_read();
if (curve != nullptr) {
/* Using the evaluated positions is somewhat arbitrary, but it is probably expected. */
- curve->bounds_min_max(*r_min, *r_max, true);
+ have_minmax |= curve->bounds_min_max(*r_min, *r_max, true);
}
+ return have_minmax;
}
std::ostream &operator<<(std::ostream &stream, const GeometrySet &geometry_set)
@@ -566,6 +570,48 @@ void GeometrySet::modify_geometry_sets(ForeachSubGeometryCallback callback)
/** \} */
/* -------------------------------------------------------------------- */
+/** \name Mesh and Curve Normals Field Input
+ * \{ */
+
+namespace blender::bke {
+
+GVArray NormalFieldInput::get_varray_for_context(const GeometryComponent &component,
+ const AttributeDomain domain,
+ IndexMask mask) const
+{
+ if (component.type() == GEO_COMPONENT_TYPE_MESH) {
+ const MeshComponent &mesh_component = static_cast<const MeshComponent &>(component);
+ if (const Mesh *mesh = mesh_component.get_for_read()) {
+ return mesh_normals_varray(mesh_component, *mesh, mask, domain);
+ }
+ }
+ else if (component.type() == GEO_COMPONENT_TYPE_CURVE) {
+ const CurveComponent &curve_component = static_cast<const CurveComponent &>(component);
+ return curve_normals_varray(curve_component, domain);
+ }
+ return {};
+}
+
+std::string NormalFieldInput::socket_inspection_name() const
+{
+ return TIP_("Normal");
+}
+
+uint64_t NormalFieldInput::hash() const
+{
+ return 213980475983;
+}
+
+bool NormalFieldInput::is_equal_to(const fn::FieldNode &other) const
+{
+ return dynamic_cast<const NormalFieldInput *>(&other) != nullptr;
+}
+
+} // namespace blender::bke
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
/** \name C API
* \{ */
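
compute_boundbox_without_instances now reports whether any component contributed bounds, since the sub-calls only accumulate into r_min/r_max. A sketch of a caller; seeding the bounds before the call is an assumption based on that accumulation, not something shown in this hunk:

    float3 min(FLT_MAX, FLT_MAX, FLT_MAX);
    float3 max(-FLT_MAX, -FLT_MAX, -FLT_MAX);
    if (geometry_set.compute_boundbox_without_instances(&min, &max)) {
      /* min/max enclose the mesh, point cloud, volume and curve data (instances excluded). */
    }
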
diff --git a/source/blender/blenkernel/intern/geometry_set_instances.cc b/source/blender/blenkernel/intern/geometry_set_instances.cc
index 4d84d5d899d..42d2211c360 100644
--- a/source/blender/blenkernel/intern/geometry_set_instances.cc
+++ b/source/blender/blenkernel/intern/geometry_set_instances.cc
@@ -69,9 +69,18 @@ GeometrySet object_get_evaluated_geometry_set(const Object &object)
}
/* Otherwise, construct a new geometry set with the component based on the object type. */
- GeometrySet geometry_set;
if (object.type == OB_MESH) {
+ GeometrySet geometry_set;
add_final_mesh_as_geometry_component(object, geometry_set);
+ return geometry_set;
+ }
+ if (object.type == OB_EMPTY && object.instance_collection != nullptr) {
+ GeometrySet geometry_set;
+ Collection &collection = *object.instance_collection;
+ InstancesComponent &instances = geometry_set.get_component_for_write<InstancesComponent>();
+ const int handle = instances.add_reference(collection);
+ instances.add_instance(handle, float4x4::identity());
+ return geometry_set;
}
/* TODO: Cover the case of point clouds without modifiers-- they may not be covered by the
@@ -80,7 +89,7 @@ GeometrySet object_get_evaluated_geometry_set(const Object &object)
/* TODO: Add volume support. */
/* Return by value since there is not always an existing geometry set owned elsewhere to use. */
- return geometry_set;
+ return {};
}
static void geometry_set_collect_recursive_collection_instance(
@@ -98,13 +107,6 @@ static void geometry_set_collect_recursive_object(const Object &object,
{
GeometrySet instance_geometry_set = object_get_evaluated_geometry_set(object);
geometry_set_collect_recursive(instance_geometry_set, transform, r_sets);
-
- if (object.type == OB_EMPTY) {
- const Collection *collection_instance = object.instance_collection;
- if (collection_instance != nullptr) {
- geometry_set_collect_recursive_collection_instance(*collection_instance, transform, r_sets);
- }
- }
}
static void geometry_set_collect_recursive_collection(const Collection &collection,
diff --git a/source/blender/blenkernel/intern/gpencil_geom.cc b/source/blender/blenkernel/intern/gpencil_geom.cc
index b5190f598c6..9abdbceec61 100644
--- a/source/blender/blenkernel/intern/gpencil_geom.cc
+++ b/source/blender/blenkernel/intern/gpencil_geom.cc
@@ -33,10 +33,10 @@
#include "BLI_array_utils.h"
#include "BLI_blenlib.h"
-#include "BLI_float3.hh"
#include "BLI_ghash.h"
#include "BLI_hash.h"
#include "BLI_heap.h"
+#include "BLI_math_vec_types.hh"
#include "BLI_math_vector.h"
#include "BLI_polyfill_2d.h"
#include "BLI_span.hh"
@@ -60,6 +60,7 @@
#include "BKE_gpencil_geom.h"
#include "BKE_main.h"
#include "BKE_material.h"
+#include "BKE_mesh.h"
#include "BKE_object.h"
#include "DEG_depsgraph_query.h"
@@ -145,7 +146,7 @@ void BKE_gpencil_stroke_boundingbox_calc(bGPDstroke *gps)
static void boundbox_gpencil(Object *ob)
{
if (ob->runtime.bb == nullptr) {
- ob->runtime.bb = (BoundBox *)MEM_callocN(sizeof(BoundBox), "GPencil boundbox");
+ ob->runtime.bb = MEM_cnew<BoundBox>("GPencil boundbox");
}
BoundBox *bb = ob->runtime.bb;
@@ -182,7 +183,7 @@ BoundBox *BKE_gpencil_boundbox_get(Object *ob)
* to keep both values synchronized. */
if (!ELEM(ob_orig, nullptr, ob)) {
if (ob_orig->runtime.bb == nullptr) {
- ob_orig->runtime.bb = (BoundBox *)MEM_callocN(sizeof(BoundBox), "GPencil boundbox");
+ ob_orig->runtime.bb = MEM_cnew<BoundBox>("GPencil boundbox");
}
for (int i = 0; i < 8; i++) {
copy_v3_v3(ob_orig->runtime.bb->vec[i], ob->runtime.bb->vec[i]);
@@ -364,7 +365,7 @@ static void stroke_defvert_create_nr_list(MDeformVert *dv_list,
}
}
if (!found) {
- ld = (LinkData *)MEM_callocN(sizeof(LinkData), "def_nr_item");
+ ld = MEM_cnew<LinkData>("def_nr_item");
ld->data = POINTER_FROM_INT(dw->def_nr);
BLI_addtail(result, ld);
tw++;
@@ -2350,6 +2351,7 @@ static void gpencil_generate_edgeloops(Object *ob,
if (me->totedge == 0) {
return;
}
+ const float(*vert_normals)[3] = BKE_mesh_vertex_normals_ensure(me);
/* Arrays for all edge vertices (forward and backward) that form a edge loop.
* This is reused for each edge-loop to create gpencil stroke. */
@@ -2364,13 +2366,13 @@ static void gpencil_generate_edgeloops(Object *ob,
MEdge *ed = &me->medge[i];
gped = &gp_edges[i];
MVert *mv1 = &me->mvert[ed->v1];
- normal_short_to_float_v3(gped->n1, mv1->no);
+ copy_v3_v3(gped->n1, vert_normals[ed->v1]);
gped->v1 = ed->v1;
copy_v3_v3(gped->v1_co, mv1->co);
MVert *mv2 = &me->mvert[ed->v2];
- normal_short_to_float_v3(gped->n2, mv2->no);
+ copy_v3_v3(gped->n2, vert_normals[ed->v2]);
gped->v2 = ed->v2;
copy_v3_v3(gped->v2_co, mv2->co);
@@ -2439,7 +2441,7 @@ static void gpencil_generate_edgeloops(Object *ob,
/* Add segment. */
bGPDspoint *pt = &gps_stroke->points[i];
- normal_short_to_float_v3(fpt, mv->no);
+ copy_v3_v3(fpt, vert_normals[vertex_index]);
mul_v3_v3fl(fpt, fpt, offset);
add_v3_v3v3(&pt->x, mv->co, fpt);
mul_m4_v3(matrix, &pt->x);
@@ -3482,7 +3484,7 @@ struct tSampleEdge {
/* Helper: creates a tSamplePoint from a bGPDspoint and (optionally) a MDeformVert. */
static tSamplePoint *new_sample_point_from_gp_point(const bGPDspoint *pt, const MDeformVert *dvert)
{
- tSamplePoint *new_pt = (tSamplePoint *)MEM_callocN(sizeof(tSamplePoint), __func__);
+ tSamplePoint *new_pt = MEM_cnew<tSamplePoint>(__func__);
copy_v3_v3(&new_pt->x, &pt->x);
new_pt->pressure = pt->pressure;
new_pt->strength = pt->strength;
@@ -3505,7 +3507,7 @@ static tSamplePoint *new_sample_point_from_gp_point(const bGPDspoint *pt, const
* the edge. */
static tSampleEdge *new_sample_edge_from_sample_points(tSamplePoint *from, tSamplePoint *to)
{
- tSampleEdge *new_edge = (tSampleEdge *)MEM_callocN(sizeof(tSampleEdge), __func__);
+ tSampleEdge *new_edge = MEM_cnew<tSampleEdge>(__func__);
new_edge->from = from;
new_edge->to = to;
new_edge->length_sq = len_squared_v3v3(&from->x, &to->x);
@@ -3561,7 +3563,7 @@ void BKE_gpencil_stroke_uniform_subdivide(bGPdata *gpd,
tSamplePoint *sp_next = se->to;
/* Subdivide the edge. */
- tSamplePoint *new_sp = (tSamplePoint *)MEM_callocN(sizeof(tSamplePoint), __func__);
+ tSamplePoint *new_sp = MEM_cnew<tSamplePoint>(__func__);
interp_v3_v3v3(&new_sp->x, &sp->x, &sp_next->x, 0.5f);
new_sp->pressure = interpf(sp->pressure, sp_next->pressure, 0.5f);
new_sp->strength = interpf(sp->strength, sp_next->strength, 0.5f);
@@ -3687,7 +3689,7 @@ struct tPerimeterPoint {
static tPerimeterPoint *new_perimeter_point(const float pt[3])
{
- tPerimeterPoint *new_pt = (tPerimeterPoint *)MEM_callocN(sizeof(tPerimeterPoint), __func__);
+ tPerimeterPoint *new_pt = MEM_cnew<tPerimeterPoint>(__func__);
copy_v3_v3(&new_pt->x, pt);
return new_pt;
}
@@ -3856,8 +3858,8 @@ static ListBase *gpencil_stroke_perimeter_ex(const bGPdata *gpd,
float defaultpixsize = 1000.0f / gpd->pixfactor;
float stroke_radius = ((gps->thickness + gpl->line_change) / defaultpixsize) / 2.0f;
- ListBase *perimeter_right_side = (ListBase *)MEM_callocN(sizeof(ListBase), __func__);
- ListBase *perimeter_left_side = (ListBase *)MEM_callocN(sizeof(ListBase), __func__);
+ ListBase *perimeter_right_side = MEM_cnew<ListBase>(__func__);
+ ListBase *perimeter_left_side = MEM_cnew<ListBase>(__func__);
int num_perimeter_points = 0;
bGPDspoint *first = &gps->points[0];
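
Several allocations above switch from casting MEM_callocN to the typed MEM_cnew helper; the memory is still zero-initialized and still released with MEM_freeN. The pattern in isolation:

    BoundBox *bb = MEM_cnew<BoundBox>(__func__); /* Was: (BoundBox *)MEM_callocN(sizeof(BoundBox), "..."). */
    MEM_freeN(bb);
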
diff --git a/source/blender/blenkernel/intern/gpencil_modifier.c b/source/blender/blenkernel/intern/gpencil_modifier.c
index 62604286b43..74db151261f 100644
--- a/source/blender/blenkernel/intern/gpencil_modifier.c
+++ b/source/blender/blenkernel/intern/gpencil_modifier.c
@@ -79,10 +79,6 @@ static GpencilVirtualModifierData virtualModifierCommonData;
* each loop over all the geometry being evaluated.
*/
-/**
- * Init grease pencil cache deform data.
- * \param ob: Grease pencil object
- */
void BKE_gpencil_cache_data_init(Depsgraph *depsgraph, Object *ob)
{
LISTBASE_FOREACH (GpencilModifierData *, md, &ob->greasepencil_modifiers) {
@@ -131,10 +127,6 @@ void BKE_gpencil_cache_data_init(Depsgraph *depsgraph, Object *ob)
}
}
-/**
- * Clear grease pencil cache deform data.
- * \param ob: Grease pencil object
- */
void BKE_gpencil_cache_data_clear(Object *ob)
{
LISTBASE_FOREACH (GpencilModifierData *, md, &ob->greasepencil_modifiers) {
diff --git a/source/blender/blenkernel/intern/hair.c b/source/blender/blenkernel/intern/hair.cc
index f2a5146422e..b7ba159f631 100644
--- a/source/blender/blenkernel/intern/hair.c
+++ b/source/blender/blenkernel/intern/hair.cc
@@ -18,6 +18,9 @@
* \ingroup bke
*/
+#include <cmath>
+#include <cstring>
+
#include "MEM_guardedalloc.h"
#include "DNA_defaults.h"
@@ -26,7 +29,8 @@
#include "DNA_object_types.h"
#include "BLI_listbase.h"
-#include "BLI_math.h"
+#include "BLI_math_base.h"
+#include "BLI_math_vec_types.hh"
#include "BLI_rand.h"
#include "BLI_string.h"
#include "BLI_utildefines.h"
@@ -49,6 +53,8 @@
#include "BLO_read_write.h"
+using blender::float3;
+
static const char *HAIR_ATTR_POSITION = "position";
static const char *HAIR_ATTR_RADIUS = "radius";
@@ -67,10 +73,10 @@ static void hair_init_data(ID *id)
CustomData_reset(&hair->cdata);
CustomData_add_layer_named(
- &hair->pdata, CD_PROP_FLOAT3, CD_CALLOC, NULL, hair->totpoint, HAIR_ATTR_POSITION);
+ &hair->pdata, CD_PROP_FLOAT3, CD_CALLOC, nullptr, hair->totpoint, HAIR_ATTR_POSITION);
CustomData_add_layer_named(
- &hair->pdata, CD_PROP_FLOAT, CD_CALLOC, NULL, hair->totpoint, HAIR_ATTR_RADIUS);
- CustomData_add_layer(&hair->cdata, CD_HAIRCURVE, CD_CALLOC, NULL, hair->totcurve);
+ &hair->pdata, CD_PROP_FLOAT, CD_CALLOC, nullptr, hair->totpoint, HAIR_ATTR_RADIUS);
+ CustomData_add_layer(&hair->cdata, CD_HAIRCURVE, CD_CALLOC, nullptr, hair->totcurve);
BKE_hair_update_customdata_pointers(hair);
hair_random(hair);
@@ -80,14 +86,14 @@ static void hair_copy_data(Main *UNUSED(bmain), ID *id_dst, const ID *id_src, co
{
Hair *hair_dst = (Hair *)id_dst;
const Hair *hair_src = (const Hair *)id_src;
- hair_dst->mat = MEM_dupallocN(hair_src->mat);
+ hair_dst->mat = static_cast<Material **>(MEM_dupallocN(hair_src->mat));
const eCDAllocType alloc_type = (flag & LIB_ID_COPY_CD_REFERENCE) ? CD_REFERENCE : CD_DUPLICATE;
CustomData_copy(&hair_src->pdata, &hair_dst->pdata, CD_MASK_ALL, alloc_type, hair_dst->totpoint);
CustomData_copy(&hair_src->cdata, &hair_dst->cdata, CD_MASK_ALL, alloc_type, hair_dst->totcurve);
BKE_hair_update_customdata_pointers(hair_dst);
- hair_dst->batch_cache = NULL;
+ hair_dst->batch_cache = nullptr;
}
static void hair_free_data(ID *id)
@@ -115,8 +121,8 @@ static void hair_blend_write(BlendWriter *writer, ID *id, const void *id_address
{
Hair *hair = (Hair *)id;
- CustomDataLayer *players = NULL, players_buff[CD_TEMP_CHUNK_SIZE];
- CustomDataLayer *clayers = NULL, clayers_buff[CD_TEMP_CHUNK_SIZE];
+ CustomDataLayer *players = nullptr, players_buff[CD_TEMP_CHUNK_SIZE];
+ CustomDataLayer *clayers = nullptr, clayers_buff[CD_TEMP_CHUNK_SIZE];
CustomData_blend_write_prepare(&hair->pdata, &players, players_buff, ARRAY_SIZE(players_buff));
CustomData_blend_write_prepare(&hair->cdata, &clayers, clayers_buff, ARRAY_SIZE(clayers_buff));
@@ -174,33 +180,33 @@ static void hair_blend_read_expand(BlendExpander *expander, ID *id)
}
IDTypeInfo IDType_ID_HA = {
- .id_code = ID_HA,
- .id_filter = FILTER_ID_HA,
- .main_listbase_index = INDEX_ID_HA,
- .struct_size = sizeof(Hair),
- .name = "Hair",
- .name_plural = "hairs",
- .translation_context = BLT_I18NCONTEXT_ID_HAIR,
- .flags = IDTYPE_FLAGS_APPEND_IS_REUSABLE,
- .asset_type_info = NULL,
-
- .init_data = hair_init_data,
- .copy_data = hair_copy_data,
- .free_data = hair_free_data,
- .make_local = NULL,
- .foreach_id = hair_foreach_id,
- .foreach_cache = NULL,
- .foreach_path = NULL,
- .owner_get = NULL,
-
- .blend_write = hair_blend_write,
- .blend_read_data = hair_blend_read_data,
- .blend_read_lib = hair_blend_read_lib,
- .blend_read_expand = hair_blend_read_expand,
-
- .blend_read_undo_preserve = NULL,
-
- .lib_override_apply_post = NULL,
+ /*id_code */ ID_HA,
+ /*id_filter */ FILTER_ID_HA,
+ /*main_listbase_index */ INDEX_ID_HA,
+ /*struct_size */ sizeof(Hair),
+ /*name */ "Hair",
+ /*name_plural */ "hairs",
+ /*translation_context */ BLT_I18NCONTEXT_ID_HAIR,
+ /*flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE,
+ /*asset_type_info */ nullptr,
+
+ /*init_data */ hair_init_data,
+ /*copy_data */ hair_copy_data,
+ /*free_data */ hair_free_data,
+ /*make_local */ nullptr,
+ /*foreach_id */ hair_foreach_id,
+ /*foreach_cache */ nullptr,
+ /*foreach_path */ nullptr,
+ /*owner_get */ nullptr,
+
+ /*blend_write */ hair_blend_write,
+ /*blend_read_data */ hair_blend_read_data,
+ /*blend_read_lib */ hair_blend_read_lib,
+ /*blend_read_expand */ hair_blend_read_expand,
+
+ /*blend_read_undo_preserve */ nullptr,
+
+ /*lib_override_apply_post */ nullptr,
};
static void hair_random(Hair *hair)
@@ -250,7 +256,7 @@ static void hair_random(Hair *hair)
void *BKE_hair_add(Main *bmain, const char *name)
{
- Hair *hair = BKE_id_new(bmain, ID_HA, name);
+ Hair *hair = static_cast<Hair *>(BKE_id_new(bmain, ID_HA, name));
return hair;
}
@@ -258,14 +264,14 @@ void *BKE_hair_add(Main *bmain, const char *name)
BoundBox *BKE_hair_boundbox_get(Object *ob)
{
BLI_assert(ob->type == OB_HAIR);
- Hair *hair = ob->data;
+ Hair *hair = static_cast<Hair *>(ob->data);
- if (ob->runtime.bb != NULL && (ob->runtime.bb->flag & BOUNDBOX_DIRTY) == 0) {
+ if (ob->runtime.bb != nullptr && (ob->runtime.bb->flag & BOUNDBOX_DIRTY) == 0) {
return ob->runtime.bb;
}
- if (ob->runtime.bb == NULL) {
- ob->runtime.bb = MEM_callocN(sizeof(BoundBox), "hair boundbox");
+ if (ob->runtime.bb == nullptr) {
+ ob->runtime.bb = MEM_cnew<BoundBox>(__func__);
float min[3], max[3];
INIT_MINMAX(min, max);
@@ -289,10 +295,12 @@ BoundBox *BKE_hair_boundbox_get(Object *ob)
void BKE_hair_update_customdata_pointers(Hair *hair)
{
- hair->co = CustomData_get_layer_named(&hair->pdata, CD_PROP_FLOAT3, HAIR_ATTR_POSITION);
- hair->radius = CustomData_get_layer_named(&hair->pdata, CD_PROP_FLOAT, HAIR_ATTR_RADIUS);
- hair->curves = CustomData_get_layer(&hair->cdata, CD_HAIRCURVE);
- hair->mapping = CustomData_get_layer(&hair->cdata, CD_HAIRMAPPING);
+ hair->co = (float(*)[3])CustomData_get_layer_named(
+ &hair->pdata, CD_PROP_FLOAT3, HAIR_ATTR_POSITION);
+ hair->radius = (float *)CustomData_get_layer_named(
+ &hair->pdata, CD_PROP_FLOAT, HAIR_ATTR_RADIUS);
+ hair->curves = (HairCurve *)CustomData_get_layer(&hair->cdata, CD_HAIRCURVE);
+ hair->mapping = (HairMaping *)CustomData_get_layer(&hair->cdata, CD_HAIRMAPPING);
}
bool BKE_hair_customdata_required(Hair *UNUSED(hair), CustomDataLayer *layer)
@@ -304,10 +312,10 @@ bool BKE_hair_customdata_required(Hair *UNUSED(hair), CustomDataLayer *layer)
Hair *BKE_hair_new_for_eval(const Hair *hair_src, int totpoint, int totcurve)
{
- Hair *hair_dst = BKE_id_new_nomain(ID_HA, NULL);
+ Hair *hair_dst = static_cast<Hair *>(BKE_id_new_nomain(ID_HA, nullptr));
STRNCPY(hair_dst->id.name, hair_src->id.name);
- hair_dst->mat = MEM_dupallocN(hair_src->mat);
+ hair_dst->mat = static_cast<Material **>(MEM_dupallocN(hair_src->mat));
hair_dst->totcol = hair_src->totcol;
hair_dst->totpoint = totpoint;
@@ -327,7 +335,7 @@ Hair *BKE_hair_copy_for_eval(Hair *hair_src, bool reference)
flags |= LIB_ID_COPY_CD_REFERENCE;
}
- Hair *result = (Hair *)BKE_id_copy_ex(NULL, &hair_src->id, NULL, flags);
+ Hair *result = (Hair *)BKE_id_copy_ex(nullptr, &hair_src->id, nullptr, flags);
return result;
}
@@ -351,7 +359,7 @@ static Hair *hair_evaluate_modifiers(struct Depsgraph *depsgraph,
/* Evaluate modifiers. */
for (; md; md = md->next) {
- const ModifierTypeInfo *mti = BKE_modifier_get_info(md->type);
+ const ModifierTypeInfo *mti = BKE_modifier_get_info(static_cast<ModifierType>(md->type));
if (!BKE_modifier_is_enabled(scene, md, required_mode)) {
continue;
@@ -370,7 +378,7 @@ static Hair *hair_evaluate_modifiers(struct Depsgraph *depsgraph,
BKE_hair_update_customdata_pointers(hair);
/* Created deformed coordinates array on demand. */
- mti->deformVerts(md, &mectx, NULL, hair->co, hair->totpoint);
+ mti->deformVerts(md, &mectx, nullptr, hair->co, hair->totpoint);
}
else if (mti->modifyHair) {
/* Ensure we are not modifying the input. */
@@ -383,7 +391,7 @@ static Hair *hair_evaluate_modifiers(struct Depsgraph *depsgraph,
if (hair_next && hair_next != hair) {
/* If the modifier returned a new hair, release the old one. */
if (hair != hair_input) {
- BKE_id_free(NULL, hair);
+ BKE_id_free(nullptr, hair);
}
hair = hair_next;
}
@@ -399,7 +407,7 @@ void BKE_hair_data_update(struct Depsgraph *depsgraph, struct Scene *scene, Obje
BKE_object_free_derived_caches(object);
/* Evaluate modifiers. */
- Hair *hair = object->data;
+ Hair *hair = static_cast<Hair *>(object->data);
Hair *hair_eval = hair_evaluate_modifiers(depsgraph, scene, object, hair);
/* Assign evaluated object. */
@@ -409,8 +417,8 @@ void BKE_hair_data_update(struct Depsgraph *depsgraph, struct Scene *scene, Obje
/* Draw Cache */
-void (*BKE_hair_batch_cache_dirty_tag_cb)(Hair *hair, int mode) = NULL;
-void (*BKE_hair_batch_cache_free_cb)(Hair *hair) = NULL;
+void (*BKE_hair_batch_cache_dirty_tag_cb)(Hair *hair, int mode) = nullptr;
+void (*BKE_hair_batch_cache_free_cb)(Hair *hair) = nullptr;
void BKE_hair_batch_cache_dirty_tag(Hair *hair, int mode)
{
diff --git a/source/blender/blenkernel/intern/idprop_create.cc b/source/blender/blenkernel/intern/idprop_create.cc
new file mode 100644
index 00000000000..12f2fdc6a63
--- /dev/null
+++ b/source/blender/blenkernel/intern/idprop_create.cc
@@ -0,0 +1,140 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2021 by Blender Foundation.
+ */
+
+#include <type_traits>
+
+#include "DNA_ID.h"
+
+#include "BKE_idprop.hh"
+
+namespace blender::bke::idprop {
+
+/* -------------------------------------------------------------------- */
+/** \name Create Functions
+ * \{ */
+
+std::unique_ptr<IDProperty, IDPropertyDeleter> create(const StringRefNull prop_name, int32_t value)
+{
+ IDPropertyTemplate prop_template{0};
+ prop_template.i = value;
+ IDProperty *property = IDP_New(IDP_INT, &prop_template, prop_name.c_str());
+ return std::unique_ptr<IDProperty, IDPropertyDeleter>(property);
+}
+
+std::unique_ptr<IDProperty, IDPropertyDeleter> create(const StringRefNull prop_name, float value)
+{
+ IDPropertyTemplate prop_template{0};
+ prop_template.f = value;
+ IDProperty *property = IDP_New(IDP_FLOAT, &prop_template, prop_name.c_str());
+ return std::unique_ptr<IDProperty, IDPropertyDeleter>(property);
+}
+
+std::unique_ptr<IDProperty, IDPropertyDeleter> create(const StringRefNull prop_name, double value)
+{
+ IDPropertyTemplate prop_template{0};
+ prop_template.d = value;
+ IDProperty *property = IDP_New(IDP_DOUBLE, &prop_template, prop_name.c_str());
+ return std::unique_ptr<IDProperty, IDPropertyDeleter>(property);
+}
+
+std::unique_ptr<IDProperty, IDPropertyDeleter> create(const StringRefNull prop_name,
+ const StringRefNull value)
+{
+ IDProperty *property = IDP_NewString(value.c_str(), prop_name.c_str(), value.size() + 1);
+ return std::unique_ptr<IDProperty, IDPropertyDeleter>(property);
+}
+
+static std::unique_ptr<IDProperty, IDPropertyDeleter> array_create(const StringRefNull prop_name,
+ eIDPropertyType subtype,
+ size_t array_len)
+{
+ IDPropertyTemplate prop_template{0};
+ prop_template.array.len = array_len;
+ prop_template.array.type = subtype;
+ IDProperty *property = IDP_New(IDP_ARRAY, &prop_template, prop_name.c_str());
+ return std::unique_ptr<IDProperty, IDPropertyDeleter>(property);
+}
+
+static void array_values_set(IDProperty *property,
+ const void *values,
+ size_t values_len,
+ size_t value_size)
+{
+ BLI_assert(values);
+ BLI_assert(property->len == values_len);
+ memcpy(IDP_Array(property), values, values_len * value_size);
+}
+
+/**
+ * Create a IDProperty array of `id_property_subtype` and fill it with the given values.
+ */
+template<
+ /** C-Primitive type of the array. Can be int32_t, float, double. */
+ typename PrimitiveType,
+ /** Subtype of the ID_ARRAY. Must match PrimitiveType. */
+ eIDPropertyType id_property_subtype>
+std::unique_ptr<IDProperty, IDPropertyDeleter> create_array(StringRefNull prop_name,
+ Span<PrimitiveType> values)
+{
+ static_assert(std::is_same_v<PrimitiveType, int32_t> || std::is_same_v<PrimitiveType, float_t> ||
+ std::is_same_v<PrimitiveType, double>,
+ "Allowed values for PrimitiveType are int32_t, float and double.");
+ static_assert(!std::is_same_v<PrimitiveType, int32_t> || id_property_subtype == IDP_INT,
+ "PrimitiveType and id_property_type do not match (int32_t).");
+ static_assert(!std::is_same_v<PrimitiveType, float> || id_property_subtype == IDP_FLOAT,
+ "PrimitiveType and id_property_type do not match (float).");
+ static_assert(!std::is_same_v<PrimitiveType, double> || id_property_subtype == IDP_DOUBLE,
+ "PrimitiveType and id_property_type do not match (double).");
+
+ const int64_t values_len = values.size();
+ BLI_assert(values_len > 0);
+ std::unique_ptr<IDProperty, IDPropertyDeleter> property = array_create(
+ prop_name.c_str(), id_property_subtype, values_len);
+ array_values_set(
+ property.get(), static_cast<const void *>(values.data()), values_len, sizeof(PrimitiveType));
+ return property;
+}
+
+std::unique_ptr<IDProperty, IDPropertyDeleter> create(const StringRefNull prop_name,
+ Span<int32_t> values)
+{
+ return create_array<int32_t, IDP_INT>(prop_name, values);
+}
+
+std::unique_ptr<IDProperty, IDPropertyDeleter> create(const StringRefNull prop_name,
+ Span<float> values)
+{
+ return create_array<float, IDP_FLOAT>(prop_name, values);
+}
+
+std::unique_ptr<IDProperty, IDPropertyDeleter> create(const StringRefNull prop_name,
+ Span<double> values)
+{
+ return create_array<double, IDP_DOUBLE>(prop_name, values);
+}
+
+std::unique_ptr<IDProperty, IDPropertyDeleter> create_group(const StringRefNull prop_name)
+{
+ IDPropertyTemplate prop_template{0};
+ IDProperty *property = IDP_New(IDP_GROUP, &prop_template, prop_name.c_str());
+ return std::unique_ptr<IDProperty, IDPropertyDeleter>(property);
+}
+
+/* \} */
+
+} // namespace blender::bke::idprop
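
idprop_create.cc introduces a typed C++ interface over IDP_New that returns unique_ptr handles with a custom deleter. A hypothetical use of these helpers; attaching the results to a group through the pre-existing IDP_AddToGroup C API is an assumption, not something this file adds:

    using namespace blender::bke::idprop;
    std::unique_ptr<IDProperty, IDPropertyDeleter> group = create_group("settings");
    std::unique_ptr<IDProperty, IDPropertyDeleter> count = create("count", 3);
    std::unique_ptr<IDProperty, IDPropertyDeleter> weights = create(
        "weights", blender::Span<float>({0.25f, 0.75f}));
    /* Hand ownership over to the group via the existing C API. */
    IDP_AddToGroup(group.get(), count.release());
    IDP_AddToGroup(group.get(), weights.release());
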
diff --git a/source/blender/blenkernel/intern/idprop_serialize.cc b/source/blender/blenkernel/intern/idprop_serialize.cc
new file mode 100644
index 00000000000..08a7f13b806
--- /dev/null
+++ b/source/blender/blenkernel/intern/idprop_serialize.cc
@@ -0,0 +1,844 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2021 by Blender Foundation.
+ */
+
+#include <optional>
+
+#include "DNA_ID.h"
+
+#include "BKE_idprop.hh"
+
+#include "BLI_listbase.h"
+
+namespace blender::bke::idprop {
+using namespace blender::io::serialize;
+
+/* Forward declarations */
+class IDPropertySerializer;
+struct DictionaryEntryParser;
+static IDProperty *idprop_from_value(const DictionaryValue &value);
+static const IDPropertySerializer &serializer_for(eIDPropertyType property_type);
+static const IDPropertySerializer &serializer_for(StringRef idprop_typename);
+
+/* -------------------------------------------------------------------- */
+/** \name ID property serialization.
+ * \{ */
+
+/* Definitions */
+static constexpr StringRef IDP_KEY_NAME("name");
+static constexpr StringRef IDP_KEY_TYPE("type");
+static constexpr StringRef IDP_KEY_SUBTYPE("subtype");
+static constexpr StringRef IDP_KEY_VALUE("value");
+
+static constexpr StringRef IDP_PROPERTY_TYPENAME_STRING("IDP_STRING");
+static constexpr StringRef IDP_PROPERTY_TYPENAME_INT("IDP_INT");
+static constexpr StringRef IDP_PROPERTY_TYPENAME_FLOAT("IDP_FLOAT");
+static constexpr StringRef IDP_PROPERTY_TYPENAME_DOUBLE("IDP_DOUBLE");
+static constexpr StringRef IDP_PROPERTY_TYPENAME_ARRAY("IDP_ARRAY");
+static constexpr StringRef IDP_PROPERTY_TYPENAME_GROUP("IDP_GROUP");
+static constexpr StringRef IDP_PROPERTY_TYPENAME_UNKNOWN("IDP_UNKNOWN");
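+
+/* With these keys a scalar property serializes to a dictionary of the shape
+ * `{"name": ..., "type": ..., "value": ...}`; for example an IDP_FLOAT named "radius" becomes
+ * `{"name": "radius", "type": "IDP_FLOAT", "value": 0.5}`. Array properties additionally store
+ * their element type under the "subtype" key (see IDPArraySerializer below). */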
+
+/**
+ * \brief Base class for (de)serializing IDProperties.
+ *
+ * Has a subclass for supported IDProperties and one for unsupported IDProperties.
+ */
+class IDPropertySerializer {
+ public:
+ constexpr IDPropertySerializer() = default;
+
+ /**
+ * \brief return the type name for (de)serializing.
+ * Type name is stored in the `type` or `subtype` attribute of the serialized id_property.
+ */
+ virtual std::string type_name() const = 0;
+
+ /**
+ * \brief return the IDPropertyType for (de)serializing.
+ */
+ virtual std::optional<eIDPropertyType> property_type() const = 0;
+
+ /**
+ * \brief create dictionary containing the given id_property.
+ */
+ virtual std::shared_ptr<DictionaryValue> idprop_to_dictionary(
+ const struct IDProperty *id_property) const = 0;
+
+ /**
+ * \brief convert the entry to an id property.
+ */
+ virtual std::unique_ptr<IDProperty, IDPropertyDeleter> entry_to_idprop(
+ DictionaryEntryParser &entry_reader) const = 0;
+
+ /**
+ * \brief Can the serializer be used?
+ *
+ * IDP_ID and IDP_IDPARRAY aren't supported for serialization.
+ */
+ virtual bool supports_serializing() const
+ {
+ return true;
+ }
+
+ protected:
+ /**
+ * \brief Create a new DictionaryValue instance.
+ *
+ * Only fill the dictionary with common attributes (name, type).
+ */
+ std::shared_ptr<DictionaryValue> create_dictionary(const struct IDProperty *id_property) const
+ {
+ std::shared_ptr<DictionaryValue> result = std::make_shared<DictionaryValue>();
+ DictionaryValue::Items &attributes = result->elements();
+ attributes.append_as(std::pair(IDP_KEY_NAME, new StringValue(id_property->name)));
+ attributes.append_as(std::pair(IDP_KEY_TYPE, new StringValue(type_name())));
+ return result;
+ }
+};
+
+/**
+ * \brief Helper class for parsing DictionaryValues.
+ */
+struct DictionaryEntryParser {
+ const DictionaryValue::Lookup lookup;
+
+ public:
+ explicit DictionaryEntryParser(const DictionaryValue &value) : lookup(value.create_lookup())
+ {
+ }
+
+ std::optional<eIDPropertyType> get_type() const
+ {
+ return get_id_property_type(IDP_KEY_TYPE);
+ }
+
+ std::optional<eIDPropertyType> get_subtype() const
+ {
+ return get_id_property_type(IDP_KEY_SUBTYPE);
+ }
+
+ std::optional<std::string> get_name() const
+ {
+ return get_string(IDP_KEY_NAME);
+ }
+
+ std::optional<std::string> get_string_value() const
+ {
+ return get_string(IDP_KEY_VALUE);
+ }
+
+ std::optional<int32_t> get_int_value() const
+ {
+ return get_int(IDP_KEY_VALUE);
+ }
+
+ std::optional<float> get_float_value() const
+ {
+ return get_float(IDP_KEY_VALUE);
+ }
+
+ std::optional<double> get_double_value() const
+ {
+ return get_double(IDP_KEY_VALUE);
+ }
+
+ const ArrayValue *get_array_value() const
+ {
+ return get_array(IDP_KEY_VALUE);
+ }
+
+ std::optional<Vector<int32_t>> get_array_int_value() const
+ {
+ return get_array_primitive<int32_t, IntValue>(IDP_KEY_VALUE);
+ }
+
+ std::optional<Vector<float>> get_array_float_value() const
+ {
+ return get_array_primitive<float, DoubleValue>(IDP_KEY_VALUE);
+ }
+
+ std::optional<Vector<double>> get_array_double_value() const
+ {
+ return get_array_primitive<double, DoubleValue>(IDP_KEY_VALUE);
+ }
+
+ private:
+ std::optional<std::string> get_string(StringRef key) const
+ {
+ const DictionaryValue::LookupValue *value_ptr = lookup.lookup_ptr(key);
+ if (value_ptr == nullptr) {
+ return std::nullopt;
+ }
+ const DictionaryValue::LookupValue &value = *value_ptr;
+
+ if (value->type() != eValueType::String) {
+ return std::nullopt;
+ }
+
+ return value->as_string_value()->value();
+ }
+
+ const ArrayValue *get_array(StringRef key) const
+ {
+ const DictionaryValue::LookupValue *value_ptr = lookup.lookup_ptr(key);
+ if (value_ptr == nullptr) {
+ return nullptr;
+ }
+ const DictionaryValue::LookupValue &value = *value_ptr;
+
+ if (value->type() != eValueType::Array) {
+ return nullptr;
+ }
+
+ return value->as_array_value();
+ }
+
+ std::optional<int32_t> get_int(StringRef key) const
+ {
+ const DictionaryValue::LookupValue *value_ptr = lookup.lookup_ptr(key);
+ if (value_ptr == nullptr) {
+ return std::nullopt;
+ }
+ const DictionaryValue::LookupValue &value = *value_ptr;
+
+ if (value->type() != eValueType::Int) {
+ return std::nullopt;
+ }
+
+ return value->as_int_value()->value();
+ }
+
+ std::optional<double> get_double(StringRef key) const
+ {
+ const DictionaryValue::LookupValue *value_ptr = lookup.lookup_ptr(key);
+ if (value_ptr == nullptr) {
+ return std::nullopt;
+ }
+ const DictionaryValue::LookupValue &value = *value_ptr;
+
+ if (value->type() != eValueType::Double) {
+ return std::nullopt;
+ }
+
+ return value->as_double_value()->value();
+ }
+
+ std::optional<float> get_float(StringRef key) const
+ {
+ return static_cast<std::optional<float>>(get_double(key));
+ }
+
+ template<typename PrimitiveType, typename ValueType>
+ std::optional<Vector<PrimitiveType>> get_array_primitive(StringRef key) const
+ {
+ const DictionaryValue::LookupValue *value_ptr = lookup.lookup_ptr(key);
+ if (value_ptr == nullptr) {
+ return std::nullopt;
+ }
+ const DictionaryValue::LookupValue &value = *value_ptr;
+
+ if (value->type() != eValueType::Array) {
+ return std::nullopt;
+ }
+
+ Vector<PrimitiveType> result;
+ const ArrayValue::Items &elements = value->as_array_value()->elements();
+ for (const ArrayValue::Item &element : elements) {
+ const ValueType *value_type = static_cast<const ValueType *>(element.get());
+ PrimitiveType primitive_value = value_type->value();
+ result.append_as(primitive_value);
+ }
+
+ return result;
+ }
+
+ std::optional<eIDPropertyType> get_id_property_type(StringRef key) const
+ {
+ std::optional<std::string> string_value = get_string(key);
+ if (!string_value.has_value()) {
+ return std::nullopt;
+ }
+ const IDPropertySerializer &serializer = serializer_for(*string_value);
+ return serializer.property_type();
+ }
+};
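+
+/* Typical use, as in the serializers below: wrap a parsed DictionaryValue and query the common
+ * keys, for example
+ *   DictionaryEntryParser parser(value);
+ *   if (parser.get_type() == IDP_INT) { ... parser.get_name(), parser.get_int_value() ... }
+ * Missing or mistyped keys simply yield std::nullopt (or nullptr for array lookups). */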
+
+/** \brief IDPSerializer for IDP_STRING. */
+class IDPStringSerializer : public IDPropertySerializer {
+ public:
+ constexpr IDPStringSerializer() = default;
+
+ std::string type_name() const override
+ {
+ return IDP_PROPERTY_TYPENAME_STRING;
+ }
+
+ std::optional<eIDPropertyType> property_type() const override
+ {
+ return IDP_STRING;
+ }
+
+ std::shared_ptr<DictionaryValue> idprop_to_dictionary(
+ const struct IDProperty *id_property) const override
+ {
+ std::shared_ptr<DictionaryValue> result = create_dictionary(id_property);
+ DictionaryValue::Items &attributes = result->elements();
+ attributes.append_as(std::pair(IDP_KEY_VALUE, new StringValue(IDP_String(id_property))));
+ return result;
+ }
+
+ std::unique_ptr<IDProperty, IDPropertyDeleter> entry_to_idprop(
+ DictionaryEntryParser &entry_reader) const override
+ {
+ BLI_assert(*(entry_reader.get_type()) == IDP_STRING);
+ std::optional<std::string> name = entry_reader.get_name();
+ if (!name.has_value()) {
+ return nullptr;
+ }
+ std::optional<std::string> string_value = entry_reader.get_string_value();
+ if (!string_value.has_value()) {
+ return nullptr;
+ }
+ return create(name->c_str(), string_value->c_str());
+ }
+};
+
+/** \brief IDPSerializer for IDP_INT. */
+class IDPIntSerializer : public IDPropertySerializer {
+ public:
+ constexpr IDPIntSerializer() = default;
+
+ std::string type_name() const override
+ {
+ return IDP_PROPERTY_TYPENAME_INT;
+ }
+
+ std::optional<eIDPropertyType> property_type() const override
+ {
+ return IDP_INT;
+ }
+
+ std::shared_ptr<DictionaryValue> idprop_to_dictionary(
+ const struct IDProperty *id_property) const override
+ {
+ std::shared_ptr<DictionaryValue> result = create_dictionary(id_property);
+ DictionaryValue::Items &attributes = result->elements();
+ attributes.append_as(std::pair(IDP_KEY_VALUE, new IntValue(IDP_Int(id_property))));
+ return result;
+ }
+
+ std::unique_ptr<IDProperty, IDPropertyDeleter> entry_to_idprop(
+ DictionaryEntryParser &entry_reader) const override
+ {
+ BLI_assert(*(entry_reader.get_type()) == IDP_INT);
+ std::optional<std::string> name = entry_reader.get_name();
+ if (!name.has_value()) {
+ return nullptr;
+ }
+ std::optional<int32_t> extracted_value = entry_reader.get_int_value();
+ if (!extracted_value.has_value()) {
+ return nullptr;
+ }
+ return create(name->c_str(), *extracted_value);
+ }
+};
+
+/** \brief IDPSerializer for IDP_FLOAT. */
+class IDPFloatSerializer : public IDPropertySerializer {
+ public:
+ constexpr IDPFloatSerializer() = default;
+
+ std::string type_name() const override
+ {
+ return IDP_PROPERTY_TYPENAME_FLOAT;
+ }
+
+ std::optional<eIDPropertyType> property_type() const override
+ {
+ return IDP_FLOAT;
+ }
+
+ std::shared_ptr<DictionaryValue> idprop_to_dictionary(
+ const struct IDProperty *id_property) const override
+ {
+ std::shared_ptr<DictionaryValue> result = create_dictionary(id_property);
+ DictionaryValue::Items &attributes = result->elements();
+ attributes.append_as(std::pair(IDP_KEY_VALUE, new DoubleValue(IDP_Float(id_property))));
+ return result;
+ }
+
+ std::unique_ptr<IDProperty, IDPropertyDeleter> entry_to_idprop(
+ DictionaryEntryParser &entry_reader) const override
+ {
+ BLI_assert(*(entry_reader.get_type()) == IDP_FLOAT);
+ std::optional<std::string> name = entry_reader.get_name();
+ if (!name.has_value()) {
+ return nullptr;
+ }
+ std::optional<float> extracted_value = entry_reader.get_float_value();
+ if (!extracted_value.has_value()) {
+ return nullptr;
+ }
+ return create(name->c_str(), *extracted_value);
+ }
+};
+
+/** \brief IDPSerializer for IDP_DOUBLE. */
+class IDPDoubleSerializer : public IDPropertySerializer {
+ public:
+ constexpr IDPDoubleSerializer() = default;
+
+ std::string type_name() const override
+ {
+ return IDP_PROPERTY_TYPENAME_DOUBLE;
+ }
+
+ std::optional<eIDPropertyType> property_type() const override
+ {
+ return IDP_DOUBLE;
+ }
+
+ std::shared_ptr<DictionaryValue> idprop_to_dictionary(
+ const struct IDProperty *id_property) const override
+ {
+ std::shared_ptr<DictionaryValue> result = create_dictionary(id_property);
+ DictionaryValue::Items &attributes = result->elements();
+ attributes.append_as(std::pair(IDP_KEY_VALUE, new DoubleValue(IDP_Double(id_property))));
+ return result;
+ }
+
+ std::unique_ptr<IDProperty, IDPropertyDeleter> entry_to_idprop(
+ DictionaryEntryParser &entry_reader) const override
+ {
+ BLI_assert(*(entry_reader.get_type()) == IDP_DOUBLE);
+ std::optional<std::string> name = entry_reader.get_name();
+ if (!name.has_value()) {
+ return nullptr;
+ }
+ std::optional<double> extracted_value = entry_reader.get_double_value();
+ if (!extracted_value.has_value()) {
+ return nullptr;
+ }
+ return create(name->c_str(), *extracted_value);
+ }
+};
+
+/** \brief IDPSerializer for IDP_ARRAY. */
+class IDPArraySerializer : public IDPropertySerializer {
+ public:
+ constexpr IDPArraySerializer() = default;
+
+ std::string type_name() const override
+ {
+ return IDP_PROPERTY_TYPENAME_ARRAY;
+ }
+
+ std::optional<eIDPropertyType> property_type() const override
+ {
+ return IDP_ARRAY;
+ }
+
+ std::shared_ptr<DictionaryValue> idprop_to_dictionary(
+ const struct IDProperty *id_property) const override
+ {
+ std::shared_ptr<DictionaryValue> result = create_dictionary(id_property);
+ DictionaryValue::Items &attributes = result->elements();
+ const IDPropertySerializer &subtype_serializer = serializer_for(
+ static_cast<eIDPropertyType>(id_property->subtype));
+ attributes.append_as(
+ std::pair(IDP_KEY_SUBTYPE, new StringValue(subtype_serializer.type_name())));
+
+ std::shared_ptr<ArrayValue> array = std::make_shared<ArrayValue>();
+ switch (static_cast<eIDPropertyType>(id_property->subtype)) {
+ case IDP_INT: {
+ int32_t *values = static_cast<int32_t *>(IDP_Array(id_property));
+ add_values<int32_t, IntValue>(array.get(), Span<int32_t>(values, id_property->len));
+ break;
+ }
+
+ case IDP_FLOAT: {
+ float *values = static_cast<float *>(IDP_Array(id_property));
+ add_values<float, DoubleValue>(array.get(), Span<float>(values, id_property->len));
+ break;
+ }
+
+ case IDP_DOUBLE: {
+ double *values = static_cast<double *>(IDP_Array(id_property));
+ add_values<double, DoubleValue>(array.get(), Span<double>(values, id_property->len));
+ break;
+ }
+
+ case IDP_GROUP: {
+ IDProperty *values = static_cast<IDProperty *>(IDP_Array(id_property));
+ add_values(array.get(), Span<IDProperty>(values, id_property->len));
+ break;
+ }
+
+ default: {
+ /* IDP_ARRAY only supports IDP_INT, IDP_FLOAT, IDP_DOUBLE and IDP_GROUP. */
+ BLI_assert_unreachable();
+ break;
+ }
+ }
+ attributes.append_as(std::pair(IDP_KEY_VALUE, std::move(array)));
+
+ return result;
+ }
+
+ std::unique_ptr<IDProperty, IDPropertyDeleter> entry_to_idprop(
+ DictionaryEntryParser &entry_reader) const override
+ {
+ BLI_assert(*(entry_reader.get_type()) == IDP_ARRAY);
+ std::optional<eIDPropertyType> property_subtype = entry_reader.get_subtype();
+ if (!property_subtype.has_value()) {
+ return nullptr;
+ }
+
+ switch (*property_subtype) {
+ case IDP_INT:
+ return idprop_array_int_from_value(entry_reader);
+
+ case IDP_FLOAT:
+ return idprop_array_float_from_value(entry_reader);
+
+ case IDP_DOUBLE:
+ return idprop_array_double_from_value(entry_reader);
+
+ default:
+ break;
+ }
+ return nullptr;
+ }
+
+ private:
+  /** Add the given values to the array. */
+ template</* C-primitive type of the values to add. Possible types are `float`, `int32_t` or
+ * `double`. */
+ typename PrimitiveType,
+ /* Type of value that can store the PrimitiveType in the Array. */
+ typename ValueType>
+ void add_values(ArrayValue *array, Span<PrimitiveType> values) const
+ {
+ ArrayValue::Items &items = array->elements();
+ for (PrimitiveType value : values) {
+ items.append_as(std::make_shared<ValueType>(value));
+ }
+ }
+
+ void add_values(ArrayValue *array, Span<IDProperty> values) const
+ {
+ ArrayValue::Items &items = array->elements();
+ for (const IDProperty &id_property : values) {
+ const IDPropertySerializer &value_serializer = serializer_for(
+ static_cast<eIDPropertyType>(id_property.type));
+ if (!value_serializer.supports_serializing()) {
+ continue;
+ }
+ std::shared_ptr<DictionaryValue> value = value_serializer.idprop_to_dictionary(&id_property);
+ items.append_as(value);
+ }
+ }
+
+ std::unique_ptr<IDProperty, IDPropertyDeleter> idprop_array_int_from_value(
+ DictionaryEntryParser &entry_reader) const
+ {
+ BLI_assert(*(entry_reader.get_type()) == IDP_ARRAY);
+ BLI_assert(*(entry_reader.get_subtype()) == IDP_INT);
+ std::optional<std::string> name = entry_reader.get_name();
+ if (!name.has_value()) {
+ return nullptr;
+ }
+ std::optional<Vector<int32_t>> extracted_value = entry_reader.get_array_int_value();
+ if (!extracted_value.has_value()) {
+ return nullptr;
+ }
+ return create(name->c_str(), *extracted_value);
+ }
+
+ std::unique_ptr<IDProperty, IDPropertyDeleter> idprop_array_float_from_value(
+ DictionaryEntryParser &entry_reader) const
+ {
+ BLI_assert(*(entry_reader.get_type()) == IDP_ARRAY);
+ BLI_assert(*(entry_reader.get_subtype()) == IDP_FLOAT);
+ std::optional<std::string> name = entry_reader.get_name();
+ if (!name.has_value()) {
+ return nullptr;
+ }
+ std::optional<Vector<float>> extracted_value = entry_reader.get_array_float_value();
+ if (!extracted_value.has_value()) {
+ return nullptr;
+ }
+ return create(name->c_str(), *extracted_value);
+ }
+
+ std::unique_ptr<IDProperty, IDPropertyDeleter> idprop_array_double_from_value(
+ DictionaryEntryParser &entry_reader) const
+ {
+ BLI_assert(*(entry_reader.get_type()) == IDP_ARRAY);
+ BLI_assert(*(entry_reader.get_subtype()) == IDP_DOUBLE);
+ std::optional<std::string> name = entry_reader.get_name();
+ if (!name.has_value()) {
+ return nullptr;
+ }
+ std::optional<Vector<double>> extracted_value = entry_reader.get_array_double_value();
+ if (!extracted_value.has_value()) {
+ return nullptr;
+ }
+ return create(name->c_str(), *extracted_value);
+ }
+};
+
+/** \brief IDPSerializer for IDP_GROUP. */
+class IDPGroupSerializer : public IDPropertySerializer {
+ public:
+ constexpr IDPGroupSerializer() = default;
+
+ std::string type_name() const override
+ {
+ return IDP_PROPERTY_TYPENAME_GROUP;
+ }
+
+ std::optional<eIDPropertyType> property_type() const override
+ {
+ return IDP_GROUP;
+ }
+
+ std::shared_ptr<DictionaryValue> idprop_to_dictionary(
+ const struct IDProperty *id_property) const override
+ {
+ std::shared_ptr<DictionaryValue> result = create_dictionary(id_property);
+ DictionaryValue::Items &attributes = result->elements();
+ std::shared_ptr<ArrayValue> array = std::make_shared<ArrayValue>();
+ ArrayValue::Items &elements = array->elements();
+
+ LISTBASE_FOREACH (IDProperty *, sub_property, &id_property->data.group) {
+
+ const IDPropertySerializer &sub_property_serializer = serializer_for(
+ static_cast<eIDPropertyType>(sub_property->type));
+ elements.append_as(sub_property_serializer.idprop_to_dictionary(sub_property));
+ }
+
+ attributes.append_as(std::pair(IDP_KEY_VALUE, array));
+ return result;
+ }
+
+ std::unique_ptr<IDProperty, IDPropertyDeleter> entry_to_idprop(
+ DictionaryEntryParser &entry_reader) const override
+ {
+ BLI_assert(*(entry_reader.get_type()) == IDP_GROUP);
+ std::optional<std::string> name = entry_reader.get_name();
+ if (!name.has_value()) {
+ return nullptr;
+ }
+
+ const ArrayValue *array = entry_reader.get_array_value();
+ if (array == nullptr) {
+ return nullptr;
+ }
+
+ std::unique_ptr<IDProperty, IDPropertyDeleter> result = create_group(name->c_str());
+ for (const ArrayValue::Item &element : array->elements()) {
+ if (element->type() != eValueType::Dictionary) {
+ continue;
+ }
+ const DictionaryValue *subobject = element->as_dictionary_value();
+ IDProperty *subproperty = idprop_from_value(*subobject);
+ IDP_AddToGroup(result.get(), subproperty);
+ }
+
+ return result;
+ }
+};
+
+/**
+ * \brief Dummy serializer for unknown and unsupported types.
+ */
+class IDPUnknownSerializer : public IDPropertySerializer {
+ public:
+ constexpr IDPUnknownSerializer() = default;
+ std::string type_name() const override
+ {
+ return IDP_PROPERTY_TYPENAME_UNKNOWN;
+ }
+ std::optional<eIDPropertyType> property_type() const override
+ {
+ return std::nullopt;
+ }
+
+ std::shared_ptr<DictionaryValue> idprop_to_dictionary(
+ const struct IDProperty *UNUSED(id_property)) const override
+ {
+ BLI_assert_unreachable();
+ return nullptr;
+ }
+
+ bool supports_serializing() const override
+ {
+ return false;
+ }
+
+ std::unique_ptr<IDProperty, IDPropertyDeleter> entry_to_idprop(
+ DictionaryEntryParser &UNUSED(entry_reader)) const override
+ {
+ return nullptr;
+ }
+};
+
+/* Serializers are constructed statically (constexpr) to avoid runtime construction/destruction. */
+static constexpr IDPStringSerializer IDP_SERIALIZER_STRING;
+static constexpr IDPIntSerializer IDP_SERIALIZER_INT;
+static constexpr IDPFloatSerializer IDP_SERIALIZER_FLOAT;
+static constexpr IDPDoubleSerializer IDP_SERIALIZER_DOUBLE;
+static constexpr IDPArraySerializer IDP_SERIALIZER_ARRAY;
+static constexpr IDPGroupSerializer IDP_SERIALIZER_GROUP;
+static constexpr IDPUnknownSerializer IDP_SERIALIZER_UNKNOWN;
+
+/** \brief get the serializer for the given property type. */
+static const IDPropertySerializer &serializer_for(eIDPropertyType property_type)
+{
+ switch (property_type) {
+ case IDP_STRING:
+ return IDP_SERIALIZER_STRING;
+
+ case IDP_INT:
+ return IDP_SERIALIZER_INT;
+
+ case IDP_FLOAT:
+ return IDP_SERIALIZER_FLOAT;
+
+ case IDP_DOUBLE:
+ return IDP_SERIALIZER_DOUBLE;
+
+ case IDP_ARRAY:
+ return IDP_SERIALIZER_ARRAY;
+
+ case IDP_GROUP:
+ return IDP_SERIALIZER_GROUP;
+
+ default:
+ BLI_assert_msg(false, "Trying to convert an unsupported/unknown property type to a string");
+ return IDP_SERIALIZER_UNKNOWN;
+ }
+}
+
+/** \brief get serializer for the given typename. */
+static const IDPropertySerializer &serializer_for(StringRef idprop_typename)
+{
+ if (idprop_typename == IDP_PROPERTY_TYPENAME_STRING) {
+ return IDP_SERIALIZER_STRING;
+ }
+ if (idprop_typename == IDP_PROPERTY_TYPENAME_INT) {
+ return IDP_SERIALIZER_INT;
+ }
+ if (idprop_typename == IDP_PROPERTY_TYPENAME_FLOAT) {
+ return IDP_SERIALIZER_FLOAT;
+ }
+ if (idprop_typename == IDP_PROPERTY_TYPENAME_DOUBLE) {
+ return IDP_SERIALIZER_DOUBLE;
+ }
+ if (idprop_typename == IDP_PROPERTY_TYPENAME_ARRAY) {
+ return IDP_SERIALIZER_ARRAY;
+ }
+ if (idprop_typename == IDP_PROPERTY_TYPENAME_GROUP) {
+ return IDP_SERIALIZER_GROUP;
+ }
+ return IDP_SERIALIZER_UNKNOWN;
+}
+
+/* \} */
+
+/* -------------------------------------------------------------------- */
+/** \name IDProperty to Value
+ * \{ */
+std::unique_ptr<ArrayValue> convert_to_serialize_values(const struct IDProperty *properties)
+{
+ BLI_assert(properties != nullptr);
+ std::unique_ptr<ArrayValue> result = std::make_unique<ArrayValue>();
+ ArrayValue::Items &elements = result->elements();
+ const struct IDProperty *current_property = properties;
+ while (current_property != nullptr) {
+ const IDPropertySerializer &serializer = serializer_for(
+ static_cast<eIDPropertyType>(current_property->type));
+ if (serializer.supports_serializing()) {
+ elements.append_as(serializer.idprop_to_dictionary(current_property));
+ }
+ current_property = current_property->next;
+ }
+
+ return result;
+}
+
+/* \} */
+
+/* -------------------------------------------------------------------- */
+/** \name IDProperty from Value
+ * \{ */
+
+static IDProperty *idprop_from_value(const DictionaryValue &value)
+{
+ DictionaryEntryParser entry_reader(value);
+ std::optional<eIDPropertyType> property_type = entry_reader.get_type();
+ if (!property_type.has_value()) {
+ return nullptr;
+ }
+
+ const IDPropertySerializer &serializer = serializer_for(*property_type);
+ return serializer.entry_to_idprop(entry_reader).release();
+}
+
+static IDProperty *idprop_from_value(const ArrayValue &value)
+{
+ IDProperty *result = nullptr;
+ IDProperty *previous_added = nullptr;
+
+ const ArrayValue::Items &elements = value.elements();
+ for (const ArrayValue::Item &element : elements) {
+ if (element->type() != eValueType::Dictionary) {
+ continue;
+ }
+ const DictionaryValue *object_value = element->as_dictionary_value();
+ IDProperty *last_created = idprop_from_value(*object_value);
+ if (last_created == nullptr) {
+ continue;
+ }
+
+ if (result == nullptr) {
+ result = last_created;
+ }
+
+ if (previous_added) {
+ previous_added->next = last_created;
+ }
+ last_created->prev = previous_added;
+ previous_added = last_created;
+ }
+
+ return result;
+}
+
+IDProperty *convert_from_serialize_value(const Value &value)
+{
+ if (value.type() != eValueType::Array) {
+ return nullptr;
+ }
+
+ return idprop_from_value(*value.as_array_value());
+}
+
+/* \} */
+
+} // namespace blender::bke::idprop
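
A round-trip sketch tying `convert_to_serialize_values` and `convert_from_serialize_value` to JSON, assuming `JsonFormatter` and the `Value` types are reachable through the same headers the tests below rely on; the function names are illustrative:

#include <sstream>
#include <string>

#include "DNA_ID.h"

#include "BKE_idprop.hh"

static std::string sketch_idprop_to_json(const IDProperty *properties)
{
  using namespace blender::io::serialize;
  /* IDProperty list -> Value tree -> JSON text. */
  std::unique_ptr<ArrayValue> values = blender::bke::idprop::convert_to_serialize_values(
      properties);
  std::stringstream out;
  JsonFormatter json;
  json.serialize(out, *values);
  return out.str();
}

static IDProperty *sketch_idprop_from_json(const std::string &text)
{
  using namespace blender::io::serialize;
  /* JSON text -> Value tree -> linked list of IDProperties; the caller owns and frees it. */
  std::stringstream in(text);
  JsonFormatter json;
  std::unique_ptr<Value> value = json.deserialize(in);
  return blender::bke::idprop::convert_from_serialize_value(*value);
}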
diff --git a/source/blender/blenkernel/intern/idprop_serialize_test.cc b/source/blender/blenkernel/intern/idprop_serialize_test.cc
new file mode 100644
index 00000000000..eeee3fc2aea
--- /dev/null
+++ b/source/blender/blenkernel/intern/idprop_serialize_test.cc
@@ -0,0 +1,448 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2021 by Blender Foundation.
+ */
+
+#include "testing/testing.h"
+
+#include "DNA_ID.h"
+
+#include "BKE_idprop.hh"
+
+namespace blender::bke::idprop::tests {
+
+using namespace blender::io::serialize;
+
+static void check_container_value(ArrayValue *value)
+{
+ ASSERT_NE(value, nullptr);
+ ASSERT_EQ(value->type(), eValueType::Array);
+ const ArrayValue::Items elements = value->elements();
+ EXPECT_FALSE(elements.is_empty());
+ EXPECT_EQ(elements.size(), 1);
+
+ const ArrayValue::Item &item = value->elements()[0];
+ ASSERT_EQ(item->type(), eValueType::Dictionary);
+}
+
+static void check_object_attribute(const DictionaryValue::Lookup &lookup,
+ const std::string expected_key,
+ const std::string expected_value)
+{
+ EXPECT_TRUE(lookup.contains(expected_key));
+ const std::shared_ptr<Value> &element = *lookup.lookup_ptr(expected_key);
+ ASSERT_EQ(element->type(), eValueType::String);
+ EXPECT_EQ(element->as_string_value()->value(), expected_value);
+}
+
+static void check_object_attribute(const DictionaryValue::Lookup &lookup,
+ const std::string expected_key,
+ const int32_t expected_value)
+{
+ EXPECT_TRUE(lookup.contains(expected_key));
+ const std::shared_ptr<Value> &element = *lookup.lookup_ptr(expected_key);
+ ASSERT_EQ(element->type(), eValueType::Int);
+ EXPECT_EQ(element->as_int_value()->value(), expected_value);
+}
+
+static void check_object_attribute(const DictionaryValue::Lookup &lookup,
+ const std::string expected_key,
+ const float expected_value)
+{
+ EXPECT_TRUE(lookup.contains(expected_key));
+ const std::shared_ptr<Value> &element = *lookup.lookup_ptr(expected_key);
+ ASSERT_EQ(element->type(), eValueType::Double);
+ EXPECT_EQ(element->as_double_value()->value(), expected_value);
+}
+
+static void check_object_attribute(const DictionaryValue::Lookup &lookup,
+ const std::string expected_key,
+ const double expected_value)
+{
+ EXPECT_TRUE(lookup.contains(expected_key));
+ const std::shared_ptr<Value> &element = *lookup.lookup_ptr(expected_key);
+ ASSERT_EQ(element->type(), eValueType::Double);
+ EXPECT_EQ(element->as_double_value()->value(), expected_value);
+}
+
+static void test_string_to_value(const StringRefNull prop_name, const StringRefNull prop_content)
+{
+ std::unique_ptr<IDProperty, IDPropertyDeleter> property = create(prop_name, prop_content);
+
+ std::unique_ptr<ArrayValue> value = convert_to_serialize_values(property.get());
+ check_container_value(value.get());
+ const ArrayValue::Item &item = value->elements()[0];
+ const DictionaryValue *object = item->as_dictionary_value();
+ const DictionaryValue::Lookup lookup = object->create_lookup();
+
+ EXPECT_EQ(lookup.size(), 3);
+ check_object_attribute(lookup, "name", prop_name);
+ check_object_attribute(lookup, "type", "IDP_STRING");
+ check_object_attribute(lookup, "value", prop_content);
+}
+
+TEST(idprop, convert_idp_string_to_value)
+{
+ test_string_to_value("mykey", "mycontent");
+}
+
+static void test_int_to_value(const StringRefNull prop_name, int32_t prop_content)
+{
+ std::unique_ptr<IDProperty, IDPropertyDeleter> property = create(prop_name, prop_content);
+
+ std::unique_ptr<ArrayValue> value = convert_to_serialize_values(property.get());
+ check_container_value(value.get());
+ const ArrayValue::Item &item = value->elements()[0];
+ const DictionaryValue *object = item->as_dictionary_value();
+ const DictionaryValue::Lookup lookup = object->create_lookup();
+
+ EXPECT_EQ(lookup.size(), 3);
+ check_object_attribute(lookup, "name", prop_name);
+ check_object_attribute(lookup, "type", "IDP_INT");
+ check_object_attribute(lookup, "value", prop_content);
+}
+
+TEST(idprop, convert_idp_int_to_value)
+{
+ test_int_to_value("mykey", 0);
+}
+
+static void test_float_to_value(const StringRefNull prop_name, float prop_content)
+{
+ std::unique_ptr<IDProperty, IDPropertyDeleter> property = create(prop_name, prop_content);
+
+ std::unique_ptr<ArrayValue> value = convert_to_serialize_values(property.get());
+ check_container_value(value.get());
+ const ArrayValue::Item &item = value->elements()[0];
+ const DictionaryValue *object = item->as_dictionary_value();
+ const DictionaryValue::Lookup lookup = object->create_lookup();
+
+ EXPECT_EQ(lookup.size(), 3);
+ check_object_attribute(lookup, "name", prop_name);
+ check_object_attribute(lookup, "type", "IDP_FLOAT");
+ check_object_attribute(lookup, "value", prop_content);
+}
+
+TEST(idprop, convert_idp_float_to_value)
+{
+ test_float_to_value("mykey", 0.2f);
+}
+
+static void test_double_to_value(const StringRefNull prop_name, double prop_content)
+{
+ std::unique_ptr<IDProperty, IDPropertyDeleter> property = create(prop_name, prop_content);
+
+ std::unique_ptr<ArrayValue> value = convert_to_serialize_values(property.get());
+ check_container_value(value.get());
+ const ArrayValue::Item &item = value->elements()[0];
+ const DictionaryValue *object = item->as_dictionary_value();
+ const DictionaryValue::Lookup lookup = object->create_lookup();
+
+ EXPECT_EQ(lookup.size(), 3);
+ check_object_attribute(lookup, "name", prop_name);
+ check_object_attribute(lookup, "type", "IDP_DOUBLE");
+ check_object_attribute(lookup, "value", prop_content);
+}
+
+TEST(idprop, convert_idp_double_to_value)
+{
+ test_double_to_value("mykey", 0.2);
+}
+
+template<typename PrimitiveType, typename ValueType>
+static void test_array_to_value(const StringRefNull prop_name, Vector<PrimitiveType> prop_content)
+{
+ std::unique_ptr<IDProperty, IDPropertyDeleter> property = create(prop_name, prop_content);
+ std::unique_ptr<ArrayValue> value = convert_to_serialize_values(property.get());
+
+ check_container_value(value.get());
+ const ArrayValue::Item &item = value->elements()[0];
+ const DictionaryValue *object = item->as_dictionary_value();
+ const DictionaryValue::Lookup lookup = object->create_lookup();
+
+ EXPECT_EQ(lookup.size(), 4);
+ check_object_attribute(lookup, "name", prop_name);
+ check_object_attribute(lookup, "type", "IDP_ARRAY");
+
+ const std::shared_ptr<Value> &element = *lookup.lookup_ptr("value");
+ const ArrayValue *subvalues = element->as_array_value();
+ ASSERT_NE(subvalues, nullptr);
+ const ArrayValue::Items &subitems = subvalues->elements();
+ ASSERT_EQ(subitems.size(), prop_content.size());
+
+ for (size_t i = 0; i < prop_content.size(); i++) {
+ EXPECT_EQ(static_cast<ValueType *>(subitems[i].get())->value(), prop_content[i]);
+ }
+}
+
+TEST(idprop, convert_idp_int_array_to_value)
+{
+ test_array_to_value<int32_t, IntValue>("my_integer_array",
+ {-16, -8, -4, -2, -1, 0, 1, 2, 4, 8, 16});
+}
+
+TEST(idprop, convert_idp_float_array_to_value)
+{
+ test_array_to_value<float, DoubleValue>(
+ "my_float_array", {-16.8f, -8.4f, -4.2f, -2.1f, -1.0f, 0.0f, 1.0f, 2.1f, 4.2f, 8.4f, 16.8f});
+}
+
+TEST(idprop, convert_idp_double_array_to_value)
+{
+ test_array_to_value<double, DoubleValue>(
+ "my_double_array", {-16.8, -8.4, -4.2, -2.1, -1.0, 0.0, 1.0, 2.1, 4.2, 8.4, 16.8});
+}
+
+static std::unique_ptr<Value> parse_json(StringRef input)
+{
+ std::stringstream is(input);
+ JsonFormatter json;
+ std::unique_ptr<Value> value = json.deserialize(is);
+ return value;
+}
+
+static std::string to_json(const Value &value)
+{
+ std::stringstream out;
+ JsonFormatter json;
+ json.serialize(out, value);
+ return out.str();
+}
+
+static void test_idprop(const IDProperty *id_property,
+ StringRef expected_name,
+ StringRef expected_value)
+{
+ ASSERT_NE(id_property, nullptr);
+ EXPECT_EQ(id_property->type, IDP_STRING);
+ EXPECT_EQ(id_property->name, expected_name);
+ EXPECT_EQ(IDP_String(id_property), expected_value);
+}
+
+static void test_idprop(const IDProperty *id_property,
+ StringRef expected_name,
+ int32_t expected_value)
+{
+ ASSERT_NE(id_property, nullptr);
+ EXPECT_EQ(id_property->type, IDP_INT);
+ EXPECT_EQ(id_property->name, expected_name);
+ EXPECT_EQ(IDP_Int(id_property), expected_value);
+}
+
+static void test_idprop(const IDProperty *id_property,
+ StringRef expected_name,
+ float expected_value)
+{
+ ASSERT_NE(id_property, nullptr);
+ EXPECT_EQ(id_property->type, IDP_FLOAT);
+ EXPECT_EQ(id_property->name, expected_name);
+ EXPECT_EQ(IDP_Float(id_property), expected_value);
+}
+
+static void test_idprop(const IDProperty *id_property,
+ StringRef expected_name,
+ double expected_value)
+{
+ ASSERT_NE(id_property, nullptr);
+ EXPECT_EQ(id_property->type, IDP_DOUBLE);
+ EXPECT_EQ(id_property->name, expected_name);
+ EXPECT_EQ(IDP_Double(id_property), expected_value);
+}
+
+static void test_idprop(const IDProperty *id_property,
+ StringRef expected_name,
+ const Vector<int32_t> &values)
+{
+ ASSERT_NE(id_property, nullptr);
+ EXPECT_EQ(id_property->type, IDP_ARRAY);
+ EXPECT_EQ(id_property->subtype, IDP_INT);
+ EXPECT_EQ(id_property->len, values.size());
+ EXPECT_EQ(id_property->name, expected_name);
+ int32_t *idprop_values = static_cast<int32_t *>(IDP_Array(id_property));
+ for (int i = 0; i < values.size(); i++) {
+ EXPECT_EQ(idprop_values[i], values[i]);
+ }
+}
+
+static void test_idprop(const IDProperty *id_property,
+ StringRef expected_name,
+ const Vector<float> &values)
+{
+ ASSERT_NE(id_property, nullptr);
+ EXPECT_EQ(id_property->type, IDP_ARRAY);
+ EXPECT_EQ(id_property->subtype, IDP_FLOAT);
+ EXPECT_EQ(id_property->len, values.size());
+ EXPECT_EQ(id_property->name, expected_name);
+ float *idprop_values = static_cast<float *>(IDP_Array(id_property));
+ for (int i = 0; i < values.size(); i++) {
+ EXPECT_EQ(idprop_values[i], values[i]);
+ }
+}
+
+static void test_idprop(const IDProperty *id_property,
+ StringRef expected_name,
+ const Vector<double> &values)
+{
+ ASSERT_NE(id_property, nullptr);
+ EXPECT_EQ(id_property->type, IDP_ARRAY);
+ EXPECT_EQ(id_property->subtype, IDP_DOUBLE);
+ EXPECT_EQ(id_property->len, values.size());
+ EXPECT_EQ(id_property->name, expected_name);
+ double *idprop_values = static_cast<double *>(IDP_Array(id_property));
+ for (int i = 0; i < values.size(); i++) {
+ EXPECT_EQ(idprop_values[i], values[i]);
+ }
+}
+
+template<typename Type>
+static void test_convert_idprop_from_value(StringRef input,
+ StringRef expected_name,
+ Type expected_value)
+{
+ std::unique_ptr<Value> value = parse_json(input);
+ IDProperty *id_property = convert_from_serialize_value(*value);
+ test_idprop(id_property, expected_name, expected_value);
+ IDP_FreeProperty(id_property);
+}
+
+TEST(idprop, convert_idp_string_from_value)
+{
+ test_convert_idprop_from_value(
+ R"([{"name":"MyStringName","type":"IDP_STRING","value":"MyString"}])",
+ "MyStringName",
+ "MyString");
+}
+
+TEST(idprop, convert_idp_int_from_value)
+{
+ test_convert_idprop_from_value(
+ R"([{"name":"MyIntegerName","type":"IDP_INT","value":42}])", "MyIntegerName", 42);
+}
+
+TEST(idprop, convert_idp_float_from_value)
+{
+ test_convert_idprop_from_value(
+ R"([{"name":"MyFloatName","type":"IDP_FLOAT","value":42.24}])", "MyFloatName", 42.24f);
+}
+
+TEST(idprop, convert_idp_double_from_value)
+{
+ test_convert_idprop_from_value(
+ R"([{"name":"MyDoubleName","type":"IDP_DOUBLE","value":42.24}])", "MyDoubleName", 42.24);
+}
+
+TEST(idprop, convert_idp_array_int_from_value)
+{
+ test_convert_idprop_from_value(
+ R"([{"name":"MyArrayName","type":"IDP_ARRAY","subtype":"IDP_INT","value":[42, 24, 35]}])",
+ "MyArrayName",
+ Vector<int32_t>{42, 24, 35});
+}
+
+TEST(idprop, convert_idp_array_float_from_value)
+{
+ test_convert_idprop_from_value(
+ R"([{"name":"MyArrayName","type":"IDP_ARRAY","subtype":"IDP_FLOAT","value":[42.0, 24.4, 35.2]}])",
+ "MyArrayName",
+ Vector<float>{42.0f, 24.4f, 35.2f});
+}
+
+TEST(idprop, convert_idp_array_double_from_value)
+{
+ test_convert_idprop_from_value(
+ R"([{"name":"MyArrayName","type":"IDP_ARRAY","subtype":"IDP_DOUBLE","value":[42.43,24.5,35.8]}])",
+ "MyArrayName",
+ Vector<double>{42.43, 24.5, 35.8});
+}
+
+TEST(idprop, convert_idp_multiple_from_value)
+{
+ static const std::string input_json =
+ R"([{"name":"MyIntegerName","type":"IDP_INT","value":42},{"name":"MyStringName","type":"IDP_STRING","value":"MyString"},{"name":"MyFloatName","type":"IDP_FLOAT","value":42.24},{"name":"MyDoubleName","type":"IDP_DOUBLE","value":42.24}])";
+ std::unique_ptr<Value> value = parse_json(input_json);
+
+ IDProperty *id_property = convert_from_serialize_value(*value);
+ IDProperty *id_property_1 = id_property;
+ ASSERT_NE(id_property_1, nullptr);
+ IDProperty *id_property_2 = id_property_1->next;
+ ASSERT_NE(id_property_2, nullptr);
+ IDProperty *id_property_3 = id_property_2->next;
+ ASSERT_NE(id_property_3, nullptr);
+ IDProperty *id_property_4 = id_property_3->next;
+ ASSERT_NE(id_property_4, nullptr);
+
+ EXPECT_EQ(id_property_1->prev, nullptr);
+ EXPECT_EQ(id_property_2->prev, id_property_1);
+ EXPECT_EQ(id_property_3->prev, id_property_2);
+ EXPECT_EQ(id_property_4->prev, id_property_3);
+ EXPECT_EQ(id_property_4->next, nullptr);
+
+ test_idprop(id_property_1, "MyIntegerName", 42);
+ test_idprop(id_property_2, "MyStringName", "MyString");
+ test_idprop(id_property_3, "MyFloatName", 42.24f);
+ test_idprop(id_property_4, "MyDoubleName", 42.24);
+
+ IDP_FreeProperty(id_property_1);
+ IDP_FreeProperty(id_property_2);
+ IDP_FreeProperty(id_property_3);
+ IDP_FreeProperty(id_property_4);
+}
+
+TEST(idprop, convert_idp_multiple_roundtrip)
+{
+ static const std::string input_json =
+ R"([{"name":"MyIntegerName","type":"IDP_INT","value":42},{"name":"MyStringName","type":"IDP_STRING","value":"MyString"},{"name":"MyFloatName","type":"IDP_FLOAT","value":42.2400016784668},{"name":"MyDoubleName","type":"IDP_DOUBLE","value":42.24}])";
+ std::unique_ptr<Value> value = parse_json(input_json);
+
+ IDProperty *id_property = convert_from_serialize_value(*value);
+ IDProperty *id_property_1 = id_property;
+ ASSERT_NE(id_property_1, nullptr);
+ IDProperty *id_property_2 = id_property_1->next;
+ ASSERT_NE(id_property_2, nullptr);
+ IDProperty *id_property_3 = id_property_2->next;
+ ASSERT_NE(id_property_3, nullptr);
+ IDProperty *id_property_4 = id_property_3->next;
+ ASSERT_NE(id_property_4, nullptr);
+
+ std::unique_ptr<Value> value_from_id_properties = convert_to_serialize_values(id_property);
+ std::string output_json = to_json(*value_from_id_properties);
+ EXPECT_EQ(input_json, output_json);
+
+ IDP_FreeProperty(id_property_1);
+ IDP_FreeProperty(id_property_2);
+ IDP_FreeProperty(id_property_3);
+ IDP_FreeProperty(id_property_4);
+}
+
+TEST(idprop, convert_idp_group_from_value)
+{
+ static const std::string input_json =
+ R"([{"name":"AssetMetaData.properties","type":"IDP_GROUP","value":[{"name":"dimensions","type":"IDP_ARRAY","subtype":"IDP_FLOAT","value":[2.0,2.0,2.0]}]}])";
+ std::unique_ptr<Value> value = parse_json(input_json);
+
+ IDProperty *id_property = convert_from_serialize_value(*value);
+ ASSERT_NE(id_property, nullptr);
+ EXPECT_EQ(id_property->type, IDP_GROUP);
+ EXPECT_EQ(BLI_listbase_count(&id_property->data.group), 1);
+
+ test_idprop(static_cast<IDProperty *>(id_property->data.group.first),
+ "dimensions",
+ Vector<float>{2.0f, 2.0f, 2.0f});
+
+ IDP_FreeProperty(id_property);
+}
+
+} // namespace blender::bke::idprop::tests
diff --git a/source/blender/blenkernel/intern/image.c b/source/blender/blenkernel/intern/image.c
index 2c0ba2694e5..040257fe976 100644
--- a/source/blender/blenkernel/intern/image.c
+++ b/source/blender/blenkernel/intern/image.c
@@ -21,6 +21,7 @@
* \ingroup bke
*/
+#include <ctype.h>
#include <fcntl.h>
#include <math.h>
#include <stdio.h>
@@ -84,6 +85,7 @@
#include "BKE_lib_id.h"
#include "BKE_main.h"
#include "BKE_node.h"
+#include "BKE_node_tree_update.h"
#include "BKE_packedFile.h"
#include "BKE_report.h"
#include "BKE_scene.h"
@@ -97,6 +99,7 @@
#include "SEQ_utils.h" /* SEQ_get_topmost_sequence() */
+#include "GPU_material.h"
#include "GPU_texture.h"
#include "BLI_sys_types.h" /* for intptr_t support */
@@ -118,7 +121,7 @@ static void image_init(Image *ima, short source, short type);
static void image_free_packedfiles(Image *ima);
static void copy_image_packedfiles(ListBase *lb_dst, const ListBase *lb_src);
-/* Reset runtime image fields when datablock is being initialized. */
+/* Reset runtime image fields when data-block is being initialized. */
static void image_runtime_reset(struct Image *image)
{
memset(&image->runtime, 0, sizeof(image->runtime));
@@ -126,7 +129,7 @@ static void image_runtime_reset(struct Image *image)
BLI_mutex_init(image->runtime.cache_mutex);
}
-/* Reset runtime image fields when datablock is being copied. */
+/* Reset runtime image fields when data-block is being copied. */
static void image_runtime_reset_on_copy(struct Image *image)
{
image->runtime.cache_mutex = MEM_mallocN(sizeof(ThreadMutex), "image runtime cache_mutex");
@@ -284,7 +287,33 @@ static void image_foreach_path(ID *id, BPathForeachPathData *bpath_data)
return;
}
- if (BKE_bpath_foreach_path_fixed_process(bpath_data, ima->filepath)) {
+ /* If this is a tiled image, and we're asked to resolve the tokens in the virtual
+ * filepath, use the first tile to generate a concrete path for use during processing. */
+ bool result = false;
+ if (ima->source == IMA_SRC_TILED && (flag & BKE_BPATH_FOREACH_PATH_RESOLVE_TOKEN) != 0) {
+ char temp_path[FILE_MAX], orig_file[FILE_MAXFILE];
+ BLI_strncpy(temp_path, ima->filepath, sizeof(temp_path));
+ BLI_split_file_part(temp_path, orig_file, sizeof(orig_file));
+
+ eUDIM_TILE_FORMAT tile_format;
+ char *udim_pattern = BKE_image_get_tile_strformat(temp_path, &tile_format);
+ BKE_image_set_filepath_from_tile_number(
+ temp_path, udim_pattern, tile_format, ((ImageTile *)ima->tiles.first)->tile_number);
+ MEM_SAFE_FREE(udim_pattern);
+
+ result = BKE_bpath_foreach_path_fixed_process(bpath_data, temp_path);
+ if (result) {
+ /* Put the filepath back together using the new directory and the original file name. */
+ char new_dir[FILE_MAXDIR];
+ BLI_split_dir_part(temp_path, new_dir, sizeof(new_dir));
+ BLI_join_dirfile(ima->filepath, sizeof(ima->filepath), new_dir, orig_file);
+ }
+ }
+ else {
+ result = BKE_bpath_foreach_path_fixed_process(bpath_data, ima->filepath);
+ }
+
+ if (result) {
if (flag & BKE_BPATH_FOREACH_PATH_RELOAD_EDITED) {
if (!BKE_image_has_packedfile(ima) &&
/* Image may have been painted onto (and not saved, T44543). */
@@ -900,9 +929,13 @@ Image *BKE_image_load(Main *bmain, const char *filepath)
/* exists? */
file = BLI_open(str, O_BINARY | O_RDONLY, 0);
if (file == -1) {
- return NULL;
+ if (!BKE_image_tile_filepath_exists(str)) {
+ return NULL;
+ }
+ }
+ else {
+ close(file);
}
- close(file);
ima = image_alloc(bmain, BLI_path_basename(filepath), IMA_SRC_FILE, IMA_TYPE_IMAGE);
STRNCPY(ima->filepath, filepath);
@@ -2076,9 +2109,10 @@ static void stampdata(
time_t t;
if (scene->r.stamp & R_STAMP_FILENAME) {
+ const char *blendfile_path = BKE_main_blendfile_path_from_global();
SNPRINTF(stamp_data->file,
do_prefix ? "File %s" : "%s",
- G.relbase_valid ? BKE_main_blendfile_path_from_global() : "<untitled>");
+ (blendfile_path[0] != '\0') ? blendfile_path : "<untitled>");
}
else {
stamp_data->file[0] = '\0';
@@ -2426,7 +2460,7 @@ void BKE_image_stamp_buf(Scene *scene,
/* and draw the text. */
BLF_position(mono, x, y + y_ofs, 0.0);
- BLF_draw_buffer(mono, stamp_data.file, BLF_DRAW_STR_DUMMY_MAX);
+ BLF_draw_buffer(mono, stamp_data.file, sizeof(stamp_data.file));
/* the extra pixel for background. */
y -= BUFF_MARGIN_Y * 2;
@@ -2449,7 +2483,7 @@ void BKE_image_stamp_buf(Scene *scene,
y + h + BUFF_MARGIN_Y);
BLF_position(mono, x, y + y_ofs, 0.0);
- BLF_draw_buffer(mono, stamp_data.date, BLF_DRAW_STR_DUMMY_MAX);
+ BLF_draw_buffer(mono, stamp_data.date, sizeof(stamp_data.date));
/* the extra pixel for background. */
y -= BUFF_MARGIN_Y * 2;
@@ -2472,7 +2506,7 @@ void BKE_image_stamp_buf(Scene *scene,
y + h + BUFF_MARGIN_Y);
BLF_position(mono, x, y + y_ofs, 0.0);
- BLF_draw_buffer(mono, stamp_data.rendertime, BLF_DRAW_STR_DUMMY_MAX);
+ BLF_draw_buffer(mono, stamp_data.rendertime, sizeof(stamp_data.rendertime));
/* the extra pixel for background. */
y -= BUFF_MARGIN_Y * 2;
@@ -2495,7 +2529,7 @@ void BKE_image_stamp_buf(Scene *scene,
y + h + BUFF_MARGIN_Y);
BLF_position(mono, x, y + y_ofs, 0.0);
- BLF_draw_buffer(mono, stamp_data.memory, BLF_DRAW_STR_DUMMY_MAX);
+ BLF_draw_buffer(mono, stamp_data.memory, sizeof(stamp_data.memory));
/* the extra pixel for background. */
y -= BUFF_MARGIN_Y * 2;
@@ -2518,7 +2552,7 @@ void BKE_image_stamp_buf(Scene *scene,
y + h + BUFF_MARGIN_Y);
BLF_position(mono, x, y + y_ofs, 0.0);
- BLF_draw_buffer(mono, stamp_data.hostname, BLF_DRAW_STR_DUMMY_MAX);
+ BLF_draw_buffer(mono, stamp_data.hostname, sizeof(stamp_data.hostname));
/* the extra pixel for background. */
y -= BUFF_MARGIN_Y * 2;
@@ -2542,7 +2576,7 @@ void BKE_image_stamp_buf(Scene *scene,
y + h + BUFF_MARGIN_Y);
BLF_position(mono, x, y + y_ofs + (h - h_fixed), 0.0);
- BLF_draw_buffer(mono, stamp_data.note, BLF_DRAW_STR_DUMMY_MAX);
+ BLF_draw_buffer(mono, stamp_data.note, sizeof(stamp_data.note));
}
BLF_disable(mono, BLF_WORD_WRAP);
@@ -2566,7 +2600,7 @@ void BKE_image_stamp_buf(Scene *scene,
/* and pad the text. */
BLF_position(mono, x, y + y_ofs, 0.0);
- BLF_draw_buffer(mono, stamp_data.marker, BLF_DRAW_STR_DUMMY_MAX);
+ BLF_draw_buffer(mono, stamp_data.marker, sizeof(stamp_data.marker));
/* space width. */
x += w + pad;
@@ -2589,7 +2623,7 @@ void BKE_image_stamp_buf(Scene *scene,
/* and pad the text. */
BLF_position(mono, x, y + y_ofs, 0.0);
- BLF_draw_buffer(mono, stamp_data.time, BLF_DRAW_STR_DUMMY_MAX);
+ BLF_draw_buffer(mono, stamp_data.time, sizeof(stamp_data.time));
/* space width. */
x += w + pad;
@@ -2611,7 +2645,7 @@ void BKE_image_stamp_buf(Scene *scene,
/* and pad the text. */
BLF_position(mono, x, y + y_ofs, 0.0);
- BLF_draw_buffer(mono, stamp_data.frame, BLF_DRAW_STR_DUMMY_MAX);
+ BLF_draw_buffer(mono, stamp_data.frame, sizeof(stamp_data.frame));
/* space width. */
x += w + pad;
@@ -2631,7 +2665,7 @@ void BKE_image_stamp_buf(Scene *scene,
x + w + BUFF_MARGIN_X,
y + h + BUFF_MARGIN_Y);
BLF_position(mono, x, y + y_ofs, 0.0);
- BLF_draw_buffer(mono, stamp_data.camera, BLF_DRAW_STR_DUMMY_MAX);
+ BLF_draw_buffer(mono, stamp_data.camera, sizeof(stamp_data.camera));
/* space width. */
x += w + pad;
@@ -2651,7 +2685,7 @@ void BKE_image_stamp_buf(Scene *scene,
x + w + BUFF_MARGIN_X,
y + h + BUFF_MARGIN_Y);
BLF_position(mono, x, y + y_ofs, 0.0);
- BLF_draw_buffer(mono, stamp_data.cameralens, BLF_DRAW_STR_DUMMY_MAX);
+ BLF_draw_buffer(mono, stamp_data.cameralens, sizeof(stamp_data.cameralens));
}
if (TEXT_SIZE_CHECK(stamp_data.scene, w, h)) {
@@ -2673,7 +2707,7 @@ void BKE_image_stamp_buf(Scene *scene,
/* and pad the text. */
BLF_position(mono, x, y + y_ofs, 0.0);
- BLF_draw_buffer(mono, stamp_data.scene, BLF_DRAW_STR_DUMMY_MAX);
+ BLF_draw_buffer(mono, stamp_data.scene, sizeof(stamp_data.scene));
}
if (TEXT_SIZE_CHECK(stamp_data.strip, w, h)) {
@@ -2695,7 +2729,7 @@ void BKE_image_stamp_buf(Scene *scene,
y + h + BUFF_MARGIN_Y);
BLF_position(mono, x, y + y_ofs, 0.0);
- BLF_draw_buffer(mono, stamp_data.strip, BLF_DRAW_STR_DUMMY_MAX);
+ BLF_draw_buffer(mono, stamp_data.strip, sizeof(stamp_data.strip));
}
/* cleanup the buffer. */
@@ -3376,6 +3410,23 @@ static void image_walk_ntree_all_users(
}
}
+static void image_walk_gpu_materials(
+ ID *id,
+ ListBase *gpu_materials,
+ void *customdata,
+ void callback(Image *ima, ID *iuser_id, ImageUser *iuser, void *customdata))
+{
+ LISTBASE_FOREACH (LinkData *, link, gpu_materials) {
+ GPUMaterial *gpu_material = (GPUMaterial *)link->data;
+ ListBase textures = GPU_material_textures(gpu_material);
+ LISTBASE_FOREACH (GPUMaterialTexture *, gpu_material_texture, &textures) {
+ if (gpu_material_texture->iuser_available) {
+ callback(gpu_material_texture->ima, id, &gpu_material_texture->iuser, customdata);
+ }
+ }
+ }
+}
+
static void image_walk_id_all_users(
ID *id,
bool skip_nested_nodes,
@@ -3395,6 +3446,7 @@ static void image_walk_id_all_users(
if (ma->nodetree && ma->use_nodes && !skip_nested_nodes) {
image_walk_ntree_all_users(ma->nodetree, &ma->id, customdata, callback);
}
+ image_walk_gpu_materials(id, &ma->gpumaterial, customdata, callback);
break;
}
case ID_LA: {
@@ -3409,6 +3461,7 @@ static void image_walk_id_all_users(
if (world->nodetree && world->use_nodes && !skip_nested_nodes) {
image_walk_ntree_all_users(world->nodetree, &world->id, customdata, callback);
}
+ image_walk_gpu_materials(id, &world->gpumaterial, customdata, callback);
break;
}
case ID_TE: {
@@ -3514,7 +3567,7 @@ static void image_tag_frame_recalc(Image *ima, ID *iuser_id, ImageUser *iuser, v
iuser->flag |= IMA_NEED_FRAME_RECALC;
if (iuser_id) {
- /* Must copy image user changes to CoW datablock. */
+ /* Must copy image user changes to CoW data-block. */
DEG_id_tag_update(iuser_id, ID_RECALC_COPY_ON_WRITE);
}
}
@@ -3529,7 +3582,7 @@ static void image_tag_reload(Image *ima, ID *iuser_id, ImageUser *iuser, void *c
image_update_views_format(ima, iuser);
}
if (iuser_id) {
- /* Must copy image user changes to CoW datablock. */
+ /* Must copy image user changes to CoW data-block. */
DEG_id_tag_update(iuser_id, ID_RECALC_COPY_ON_WRITE);
}
}
@@ -3691,6 +3744,43 @@ void BKE_image_signal(Main *bmain, Image *ima, ImageUser *iuser, int signal)
BKE_image_free_buffers(ima);
}
+ if (ima->source == IMA_SRC_TILED) {
+ ListBase new_tiles = {NULL, NULL};
+ int new_start, new_range;
+
+ char filepath[FILE_MAX];
+ BLI_strncpy(filepath, ima->filepath, sizeof(filepath));
+ BLI_path_abs(filepath, ID_BLEND_PATH_FROM_GLOBAL(&ima->id));
+ bool result = BKE_image_get_tile_info(filepath, &new_tiles, &new_start, &new_range);
+ if (result) {
+ /* Because the prior and new list of tiles are both sparse sequences, we need to be sure
+ * to account for how the two sets might or might not overlap. To be complete, we start
+ * the refresh process by clearing all existing tiles, stopping when there's only 1 tile
+ * left. */
+ while (BKE_image_remove_tile(ima, ima->tiles.last)) {
+ ;
+ }
+
+ int remaining_tile_number = ((ImageTile *)ima->tiles.first)->tile_number;
+ bool needs_final_cleanup = true;
+
+ /* Add in all the new tiles. */
+ LISTBASE_FOREACH (LinkData *, new_tile, &new_tiles) {
+ int new_tile_number = POINTER_AS_INT(new_tile->data);
+ BKE_image_add_tile(ima, new_tile_number, NULL);
+ if (new_tile_number == remaining_tile_number) {
+ needs_final_cleanup = false;
+ }
+ }
+
+ /* Final cleanup if the prior remaining tile was never encountered in the new list. */
+ if (needs_final_cleanup) {
+ BKE_image_remove_tile(ima, BKE_image_get_tile(ima, remaining_tile_number));
+ }
+ }
+ BLI_freelistN(&new_tiles);
+ }
+
if (iuser) {
image_tag_reload(ima, NULL, iuser, ima);
}
@@ -3712,16 +3802,8 @@ void BKE_image_signal(Main *bmain, Image *ima, ImageUser *iuser, int signal)
BLI_mutex_unlock(ima->runtime.cache_mutex);
- /* don't use notifiers because they are not 100% sure to succeeded
- * this also makes sure all scenes are accounted for. */
- {
- Scene *scene;
- for (scene = bmain->scenes.first; scene; scene = scene->id.next) {
- if (scene->nodetree) {
- nodeUpdateID(scene->nodetree, &ima->id);
- }
- }
- }
+ BKE_ntree_update_tag_id_changed(bmain, &ima->id);
+ BKE_ntree_update_main(bmain, NULL);
}
/* return renderpass for a given pass index and active view */
@@ -3782,6 +3864,57 @@ void BKE_image_get_tile_label(Image *ima, ImageTile *tile, char *label, int len_
}
}
+bool BKE_image_get_tile_info(char *filepath,
+ ListBase *udim_tiles,
+ int *udim_start,
+ int *udim_range)
+{
+ char filename[FILE_MAXFILE], dirname[FILE_MAXDIR];
+ BLI_split_dirfile(filepath, dirname, filename, sizeof(dirname), sizeof(filename));
+
+ BKE_image_ensure_tile_token(filename);
+
+ eUDIM_TILE_FORMAT tile_format;
+ char *udim_pattern = BKE_image_get_tile_strformat(filename, &tile_format);
+
+ bool is_udim = true;
+ int min_udim = IMA_UDIM_MAX + 1;
+ int max_udim = 0;
+ int id;
+
+ struct direntry *dir;
+ uint totfile = BLI_filelist_dir_contents(dirname, &dir);
+ for (int i = 0; i < totfile; i++) {
+ if (!(dir[i].type & S_IFREG)) {
+ continue;
+ }
+
+ if (!BKE_image_get_tile_number_from_filepath(dir[i].relname, udim_pattern, tile_format, &id)) {
+ continue;
+ }
+
+ if (id < 1001 || id > IMA_UDIM_MAX) {
+ is_udim = false;
+ break;
+ }
+
+ BLI_addtail(udim_tiles, BLI_genericNodeN(POINTER_FROM_INT(id)));
+ min_udim = min_ii(min_udim, id);
+ max_udim = max_ii(max_udim, id);
+ }
+ BLI_filelist_free(dir, totfile);
+ MEM_SAFE_FREE(udim_pattern);
+
+ if (is_udim && min_udim <= IMA_UDIM_MAX) {
+ BLI_join_dirfile(filepath, FILE_MAX, dirname, filename);
+
+ *udim_start = min_udim;
+ *udim_range = max_udim - min_udim + 1;
+ return true;
+ }
+ return false;
+}
+
ImageTile *BKE_image_add_tile(struct Image *ima, int tile_number, const char *label)
{
if (ima->source != IMA_SRC_TILED) {
@@ -3941,6 +4074,184 @@ bool BKE_image_fill_tile(struct Image *ima,
return false;
}
+void BKE_image_ensure_tile_token(char *filename)
+{
+ BLI_assert_msg(BLI_path_slash_find(filename) == NULL,
+ "Only the file-name component should be used!");
+
+  /* Is there a '<' character in the filename? If so, assume a token is already present. */
+ if (strstr(filename, "<") != NULL) {
+ return;
+ }
+
+ /* Is there a sequence of digits in the filename? */
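+  /* For example "tile.1001.png" becomes "tile.<UDIM>.png". */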
+ ushort digits;
+ char head[FILE_MAX], tail[FILE_MAX];
+ BLI_path_sequence_decode(filename, head, tail, &digits);
+ if (digits == 4) {
+ sprintf(filename, "%s<UDIM>%s", head, tail);
+ return;
+ }
+
+ /* Is there a sequence like u##_v#### in the filename? */
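+  /* For example "tile_u2_v3.png" becomes "tile_<UVTILE>.png". */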
+ uint cur = 0;
+ uint name_end = strlen(filename);
+ uint u_digits = 0;
+ uint v_digits = 0;
+ uint u_start = (uint)-1;
+ bool u_found = false;
+ bool v_found = false;
+ bool sep_found = false;
+ while (cur < name_end) {
+ if (filename[cur] == 'u') {
+ u_found = true;
+ u_digits = 0;
+ u_start = cur;
+ }
+ else if (filename[cur] == 'v') {
+ v_found = true;
+ v_digits = 0;
+ }
+ else if (u_found && !v_found) {
+ if (isdigit(filename[cur]) && u_digits < 2) {
+ u_digits++;
+ }
+ else if (filename[cur] == '_') {
+ sep_found = true;
+ }
+ else {
+ u_found = false;
+ }
+ }
+ else if (u_found && u_digits > 0 && v_found) {
+ if (isdigit(filename[cur])) {
+ if (v_digits < 4) {
+ v_digits++;
+ }
+ else {
+ u_found = false;
+ v_found = false;
+ }
+ }
+ else if (v_digits > 0) {
+ break;
+ }
+ }
+
+ cur++;
+ }
+
+ if (u_found && sep_found && v_found && (u_digits + v_digits > 1)) {
+ const char *token = "<UVTILE>";
+ const size_t token_length = strlen(token);
+ memmove(filename + u_start + token_length, filename + cur, name_end - cur);
+ memcpy(filename + u_start, token, token_length);
+ filename[u_start + token_length + (name_end - cur)] = '\0';
+ }
+}
+
+bool BKE_image_tile_filepath_exists(const char *filepath)
+{
+ BLI_assert(!BLI_path_is_rel(filepath));
+
+ char dirname[FILE_MAXDIR];
+ BLI_split_dir_part(filepath, dirname, sizeof(dirname));
+
+ eUDIM_TILE_FORMAT tile_format;
+ char *udim_pattern = BKE_image_get_tile_strformat(filepath, &tile_format);
+
+ bool found = false;
+ struct direntry *dir;
+ uint totfile = BLI_filelist_dir_contents(dirname, &dir);
+ for (int i = 0; i < totfile; i++) {
+ if (!(dir[i].type & S_IFREG)) {
+ continue;
+ }
+
+ int id;
+ if (!BKE_image_get_tile_number_from_filepath(dir[i].path, udim_pattern, tile_format, &id)) {
+ continue;
+ }
+
+ if (id < 1001 || id > IMA_UDIM_MAX) {
+ continue;
+ }
+
+ found = true;
+ break;
+ }
+ BLI_filelist_free(dir, totfile);
+ MEM_SAFE_FREE(udim_pattern);
+
+ return found;
+}
+
+char *BKE_image_get_tile_strformat(const char *filepath, eUDIM_TILE_FORMAT *r_tile_format)
+{
+ if (filepath == NULL || r_tile_format == NULL) {
+ return NULL;
+ }
+
+ if (strstr(filepath, "<UDIM>") != NULL) {
+ *r_tile_format = UDIM_TILE_FORMAT_UDIM;
+ return BLI_str_replaceN(filepath, "<UDIM>", "%d");
+ }
+ if (strstr(filepath, "<UVTILE>") != NULL) {
+ *r_tile_format = UDIM_TILE_FORMAT_UVTILE;
+ return BLI_str_replaceN(filepath, "<UVTILE>", "u%d_v%d");
+ }
+
+ *r_tile_format = UDIM_TILE_FORMAT_NONE;
+ return NULL;
+}
+
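A quick sketch of the scanf/printf patterns this produces (the path is illustrative only):

eUDIM_TILE_FORMAT tile_format;
char *pattern = BKE_image_get_tile_strformat("tex.<UDIM>.png", &tile_format);
/* pattern is "tex.%d.png" and tile_format is UDIM_TILE_FORMAT_UDIM; a "<UVTILE>" token
 * would instead give "tex.u%d_v%d.png" and UDIM_TILE_FORMAT_UVTILE. */
MEM_SAFE_FREE(pattern);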
+bool BKE_image_get_tile_number_from_filepath(const char *filepath,
+ const char *pattern,
+ eUDIM_TILE_FORMAT tile_format,
+ int *r_tile_number)
+{
+ if (filepath == NULL || pattern == NULL || r_tile_number == NULL) {
+ return false;
+ }
+
+ int u, v;
+ bool result = false;
+
+ if (tile_format == UDIM_TILE_FORMAT_UDIM) {
+ if (sscanf(filepath, pattern, &u) == 1) {
+ *r_tile_number = u;
+ result = true;
+ }
+ }
+ else if (tile_format == UDIM_TILE_FORMAT_UVTILE) {
+ if (sscanf(filepath, pattern, &u, &v) == 2) {
+ *r_tile_number = 1001 + (u - 1) + ((v - 1) * 10);
+ result = true;
+ }
+ }
+
+ return result;
+}
+
+void BKE_image_set_filepath_from_tile_number(char *filepath,
+ const char *pattern,
+ eUDIM_TILE_FORMAT tile_format,
+ int tile_number)
+{
+ if (filepath == NULL || pattern == NULL) {
+ return;
+ }
+
+ if (tile_format == UDIM_TILE_FORMAT_UDIM) {
+ sprintf(filepath, pattern, tile_number);
+ }
+ else if (tile_format == UDIM_TILE_FORMAT_UVTILE) {
+ int u = ((tile_number - 1001) % 10);
+ int v = ((tile_number - 1001) / 10);
+ sprintf(filepath, pattern, u + 1, v + 1);
+ }
+}
+
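For the <UVTILE> scheme the two helpers above are inverses of each other over the standard 10-column UDIM grid: with u = 2 and v = 3 the tile number is 1001 + (2 - 1) + (3 - 1) * 10 = 1022, and converting 1022 back yields u = ((1022 - 1001) % 10) + 1 = 2 and v = ((1022 - 1001) / 10) + 1 = 3.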
/* if layer or pass changes, we need an index for the imbufs list */
/* note it is called for rendered results, but it doesn't use the index! */
RenderPass *BKE_image_multilayer_index(RenderResult *rr, ImageUser *iuser)
@@ -5476,7 +5787,7 @@ static void image_user_id_has_animation(Image *ima,
bool BKE_image_user_id_has_animation(ID *id)
{
/* For the dependency graph, this does not consider nested node
- * trees as these are handled as their own datablock. */
+ * trees as these are handled as their own data-block. */
bool has_animation = false;
bool skip_nested_nodes = true;
image_walk_id_all_users(id, skip_nested_nodes, &has_animation, image_user_id_has_animation);
@@ -5513,6 +5824,11 @@ void BKE_image_user_id_eval_animation(Depsgraph *depsgraph, ID *id)
void BKE_image_user_file_path(ImageUser *iuser, Image *ima, char *filepath)
{
+ BKE_image_user_file_path_ex(iuser, ima, filepath, true);
+}
+
+void BKE_image_user_file_path_ex(ImageUser *iuser, Image *ima, char *filepath, bool resolve_udim)
+{
if (BKE_image_is_multiview(ima)) {
ImageView *iv = BLI_findlink(&ima->views, iuser->view);
if (iv->filepath[0]) {
@@ -5533,13 +5849,17 @@ void BKE_image_user_file_path(ImageUser *iuser, Image *ima, char *filepath)
int index;
if (ima->source == IMA_SRC_SEQUENCE) {
index = iuser ? iuser->framenr : ima->lastframe;
+ BLI_path_sequence_decode(filepath, head, tail, &numlen);
+ BLI_path_sequence_encode(filepath, head, tail, numlen, index);
}
- else {
+ else if (resolve_udim) {
index = image_get_tile_number_from_iuser(ima, iuser);
- }
- BLI_path_sequence_decode(filepath, head, tail, &numlen);
- BLI_path_sequence_encode(filepath, head, tail, numlen, index);
+ eUDIM_TILE_FORMAT tile_format;
+ char *udim_pattern = BKE_image_get_tile_strformat(filepath, &tile_format);
+ BKE_image_set_filepath_from_tile_number(filepath, udim_pattern, tile_format, index);
+ MEM_SAFE_FREE(udim_pattern);
+ }
}
BLI_path_abs(filepath, ID_BLEND_PATH_FROM_GLOBAL(&ima->id));
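A minimal sketch of the new entry point, assuming `iuser` and `ima` are valid and `ima` is a tiled image:

char filepath[FILE_MAX];
/* Same behavior as BKE_image_user_file_path(): the <UDIM>/<UVTILE> token is replaced
 * with the tile number selected via `iuser`. */
BKE_image_user_file_path_ex(iuser, ima, filepath, true);
/* Keep the token untouched, for callers that want the templated path itself. */
BKE_image_user_file_path_ex(iuser, ima, filepath, false);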
diff --git a/source/blender/blenkernel/intern/image_gpu.cc b/source/blender/blenkernel/intern/image_gpu.cc
index 3b64790f7b8..5fe6cb93435 100644
--- a/source/blender/blenkernel/intern/image_gpu.cc
+++ b/source/blender/blenkernel/intern/image_gpu.cc
@@ -169,7 +169,7 @@ static GPUTexture *gpu_texture_create_tile_array(Image *ima,
ImBuf *ibuf = BKE_image_acquire_ibuf(ima, &iuser, nullptr);
if (ibuf) {
- PackTile *packtile = (PackTile *)MEM_callocN(sizeof(PackTile), __func__);
+ PackTile *packtile = MEM_cnew<PackTile>(__func__);
packtile->tile = tile;
packtile->boxpack.w = ibuf->x;
packtile->boxpack.h = ibuf->y;
diff --git a/source/blender/blenkernel/intern/image_save.c b/source/blender/blenkernel/intern/image_save.c
index f93ede517a9..329bc7b498b 100644
--- a/source/blender/blenkernel/intern/image_save.c
+++ b/source/blender/blenkernel/intern/image_save.c
@@ -30,6 +30,8 @@
#include "DNA_image_types.h"
+#include "MEM_guardedalloc.h"
+
#include "IMB_colormanagement.h"
#include "IMB_imbuf.h"
#include "IMB_imbuf_types.h"
@@ -402,15 +404,17 @@ bool BKE_image_save(
bool colorspace_changed = false;
+ eUDIM_TILE_FORMAT tile_format;
+ char *udim_pattern = NULL;
+
if (ima->source == IMA_SRC_TILED) {
- /* Verify filepath for tiles images. */
- ImageTile *first_tile = ima->tiles.first;
- if (BLI_path_sequence_decode(opts->filepath, NULL, NULL, NULL) != first_tile->tile_number) {
+    /* Verify that the filepath for tiled images contains a valid UDIM marker. */
+ udim_pattern = BKE_image_get_tile_strformat(opts->filepath, &tile_format);
+ if (tile_format == UDIM_TILE_FORMAT_NONE) {
BKE_reportf(reports,
RPT_ERROR,
- "When saving a tiled image, the path '%s' must contain the UDIM tile number %d",
- opts->filepath,
- first_tile->tile_number);
+ "When saving a tiled image, the path '%s' must contain a valid UDIM marker",
+ opts->filepath);
return false;
}
@@ -420,36 +424,29 @@ bool BKE_image_save(
}
}
- /* Save image - or, for tiled images, the first tile. */
- bool ok = image_save_single(reports, ima, iuser, opts, &colorspace_changed);
-
- if (ok && ima->source == IMA_SRC_TILED) {
+  /* Save images. */
+ bool ok = false;
+ if (ima->source != IMA_SRC_TILED) {
+ ok = image_save_single(reports, ima, iuser, opts, &colorspace_changed);
+ }
+ else {
char filepath[FILE_MAX];
BLI_strncpy(filepath, opts->filepath, sizeof(filepath));
- char head[FILE_MAX], tail[FILE_MAX];
- unsigned short numlen;
- BLI_path_sequence_decode(filepath, head, tail, &numlen);
-
- /* Save all other tiles. */
- int index;
- LISTBASE_FOREACH_INDEX (ImageTile *, tile, &ima->tiles, index) {
- /* First tile was already saved before the loop. */
- if (index == 0) {
- continue;
- }
+ /* Save all the tiles. */
+ LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
+ BKE_image_set_filepath_from_tile_number(
+ opts->filepath, udim_pattern, tile_format, tile->tile_number);
+ iuser->tile = tile->tile_number;
+ ok = image_save_single(reports, ima, iuser, opts, &colorspace_changed);
if (!ok) {
- continue;
+ break;
}
-
- /* Build filepath of the tile. */
- BLI_path_sequence_encode(opts->filepath, head, tail, numlen, tile->tile_number);
-
- iuser->tile = tile->tile_number;
- ok = ok && image_save_single(reports, ima, iuser, opts, &colorspace_changed);
}
+ BLI_strncpy(ima->filepath, filepath, sizeof(ima->filepath));
BLI_strncpy(opts->filepath, filepath, sizeof(opts->filepath));
+ MEM_freeN(udim_pattern);
}
if (colorspace_changed) {
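In practice the rewritten tiled branch rewrites opts->filepath once per tile: with an illustrative path of "tex.<UDIM>.png" and tiles 1001 and 1002, it saves "tex.1001.png" and "tex.1002.png", then restores the original templated path into both ima->filepath and opts->filepath before freeing the pattern.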
diff --git a/source/blender/blenkernel/intern/key.c b/source/blender/blenkernel/intern/key.c
index ff199794ab3..0df493e28c0 100644
--- a/source/blender/blenkernel/intern/key.c
+++ b/source/blender/blenkernel/intern/key.c
@@ -2230,13 +2230,20 @@ void BKE_keyblock_mesh_calc_normals(struct KeyBlock *kb,
r_polynors = MEM_mallocN(sizeof(float[3]) * me.totpoly, __func__);
free_polynors = true;
}
- BKE_mesh_calc_normals_poly_and_vertex(
- me.mvert, me.totvert, me.mloop, me.totloop, me.mpoly, me.totpoly, r_polynors, r_vertnors);
+
+ const float(*vert_normals)[3] = BKE_mesh_vertex_normals_ensure(mesh);
+ if (r_vertnors) {
+ memcpy(r_vertnors, vert_normals, sizeof(float[3]) * me.totvert);
+ }
+
+ const float(*face_normals)[3] = BKE_mesh_poly_normals_ensure(mesh);
+ memcpy(r_polynors, face_normals, sizeof(float[3]) * me.totpoly);
if (r_loopnors) {
short(*clnors)[2] = CustomData_get_layer(&mesh->ldata, CD_CUSTOMLOOPNORMAL); /* May be NULL. */
BKE_mesh_normals_loop_split(me.mvert,
+ vert_normals,
me.totvert,
me.medge,
me.totedge,
@@ -2244,7 +2251,7 @@ void BKE_keyblock_mesh_calc_normals(struct KeyBlock *kb,
r_loopnors,
me.totloop,
me.mpoly,
- r_polynors,
+ face_normals,
me.totpoly,
(me.flag & ME_AUTOSMOOTH) != 0,
me.smoothresh,
diff --git a/source/blender/blenkernel/intern/lattice.c b/source/blender/blenkernel/intern/lattice.c
index af0d91d29fc..2f5c5d0a0d5 100644
--- a/source/blender/blenkernel/intern/lattice.c
+++ b/source/blender/blenkernel/intern/lattice.c
@@ -466,7 +466,7 @@ void outside_lattice(Lattice *lt)
bp->hide = 1;
bp->f1 &= ~SELECT;
- /* u extrema */
+ /* U extrema. */
bp1 = latt_bp(lt, 0, v, w);
bp2 = latt_bp(lt, lt->pntsu - 1, v, w);
@@ -475,7 +475,7 @@ void outside_lattice(Lattice *lt)
bp->vec[1] = (1.0f - fac1) * bp1->vec[1] + fac1 * bp2->vec[1];
bp->vec[2] = (1.0f - fac1) * bp1->vec[2] + fac1 * bp2->vec[2];
- /* v extrema */
+ /* V extrema. */
bp1 = latt_bp(lt, u, 0, w);
bp2 = latt_bp(lt, u, lt->pntsv - 1, w);
@@ -484,7 +484,7 @@ void outside_lattice(Lattice *lt)
bp->vec[1] += (1.0f - fac1) * bp1->vec[1] + fac1 * bp2->vec[1];
bp->vec[2] += (1.0f - fac1) * bp1->vec[2] + fac1 * bp2->vec[2];
- /* w extrema */
+ /* W extrema. */
bp1 = latt_bp(lt, u, v, 0);
bp2 = latt_bp(lt, u, v, lt->pntsw - 1);
diff --git a/source/blender/blenkernel/intern/layer.c b/source/blender/blenkernel/intern/layer.c
index 1484d35f28a..a59dd6f2e0e 100644
--- a/source/blender/blenkernel/intern/layer.c
+++ b/source/blender/blenkernel/intern/layer.c
@@ -343,7 +343,7 @@ ViewLayer *BKE_view_layer_find_from_collection(const Scene *scene, LayerCollecti
/* Base */
-static void view_layer_bases_hash_create(ViewLayer *view_layer)
+static void view_layer_bases_hash_create(ViewLayer *view_layer, const bool do_base_duplicates_fix)
{
static ThreadMutex hash_lock = BLI_MUTEX_INITIALIZER;
@@ -353,15 +353,29 @@ static void view_layer_bases_hash_create(ViewLayer *view_layer)
if (view_layer->object_bases_hash == NULL) {
GHash *hash = BLI_ghash_new(BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, __func__);
- LISTBASE_FOREACH (Base *, base, &view_layer->object_bases) {
+ LISTBASE_FOREACH_MUTABLE (Base *, base, &view_layer->object_bases) {
if (base->object) {
- /* Some processes, like ID remapping, may lead to having several bases with the same
- * object. So just take the first one here, and ignore all others
- * (#BKE_layer_collection_sync will clean this up anyway). */
void **val_pp;
if (!BLI_ghash_ensure_p(hash, base->object, &val_pp)) {
*val_pp = base;
}
+ /* The same object has several bases.
+ *
+ * In normal cases this is a serious bug, but this is a common situation when remapping
+ * an object into another one already present in the same View Layer. While ideally we
+           * would process this case separately, for performance reasons it makes more sense to
+ * tackle it here. */
+ else if (do_base_duplicates_fix) {
+ if (view_layer->basact == base) {
+ view_layer->basact = NULL;
+ }
+ BLI_freelinkN(&view_layer->object_bases, base);
+ }
+ else {
+ CLOG_FATAL(&LOG,
+ "Object '%s' has more than one entry in view layer's object bases listbase",
+ base->object->id.name + 2);
+ }
}
}
@@ -376,7 +390,7 @@ static void view_layer_bases_hash_create(ViewLayer *view_layer)
Base *BKE_view_layer_base_find(ViewLayer *view_layer, Object *ob)
{
if (!view_layer->object_bases_hash) {
- view_layer_bases_hash_create(view_layer);
+ view_layer_bases_hash_create(view_layer, false);
}
return BLI_ghash_lookup(view_layer->object_bases_hash, ob);
@@ -1182,6 +1196,23 @@ static bool view_layer_objects_base_cache_validate(ViewLayer *UNUSED(view_layer)
}
#endif
+void BKE_layer_collection_doversion_2_80(const Scene *scene, ViewLayer *view_layer)
+{
+ LayerCollection *first_layer_collection = view_layer->layer_collections.first;
+ if (BLI_listbase_count_at_most(&view_layer->layer_collections, 2) > 1 ||
+ first_layer_collection->collection != scene->master_collection) {
+    /* In some cases (from older files) we do have a master collection, but no matching layer;
+     * instead, all the children of the master collection have their layer collections in the
+     * view layer's list. This is not a valid situation: add a layer for the master collection and
+     * add all existing first-level layers as children of that new master layer. */
+ ListBase layer_collections = view_layer->layer_collections;
+ BLI_listbase_clear(&view_layer->layer_collections);
+ LayerCollection *master_layer_collection = layer_collection_add(&view_layer->layer_collections,
+ scene->master_collection);
+ master_layer_collection->layer_collections = layer_collections;
+ }
+}
+
void BKE_layer_collection_sync(const Scene *scene, ViewLayer *view_layer)
{
if (no_resync) {
@@ -1193,18 +1224,32 @@ void BKE_layer_collection_sync(const Scene *scene, ViewLayer *view_layer)
return;
}
- /* In some cases (from older files) we do have a master collection, yet no matching layer. Create
- * the master one here, so that the rest of the code can work as expected. */
if (BLI_listbase_is_empty(&view_layer->layer_collections)) {
+ /* In some cases (from older files, or when creating a new ViewLayer from
+ * #BKE_view_layer_add), we do have a master collection, yet no matching layer. Create the
+ * master one here, so that the rest of the code can work as expected. */
layer_collection_add(&view_layer->layer_collections, scene->master_collection);
}
+#ifndef NDEBUG
+ {
+ BLI_assert_msg(BLI_listbase_count_at_most(&view_layer->layer_collections, 2) == 1,
+ "ViewLayer's first level of children layer collections should always have "
+ "exactly one item");
+
+ LayerCollection *first_layer_collection = view_layer->layer_collections.first;
+ BLI_assert_msg(first_layer_collection->collection == scene->master_collection,
+ "ViewLayer's first layer collection should always be the one for the scene's "
+ "master collection");
+ }
+#endif
+
/* Free cache. */
MEM_SAFE_FREE(view_layer->object_bases_array);
/* Create object to base hash if it does not exist yet. */
if (!view_layer->object_bases_hash) {
- view_layer_bases_hash_create(view_layer);
+ view_layer_bases_hash_create(view_layer, false);
}
/* Clear visible and selectable flags to be reset. */
@@ -1317,6 +1362,11 @@ void BKE_main_collection_sync_remap(const Main *bmain)
if (view_layer->object_bases_hash) {
BLI_ghash_free(view_layer->object_bases_hash, NULL, NULL);
view_layer->object_bases_hash = NULL;
+
+      /* Directly re-create the mapping here, so that we can also properly deal with duplicates
+       * in the `view_layer->object_bases` list of bases. This is the only place where such
+       * duplicates should be fixed, rather than being considered a critical error. */
+ view_layer_bases_hash_create(view_layer, true);
}
}
diff --git a/source/blender/blenkernel/intern/lib_id.c b/source/blender/blenkernel/intern/lib_id.c
index 692e27731c5..49a518607f1 100644
--- a/source/blender/blenkernel/intern/lib_id.c
+++ b/source/blender/blenkernel/intern/lib_id.c
@@ -454,38 +454,57 @@ static void lib_id_copy_ensure_local(Main *bmain, const ID *old_id, ID *new_id,
}
}
-void BKE_lib_id_make_local_generic(Main *bmain, ID *id, const int flags)
+void BKE_lib_id_make_local_generic_action_define(
+ struct Main *bmain, struct ID *id, int flags, bool *r_force_local, bool *r_force_copy)
{
- if (!ID_IS_LINKED(id)) {
- return;
- }
-
- const bool lib_local = (flags & LIB_ID_MAKELOCAL_FULL_LIBRARY) != 0;
bool force_local = (flags & LIB_ID_MAKELOCAL_FORCE_LOCAL) != 0;
bool force_copy = (flags & LIB_ID_MAKELOCAL_FORCE_COPY) != 0;
BLI_assert(force_copy == false || force_copy != force_local);
+ if (force_local || force_copy) {
+ /* Already set by caller code, nothing to do here. */
+ *r_force_local = force_local;
+ *r_force_copy = force_copy;
+ return;
+ }
+
+ const bool lib_local = (flags & LIB_ID_MAKELOCAL_FULL_LIBRARY) != 0;
bool is_local = false, is_lib = false;
- /* - only lib users: do nothing (unless force_local is set)
- * - only local users: set flag
+  /* - no user (neither lib nor local): make local (happens e.g. with data only used by the UI).
+ * - only lib users: do nothing (unless force_local is set)
+ * - only local users: make local
* - mixed: make copy
* In case we make a whole lib's content local,
* we always want to localize, and we skip remapping (done later).
*/
- if (!force_copy && !force_local) {
- BKE_library_ID_test_usages(bmain, id, &is_local, &is_lib);
- if (lib_local || is_local) {
- if (!is_lib) {
- force_local = true;
- }
- else {
- force_copy = true;
- }
+ BKE_library_ID_test_usages(bmain, id, &is_local, &is_lib);
+ if (!lib_local && !is_local && !is_lib) {
+ force_local = true;
+ }
+ else if (lib_local || is_local) {
+ if (!is_lib) {
+ force_local = true;
+ }
+ else {
+ force_copy = true;
}
}
+ *r_force_local = force_local;
+ *r_force_copy = force_copy;
+}
+
+void BKE_lib_id_make_local_generic(Main *bmain, ID *id, const int flags)
+{
+ if (!ID_IS_LINKED(id)) {
+ return;
+ }
+
+ bool force_local, force_copy;
+ BKE_lib_id_make_local_generic_action_define(bmain, id, flags, &force_local, &force_copy);
+
if (force_local) {
BKE_lib_id_clear_library_data(bmain, id, flags);
BKE_lib_id_expand_local(bmain, id, flags);
@@ -516,6 +535,7 @@ void BKE_lib_id_make_local_generic(Main *bmain, ID *id, const int flags)
}
}
+ const bool lib_local = (flags & LIB_ID_MAKELOCAL_FULL_LIBRARY) != 0;
if (!lib_local) {
BKE_libblock_remap(bmain, id, id_new, ID_REMAP_SKIP_INDIRECT_USAGE);
}
diff --git a/source/blender/blenkernel/intern/lib_id_delete.c b/source/blender/blenkernel/intern/lib_id_delete.c
index 1922a54addb..f4dd67cac28 100644
--- a/source/blender/blenkernel/intern/lib_id_delete.c
+++ b/source/blender/blenkernel/intern/lib_id_delete.c
@@ -154,7 +154,10 @@ void BKE_id_free_ex(Main *bmain, void *idv, int flag, const bool use_flag_from_i
}
if (remap_editor_id_reference_cb) {
- remap_editor_id_reference_cb(id, NULL);
+ struct IDRemapper *remapper = BKE_id_remapper_create();
+ BKE_id_remapper_add(remapper, id, NULL);
+ remap_editor_id_reference_cb(remapper);
+ BKE_id_remapper_free(remapper);
}
}
@@ -292,32 +295,40 @@ static size_t id_delete(Main *bmain, const bool do_tagged_deletion)
* Note that we go forward here, since we want to check dependencies before users
* (e.g. meshes before objects).
 * Avoids having to loop twice. */
+ struct IDRemapper *remapper = BKE_id_remapper_create();
for (i = 0; i < base_count; i++) {
ListBase *lb = lbarray[i];
ID *id, *id_next;
+ BKE_id_remapper_clear(remapper);
for (id = lb->first; id; id = id_next) {
id_next = id->next;
/* NOTE: in case we delete a library, we also delete all its datablocks! */
if ((id->tag & tag) || (id->lib != NULL && (id->lib->id.tag & tag))) {
id->tag |= tag;
-
- /* Will tag 'never NULL' users of this ID too.
- * Note that we cannot use BKE_libblock_unlink() here, since it would ignore indirect
- * (and proxy!) links, this can lead to nasty crashing here in second,
- * actual deleting loop.
- * Also, this will also flag users of deleted data that cannot be unlinked
- * (object using deleted obdata, etc.), so that they also get deleted. */
- BKE_libblock_remap_locked(bmain,
- id,
- NULL,
- (ID_REMAP_FLAG_NEVER_NULL_USAGE |
- ID_REMAP_FORCE_NEVER_NULL_USAGE |
- ID_REMAP_FORCE_INTERNAL_RUNTIME_POINTERS));
+ BKE_id_remapper_add(remapper, id, NULL);
}
}
+
+ if (BKE_id_remapper_is_empty(remapper)) {
+ continue;
+ }
+
+      /* Will tag 'never NULL' users of this ID too.
+       * Note that we cannot use BKE_libblock_unlink() here, since it would ignore indirect
+       * (and proxy!) links, which can lead to nasty crashes in the second, actual deletion loop.
+       * This will also flag users of deleted data that cannot be unlinked
+       * (object using deleted obdata, etc.), so that they also get deleted. */
+ BKE_libblock_remap_multiple_locked(bmain,
+ remapper,
+ (ID_REMAP_FLAG_NEVER_NULL_USAGE |
+ ID_REMAP_FORCE_NEVER_NULL_USAGE |
+ ID_REMAP_FORCE_INTERNAL_RUNTIME_POINTERS));
}
+ BKE_id_remapper_free(remapper);
}
+
BKE_main_unlock(bmain);
/* In usual reversed order, such that all usage of a given ID, even 'never NULL' ones,
@@ -350,6 +361,9 @@ static size_t id_delete(Main *bmain, const bool do_tagged_deletion)
void BKE_id_delete(Main *bmain, void *idv)
{
+ BLI_assert_msg((((ID *)idv)->tag & LIB_TAG_NO_MAIN) == 0,
+ "Cannot be used with IDs outside of Main");
+
BKE_main_id_tag_all(bmain, LIB_TAG_DOIT, false);
((ID *)idv)->tag |= LIB_TAG_DOIT;
diff --git a/source/blender/blenkernel/intern/lib_id_remapper.cc b/source/blender/blenkernel/intern/lib_id_remapper.cc
new file mode 100644
index 00000000000..c1734c9826a
--- /dev/null
+++ b/source/blender/blenkernel/intern/lib_id_remapper.cc
@@ -0,0 +1,175 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2022 by Blender Foundation.
+ */
+
+#include "DNA_ID.h"
+
+#include "BKE_idtype.h"
+#include "BKE_lib_id.h"
+#include "BKE_lib_remap.h"
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_map.hh"
+
+using IDTypeFilter = uint64_t;
+
+namespace blender::bke::id::remapper {
+struct IDRemapper {
+ private:
+ Map<ID *, ID *> mappings;
+ IDTypeFilter source_types = 0;
+
+ public:
+ void clear()
+ {
+ mappings.clear();
+ source_types = 0;
+ }
+
+ bool is_empty() const
+ {
+ return mappings.is_empty();
+ }
+
+ void add(ID *old_id, ID *new_id)
+ {
+ BLI_assert(old_id != nullptr);
+ BLI_assert(new_id == nullptr || (GS(old_id->name) == GS(new_id->name)));
+ mappings.add(old_id, new_id);
+ source_types |= BKE_idtype_idcode_to_idfilter(GS(old_id->name));
+ }
+
+ bool contains_mappings_for_any(IDTypeFilter filter) const
+ {
+ return (source_types & filter) != 0;
+ }
+
+ IDRemapperApplyResult apply(ID **r_id_ptr, IDRemapperApplyOptions options) const
+ {
+ BLI_assert(r_id_ptr != nullptr);
+ if (*r_id_ptr == nullptr) {
+ return ID_REMAP_RESULT_SOURCE_NOT_MAPPABLE;
+ }
+
+ if (!mappings.contains(*r_id_ptr)) {
+ return ID_REMAP_RESULT_SOURCE_UNAVAILABLE;
+ }
+
+ if (options & ID_REMAP_APPLY_UPDATE_REFCOUNT) {
+ id_us_min(*r_id_ptr);
+ }
+
+ *r_id_ptr = mappings.lookup(*r_id_ptr);
+ if (*r_id_ptr == nullptr) {
+ return ID_REMAP_RESULT_SOURCE_UNASSIGNED;
+ }
+
+ if (options & ID_REMAP_APPLY_UPDATE_REFCOUNT) {
+ id_us_plus(*r_id_ptr);
+ }
+
+ if (options & ID_REMAP_APPLY_ENSURE_REAL) {
+ id_us_ensure_real(*r_id_ptr);
+ }
+ return ID_REMAP_RESULT_SOURCE_REMAPPED;
+ }
+
+ void iter(IDRemapperIterFunction func, void *user_data) const
+ {
+ for (auto item : mappings.items()) {
+ func(item.key, item.value, user_data);
+ }
+ }
+};
+
+} // namespace blender::bke::id::remapper
+
+/** \brief wrap CPP IDRemapper to a C handle. */
+static IDRemapper *wrap(blender::bke::id::remapper::IDRemapper *remapper)
+{
+ return static_cast<IDRemapper *>(static_cast<void *>(remapper));
+}
+
+/** \brief wrap C handle to a CPP IDRemapper. */
+static blender::bke::id::remapper::IDRemapper *unwrap(IDRemapper *remapper)
+{
+ return static_cast<blender::bke::id::remapper::IDRemapper *>(static_cast<void *>(remapper));
+}
+
+/** \brief wrap C handle to a CPP IDRemapper. */
+static const blender::bke::id::remapper::IDRemapper *unwrap(const IDRemapper *remapper)
+{
+ return static_cast<const blender::bke::id::remapper::IDRemapper *>(
+ static_cast<const void *>(remapper));
+}
+
+extern "C" {
+
+IDRemapper *BKE_id_remapper_create(void)
+{
+ blender::bke::id::remapper::IDRemapper *remapper =
+ MEM_new<blender::bke::id::remapper::IDRemapper>(__func__);
+ return wrap(remapper);
+}
+
+void BKE_id_remapper_free(IDRemapper *id_remapper)
+{
+ blender::bke::id::remapper::IDRemapper *remapper = unwrap(id_remapper);
+ MEM_delete<blender::bke::id::remapper::IDRemapper>(remapper);
+}
+
+void BKE_id_remapper_clear(struct IDRemapper *id_remapper)
+{
+ blender::bke::id::remapper::IDRemapper *remapper = unwrap(id_remapper);
+ remapper->clear();
+}
+
+bool BKE_id_remapper_is_empty(const struct IDRemapper *id_remapper)
+{
+ const blender::bke::id::remapper::IDRemapper *remapper = unwrap(id_remapper);
+ return remapper->is_empty();
+}
+
+void BKE_id_remapper_add(IDRemapper *id_remapper, ID *old_id, ID *new_id)
+{
+ blender::bke::id::remapper::IDRemapper *remapper = unwrap(id_remapper);
+ remapper->add(old_id, new_id);
+}
+
+bool BKE_id_remapper_has_mapping_for(const struct IDRemapper *id_remapper, uint64_t type_filter)
+{
+ const blender::bke::id::remapper::IDRemapper *remapper = unwrap(id_remapper);
+ return remapper->contains_mappings_for_any(type_filter);
+}
+
+IDRemapperApplyResult BKE_id_remapper_apply(const IDRemapper *id_remapper,
+ ID **r_id_ptr,
+ const IDRemapperApplyOptions options)
+{
+ const blender::bke::id::remapper::IDRemapper *remapper = unwrap(id_remapper);
+ return remapper->apply(r_id_ptr, options);
+}
+
+void BKE_id_remapper_iter(const struct IDRemapper *id_remapper,
+ IDRemapperIterFunction func,
+ void *user_data)
+{
+ const blender::bke::id::remapper::IDRemapper *remapper = unwrap(id_remapper);
+ remapper->iter(func, user_data);
+}
+}
diff --git a/source/blender/blenkernel/intern/lib_id_remapper_test.cc b/source/blender/blenkernel/intern/lib_id_remapper_test.cc
new file mode 100644
index 00000000000..594f64dac73
--- /dev/null
+++ b/source/blender/blenkernel/intern/lib_id_remapper_test.cc
@@ -0,0 +1,83 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2022 by Blender Foundation.
+ */
+
+#include "testing/testing.h"
+
+#include "BKE_lib_remap.h"
+
+#include "BLI_string.h"
+
+#include "DNA_ID.h"
+
+namespace blender::bke::id::remapper::tests {
+
+TEST(lib_id_remapper, unavailable)
+{
+ ID id1;
+ ID *idp = &id1;
+
+ IDRemapper *remapper = BKE_id_remapper_create();
+ IDRemapperApplyResult result = BKE_id_remapper_apply(remapper, &idp, ID_REMAP_APPLY_DEFAULT);
+ EXPECT_EQ(result, ID_REMAP_RESULT_SOURCE_UNAVAILABLE);
+
+ BKE_id_remapper_free(remapper);
+}
+
+TEST(lib_id_remapper, not_mappable)
+{
+ ID *idp = nullptr;
+
+ IDRemapper *remapper = BKE_id_remapper_create();
+ IDRemapperApplyResult result = BKE_id_remapper_apply(remapper, &idp, ID_REMAP_APPLY_DEFAULT);
+ EXPECT_EQ(result, ID_REMAP_RESULT_SOURCE_NOT_MAPPABLE);
+
+ BKE_id_remapper_free(remapper);
+}
+
+TEST(lib_id_remapper, mapped)
+{
+ ID id1;
+ ID id2;
+ ID *idp = &id1;
+ BLI_strncpy(id1.name, "OB1", sizeof(id1.name));
+ BLI_strncpy(id2.name, "OB2", sizeof(id2.name));
+
+ IDRemapper *remapper = BKE_id_remapper_create();
+ BKE_id_remapper_add(remapper, &id1, &id2);
+ IDRemapperApplyResult result = BKE_id_remapper_apply(remapper, &idp, ID_REMAP_APPLY_DEFAULT);
+ EXPECT_EQ(result, ID_REMAP_RESULT_SOURCE_REMAPPED);
+ EXPECT_EQ(idp, &id2);
+
+ BKE_id_remapper_free(remapper);
+}
+
+TEST(lib_id_remapper, unassigned)
+{
+ ID id1;
+ ID *idp = &id1;
+
+ IDRemapper *remapper = BKE_id_remapper_create();
+ BKE_id_remapper_add(remapper, &id1, nullptr);
+ IDRemapperApplyResult result = BKE_id_remapper_apply(remapper, &idp, ID_REMAP_APPLY_DEFAULT);
+ EXPECT_EQ(result, ID_REMAP_RESULT_SOURCE_UNASSIGNED);
+ EXPECT_EQ(idp, nullptr);
+
+ BKE_id_remapper_free(remapper);
+}
+
+} // namespace blender::bke::id::remapper::tests
diff --git a/source/blender/blenkernel/intern/lib_override.c b/source/blender/blenkernel/intern/lib_override.c
index 52bfeb4b4d3..d1375b1e5b5 100644
--- a/source/blender/blenkernel/intern/lib_override.c
+++ b/source/blender/blenkernel/intern/lib_override.c
@@ -57,6 +57,7 @@
#include "BLI_ghash.h"
#include "BLI_linklist.h"
#include "BLI_listbase.h"
+#include "BLI_memarena.h"
#include "BLI_string.h"
#include "BLI_task.h"
#include "BLI_utildefines.h"
@@ -281,6 +282,10 @@ ID *BKE_lib_override_library_create_from_id(Main *bmain,
BLI_assert(ID_IS_LINKED(reference_id));
ID *local_id = lib_override_library_create_from(bmain, reference_id, 0);
+  /* We cannot allow automatic hierarchy resync on this ID, as it is highly likely to generate a
+   * giant mess when there are a lot of hidden, non-instantiated, improperly organized
+   * dependencies. Ref T94650. */
+ local_id->override_library->flag |= IDOVERRIDE_LIBRARY_FLAG_NO_HIERARCHY;
if (do_tagged_remap) {
Key *reference_key, *local_key = NULL;
@@ -371,7 +376,6 @@ bool BKE_lib_override_library_create_from_tag(Main *bmain,
* existing linked IDs usages. */
if (success) {
for (todo_id_iter = todo_ids.first; todo_id_iter != NULL; todo_id_iter = todo_id_iter->next) {
- ID *other_id;
reference_id = todo_id_iter->data;
ID *local_id = reference_id->newid;
@@ -389,6 +393,7 @@ bool BKE_lib_override_library_create_from_tag(Main *bmain,
* remapped to use newly created overriding IDs, if needed. */
ID *id;
FOREACH_MAIN_ID_BEGIN (bmain, id) {
+ ID *other_id;
/* In case we created new overrides as 'no main', they are not accessible directly in this
* loop, but we can get to them through their reference's `newid` pointer. */
if (do_no_main && id->lib == reference_id->lib && id->newid != NULL) {
@@ -452,8 +457,60 @@ typedef struct LibOverrideGroupTagData {
bool is_override;
/* Whether we are creating new override, or resyncing existing one. */
bool is_resync;
+
+ /* Mapping linked objects to all their instantiating collections (as a linked list).
+   * Avoids calling #BKE_collection_object_find over and over, as that function is very expensive. */
+ GHash *linked_object_to_instantiating_collections;
+ MemArena *mem_arena;
} LibOverrideGroupTagData;
+static void lib_override_group_tag_data_object_to_collection_init_collection_process(
+ LibOverrideGroupTagData *data, Collection *collection)
+{
+ LISTBASE_FOREACH (CollectionObject *, collection_object, &collection->gobject) {
+ Object *ob = collection_object->ob;
+ if (!ID_IS_LINKED(ob)) {
+ continue;
+ }
+
+ LinkNodePair **collections_linkedlist_p;
+ if (!BLI_ghash_ensure_p(data->linked_object_to_instantiating_collections,
+ ob,
+ (void ***)&collections_linkedlist_p)) {
+ *collections_linkedlist_p = BLI_memarena_calloc(data->mem_arena,
+ sizeof(**collections_linkedlist_p));
+ }
+ BLI_linklist_append_arena(*collections_linkedlist_p, collection, data->mem_arena);
+ }
+}
+
+/* Initialize complex data; `data` is expected to be already initialized with basic pointers and
+ * other simple data.
+ *
+ * NOTE: Currently creates a mapping from each linked object to all of its instantiating
+ * collections (as returned by #BKE_collection_object_find). */
+static void lib_override_group_tag_data_object_to_collection_init(LibOverrideGroupTagData *data)
+{
+ data->mem_arena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
+
+ data->linked_object_to_instantiating_collections = BLI_ghash_new(
+ BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, __func__);
+ if (data->scene != NULL) {
+ lib_override_group_tag_data_object_to_collection_init_collection_process(
+ data, data->scene->master_collection);
+ }
+ LISTBASE_FOREACH (Collection *, collection, &data->bmain->collections) {
+ lib_override_group_tag_data_object_to_collection_init_collection_process(data, collection);
+ }
+}
+
+static void lib_override_group_tag_data_clear(LibOverrideGroupTagData *data)
+{
+ BLI_ghash_free(data->linked_object_to_instantiating_collections, NULL, NULL);
+ BLI_memarena_free(data->mem_arena);
+ memset(data, 0, sizeof(*data));
+}
+
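A minimal sketch of how the cached mapping is meant to be consumed (mirroring its use further below), assuming `data` was filled by lib_override_group_tag_data_object_to_collection_init() and `ob` is a linked object:

LinkNodePair *collections = BLI_ghash_lookup(data->linked_object_to_instantiating_collections, ob);
if (collections != NULL) {
  for (LinkNode *node = collections->list; node != NULL; node = node->next) {
    Collection *instantiating_collection = node->link;
    /* ... inspect or tag `instantiating_collection` ... */
  }
}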
/* Tag all IDs in dependency relationships within an override hierarchy/group.
*
* Requires existing `Main.relations`.
@@ -559,6 +616,42 @@ static void lib_override_linked_group_tag_recursive(LibOverrideGroupTagData *dat
}
}
+static void lib_override_linked_group_tag_clear_boneshapes_objects(LibOverrideGroupTagData *data)
+{
+ Main *bmain = data->bmain;
+
+  /* Remove (untag) bone shape objects, they shall never need to be directly/explicitly
+   * overridden. */
+ LISTBASE_FOREACH (Object *, ob, &bmain->objects) {
+ if (ob->type == OB_ARMATURE && ob->pose != NULL && (ob->id.tag & data->tag)) {
+ for (bPoseChannel *pchan = ob->pose->chanbase.first; pchan != NULL; pchan = pchan->next) {
+ if (pchan->custom != NULL) {
+ pchan->custom->id.tag &= ~data->tag;
+ }
+ }
+ }
+ }
+
+ /* Remove (untag) collections if they do not own any tagged object (either themselves, or in
+ * their children collections). */
+ LISTBASE_FOREACH (Collection *, collection, &bmain->collections) {
+ if ((collection->id.tag & data->tag) == 0) {
+ continue;
+ }
+ bool keep_tagged = false;
+ const ListBase object_bases = BKE_collection_object_cache_get(collection);
+ LISTBASE_FOREACH (Base *, base, &object_bases) {
+ if ((base->object->id.tag & data->tag) != 0) {
+ keep_tagged = true;
+ break;
+ }
+ }
+ if (!keep_tagged) {
+ collection->id.tag &= ~data->tag;
+ }
+ }
+}
+
/* This will tag at least all 'boundary' linked IDs for a potential override group.
*
* Requires existing `Main.relations`.
@@ -572,7 +665,6 @@ static void lib_override_linked_group_tag_recursive(LibOverrideGroupTagData *dat
static void lib_override_linked_group_tag(LibOverrideGroupTagData *data)
{
Main *bmain = data->bmain;
- Scene *scene = data->scene;
ID *id_root = data->id_root;
const bool is_resync = data->is_resync;
BLI_assert(!data->is_override);
@@ -584,36 +676,36 @@ static void lib_override_linked_group_tag(LibOverrideGroupTagData *data)
id_root->tag |= data->tag;
}
- if (ELEM(GS(id_root->name), ID_OB, ID_GR)) {
- /* Tag all collections and objects. */
- lib_override_linked_group_tag_recursive(data);
+ /* Only objects and groups are currently considered as 'keys' in override hierarchies. */
+ if (!ELEM(GS(id_root->name), ID_OB, ID_GR)) {
+ return;
+ }
- /* Then, we remove (untag) bone shape objects, you shall never want to directly/explicitly
- * override those. */
- LISTBASE_FOREACH (Object *, ob, &bmain->objects) {
- if (ob->type == OB_ARMATURE && ob->pose != NULL && (ob->id.tag & data->tag)) {
- for (bPoseChannel *pchan = ob->pose->chanbase.first; pchan != NULL; pchan = pchan->next) {
- if (pchan->custom != NULL) {
- pchan->custom->id.tag &= ~(data->tag | data->missing_tag);
- }
- }
- }
- }
+ /* Tag all collections and objects recursively. */
+ lib_override_linked_group_tag_recursive(data);
- /* For each object tagged for override, ensure we get at least one local or liboverride
- * collection to host it. Avoids getting a bunch of random object in the scene's master
- * collection when all objects' dependencies are not properly 'packed' into a single root
- * collection. */
- LISTBASE_FOREACH (Object *, ob, &bmain->objects) {
- if (ID_IS_LINKED(ob) && (ob->id.tag & data->tag) != 0) {
- Collection *instantiating_collection = NULL;
- Collection *instantiating_collection_override_candidate = NULL;
- /* Loop over all collections instantiating the object, if we already have a 'locale' one we
- * have nothing to do, otherwise try to find a 'linked' one that we can override too. */
- while ((instantiating_collection = BKE_collection_object_find(
- bmain, scene, instantiating_collection, ob)) != NULL) {
- /* In (recursive) resync case, if a collection of a 'parent' lib instantiates the linked
- * object, it is also fine. */
+ /* Do not override objects used as bone shapes, nor their collections if possible. */
+ lib_override_linked_group_tag_clear_boneshapes_objects(data);
+
+ /* For each object tagged for override, ensure we get at least one local or liboverride
+   * collection to host it. Avoids getting a bunch of random objects in the scene's master
+ * collection when all objects' dependencies are not properly 'packed' into a single root
+ * collection. */
+ LISTBASE_FOREACH (Object *, ob, &bmain->objects) {
+ if (ID_IS_LINKED(ob) && (ob->id.tag & data->tag) != 0) {
+ Collection *instantiating_collection = NULL;
+ Collection *instantiating_collection_override_candidate = NULL;
+      /* Loop over all collections instantiating the object: if we already have a 'local' one we
+       * have nothing to do, otherwise try to find a 'linked' one that we can override too. */
+ LinkNodePair *instantiating_collection_linklist = BLI_ghash_lookup(
+ data->linked_object_to_instantiating_collections, ob);
+ if (instantiating_collection_linklist != NULL) {
+ for (LinkNode *instantiating_collection_linknode = instantiating_collection_linklist->list;
+ instantiating_collection_linknode != NULL;
+ instantiating_collection_linknode = instantiating_collection_linknode->next) {
+ instantiating_collection = instantiating_collection_linknode->link;
+ /* In (recursive) resync case, if a collection of a 'parent' lib instantiates the
+ * linked object, it is also fine. */
if (!ID_IS_LINKED(instantiating_collection) ||
(is_resync && ID_IS_LINKED(id_root) &&
instantiating_collection->id.lib->temp_index < id_root->lib->temp_index)) {
@@ -623,16 +715,17 @@ static void lib_override_linked_group_tag(LibOverrideGroupTagData *data)
(!is_resync || instantiating_collection->id.lib == id_root->lib)) {
instantiating_collection_override_candidate = instantiating_collection;
}
+ instantiating_collection = NULL;
}
+ }
- if (instantiating_collection == NULL &&
- instantiating_collection_override_candidate != NULL) {
- if (instantiating_collection_override_candidate->id.tag & LIB_TAG_MISSING) {
- instantiating_collection_override_candidate->id.tag |= data->missing_tag;
- }
- else {
- instantiating_collection_override_candidate->id.tag |= data->tag;
- }
+ if (instantiating_collection == NULL &&
+ instantiating_collection_override_candidate != NULL) {
+ if (instantiating_collection_override_candidate->id.tag & LIB_TAG_MISSING) {
+ instantiating_collection_override_candidate->id.tag |= data->missing_tag;
+ }
+ else {
+ instantiating_collection_override_candidate->id.tag |= data->tag;
}
}
}
@@ -645,6 +738,12 @@ static void lib_override_overrides_group_tag_recursive(LibOverrideGroupTagData *
ID *id_owner = data->id_root;
BLI_assert(ID_IS_OVERRIDE_LIBRARY(id_owner));
BLI_assert(data->is_override);
+
+ if (ID_IS_OVERRIDE_LIBRARY_REAL(id_owner) &&
+ (id_owner->override_library->flag & IDOVERRIDE_LIBRARY_FLAG_NO_HIERARCHY) != 0) {
+ return;
+ }
+
const uint tag = data->tag;
const uint missing_tag = data->missing_tag;
@@ -724,12 +823,14 @@ static bool lib_override_library_create_do(Main *bmain, Scene *scene, ID *id_roo
.missing_tag = LIB_TAG_MISSING,
.is_override = false,
.is_resync = false};
+ lib_override_group_tag_data_object_to_collection_init(&data);
lib_override_linked_group_tag(&data);
BKE_main_relations_tag_set(bmain, MAINIDRELATIONS_ENTRY_TAGS_PROCESSED, false);
lib_override_hierarchy_dependencies_recursive_tag(&data);
BKE_main_relations_free(bmain);
+ lib_override_group_tag_data_clear(&data);
return BKE_lib_override_library_create_from_tag(bmain, id_root->lib, false);
}
@@ -1020,14 +1121,53 @@ void BKE_lib_override_library_main_proxy_convert(Main *bmain, BlendFileReadRepor
}
}
-bool BKE_lib_override_library_resync(Main *bmain,
- Scene *scene,
- ViewLayer *view_layer,
- ID *id_root,
- Collection *override_resync_residual_storage,
- const bool do_hierarchy_enforce,
- const bool do_post_process,
- BlendFileReadReport *reports)
+static void lib_override_library_remap(Main *bmain,
+ const ID *id_root_reference,
+ GHash *linkedref_to_old_override)
+{
+ ID *id;
+ struct IDRemapper *remapper = BKE_id_remapper_create();
+ FOREACH_MAIN_ID_BEGIN (bmain, id) {
+
+ if (id->tag & LIB_TAG_DOIT && id->newid != NULL && id->lib == id_root_reference->lib) {
+ ID *id_override_new = id->newid;
+ ID *id_override_old = BLI_ghash_lookup(linkedref_to_old_override, id);
+ if (id_override_old == NULL) {
+ continue;
+ }
+
+ BKE_id_remapper_add(remapper, id_override_old, id_override_new);
+ /* Remap no-main override IDs we just created too. */
+ GHashIterator linkedref_to_old_override_iter;
+ GHASH_ITER (linkedref_to_old_override_iter, linkedref_to_old_override) {
+ ID *id_override_old_iter = BLI_ghashIterator_getValue(&linkedref_to_old_override_iter);
+ if ((id_override_old_iter->tag & LIB_TAG_NO_MAIN) == 0) {
+ continue;
+ }
+
+ BKE_libblock_relink_ex(bmain,
+ id_override_old_iter,
+ id_override_old,
+ id_override_new,
+ ID_REMAP_FORCE_USER_REFCOUNT | ID_REMAP_FORCE_NEVER_NULL_USAGE);
+ }
+ }
+ }
+ FOREACH_MAIN_ID_END;
+
+ /* Remap all IDs to use the new override. */
+ BKE_libblock_remap_multiple(bmain, remapper, 0);
+ BKE_id_remapper_free(remapper);
+}
+
+static bool lib_override_library_resync(Main *bmain,
+ Scene *scene,
+ ViewLayer *view_layer,
+ ID *id_root,
+ Collection *override_resync_residual_storage,
+ const bool do_hierarchy_enforce,
+ const bool do_post_process,
+ BlendFileReadReport *reports)
{
BLI_assert(ID_IS_OVERRIDE_LIBRARY_REAL(id_root));
@@ -1050,6 +1190,7 @@ bool BKE_lib_override_library_resync(Main *bmain,
.missing_tag = LIB_TAG_MISSING,
.is_override = true,
.is_resync = true};
+ lib_override_group_tag_data_object_to_collection_init(&data);
lib_override_overrides_group_tag(&data);
BKE_main_relations_tag_set(bmain, MAINIDRELATIONS_ENTRY_TAGS_PROCESSED, false);
@@ -1140,6 +1281,7 @@ bool BKE_lib_override_library_resync(Main *bmain,
lib_override_hierarchy_dependencies_recursive_tag(&data);
BKE_main_relations_free(bmain);
+ lib_override_group_tag_data_clear(&data);
/* Make new override from linked data. */
/* Note that this call also remaps all pointers of tagged IDs from old override IDs to new
@@ -1209,32 +1351,9 @@ bool BKE_lib_override_library_resync(Main *bmain,
}
FOREACH_MAIN_LISTBASE_END;
- /* We need to remap old to new override usages in a separate loop, after all new overrides have
+ /* We remap old to new override usages in a separate loop, after all new overrides have
* been added to Main. */
- FOREACH_MAIN_ID_BEGIN (bmain, id) {
- if (id->tag & LIB_TAG_DOIT && id->newid != NULL && id->lib == id_root_reference->lib) {
- ID *id_override_new = id->newid;
- ID *id_override_old = BLI_ghash_lookup(linkedref_to_old_override, id);
-
- if (id_override_old != NULL) {
- /* Remap all IDs to use the new override. */
- BKE_libblock_remap(bmain, id_override_old, id_override_new, 0);
- /* Remap no-main override IDs we just created too. */
- GHashIterator linkedref_to_old_override_iter;
- GHASH_ITER (linkedref_to_old_override_iter, linkedref_to_old_override) {
- ID *id_override_old_iter = BLI_ghashIterator_getValue(&linkedref_to_old_override_iter);
- if (id_override_old_iter->tag & LIB_TAG_NO_MAIN) {
- BKE_libblock_relink_ex(bmain,
- id_override_old_iter,
- id_override_old,
- id_override_new,
- ID_REMAP_FORCE_USER_REFCOUNT | ID_REMAP_FORCE_NEVER_NULL_USAGE);
- }
- }
- }
- }
- }
- FOREACH_MAIN_ID_END;
+ lib_override_library_remap(bmain, id_root_reference, linkedref_to_old_override);
BKE_main_collection_sync(bmain);
@@ -1396,6 +1515,26 @@ bool BKE_lib_override_library_resync(Main *bmain,
return success;
}
+bool BKE_lib_override_library_resync(Main *bmain,
+ Scene *scene,
+ ViewLayer *view_layer,
+ ID *id_root,
+ Collection *override_resync_residual_storage,
+ const bool do_hierarchy_enforce,
+ BlendFileReadReport *reports)
+{
+ const bool success = lib_override_library_resync(bmain,
+ scene,
+ view_layer,
+ id_root,
+ override_resync_residual_storage,
+ do_hierarchy_enforce,
+ true,
+ reports);
+
+ return success;
+}
+
/* Also tag ancestors overrides for resync.
*
* WARNING: Expects `bmain` to have valid relation data.
@@ -1414,11 +1553,13 @@ static void lib_override_resync_tagging_finalize_recurse(Main *bmain,
CLOG_ERROR(
&LOG,
"While processing indirect level %d, ID %s from lib %s of indirect level %d detected "
- "as needing resync.",
+ "as needing resync, skipping.",
library_indirect_level,
id->name,
id->lib->filepath,
id->lib->temp_index);
+ id->tag &= ~LIB_TAG_LIB_OVERRIDE_NEED_RESYNC;
+ return;
}
MainIDRelationsEntry *entry = BLI_ghash_lookup(bmain->relations->relations_from_pointers, id);
@@ -1532,6 +1673,14 @@ static void lib_override_library_main_resync_on_library_indirect_level(
/* Detect all linked data that would need to be overridden if we had to create an override from
* those used by current existing overrides. */
+ LibOverrideGroupTagData data = {.bmain = bmain,
+ .scene = scene,
+ .id_root = NULL,
+ .tag = LIB_TAG_DOIT,
+ .missing_tag = LIB_TAG_MISSING,
+ .is_override = false,
+ .is_resync = true};
+ lib_override_group_tag_data_object_to_collection_init(&data);
ID *id;
FOREACH_MAIN_ID_BEGIN (bmain, id) {
if (!ID_IS_OVERRIDE_LIBRARY_REAL(id)) {
@@ -1542,19 +1691,19 @@ static void lib_override_library_main_resync_on_library_indirect_level(
continue;
}
- LibOverrideGroupTagData data = {.bmain = bmain,
- .scene = scene,
- .id_root = id->override_library->reference,
- .tag = LIB_TAG_DOIT,
- .missing_tag = LIB_TAG_MISSING,
- .is_override = false,
- .is_resync = true};
+ if (id->override_library->flag & IDOVERRIDE_LIBRARY_FLAG_NO_HIERARCHY) {
+ /* This ID is not part of an override hierarchy. */
+ continue;
+ }
+
+ data.id_root = id->override_library->reference;
lib_override_linked_group_tag(&data);
BKE_main_relations_tag_set(bmain, MAINIDRELATIONS_ENTRY_TAGS_PROCESSED, false);
lib_override_hierarchy_dependencies_recursive_tag(&data);
BKE_main_relations_tag_set(bmain, MAINIDRELATIONS_ENTRY_TAGS_PROCESSED, false);
}
FOREACH_MAIN_ID_END;
+ lib_override_group_tag_data_clear(&data);
/* Now check existing overrides, those needing resync will be the one either already tagged as
* such, or the one using linked data that is now tagged as needing override. */
@@ -1564,6 +1713,12 @@ static void lib_override_library_main_resync_on_library_indirect_level(
continue;
}
+ if (id->override_library->flag & IDOVERRIDE_LIBRARY_FLAG_NO_HIERARCHY) {
+ /* This ID is not part of an override hierarchy. */
+ BLI_assert((id->tag & LIB_TAG_LIB_OVERRIDE_NEED_RESYNC) == 0);
+ continue;
+ }
+
if (id->tag & LIB_TAG_LIB_OVERRIDE_NEED_RESYNC) {
CLOG_INFO(&LOG, 4, "ID %s (%p) was already tagged as needing resync", id->name, id->lib);
lib_override_resync_tagging_finalize_recurse(bmain, id, library_indirect_level);
@@ -1616,6 +1771,10 @@ static void lib_override_library_main_resync_on_library_indirect_level(
continue;
}
+ if (ID_IS_LINKED(id)) {
+ id->lib->tag |= LIBRARY_TAG_RESYNC_REQUIRED;
+ }
+
/* We cannot resync a scene that is currently active. */
if (id == &scene->id) {
id->tag &= ~LIB_TAG_LIB_OVERRIDE_NEED_RESYNC;
@@ -1642,7 +1801,7 @@ static void lib_override_library_main_resync_on_library_indirect_level(
do_continue = true;
CLOG_INFO(&LOG, 2, "Resyncing %s (%p)...", id->name, library);
- const bool success = BKE_lib_override_library_resync(
+ const bool success = lib_override_library_resync(
bmain, scene, view_layer, id, override_resync_residual_storage, false, false, reports);
CLOG_INFO(&LOG, 2, "\tSuccess: %d", success);
if (success) {
@@ -1772,6 +1931,16 @@ void BKE_lib_override_library_main_resync(Main *bmain,
if (BKE_collection_is_empty(override_resync_residual_storage)) {
BKE_collection_delete(bmain, override_resync_residual_storage, true);
}
+
+ LISTBASE_FOREACH (Library *, library, &bmain->libraries) {
+ if (library->tag & LIBRARY_TAG_RESYNC_REQUIRED) {
+ CLOG_INFO(&LOG,
+ 2,
+ "library '%s' contains some linked overrides that required recursive resync, "
+ "consider updating it",
+ library->filepath);
+ }
+ }
}
void BKE_lib_override_library_delete(Main *bmain, ID *id_root)
@@ -1787,9 +1956,11 @@ void BKE_lib_override_library_delete(Main *bmain, ID *id_root)
.missing_tag = LIB_TAG_MISSING,
.is_override = true,
.is_resync = false};
+ lib_override_group_tag_data_object_to_collection_init(&data);
lib_override_overrides_group_tag(&data);
BKE_main_relations_free(bmain);
+ lib_override_group_tag_data_clear(&data);
ID *id;
FOREACH_MAIN_ID_BEGIN (bmain, id) {
diff --git a/source/blender/blenkernel/intern/lib_query.c b/source/blender/blenkernel/intern/lib_query.c
index 4ad0186f9b5..1f20a84098c 100644
--- a/source/blender/blenkernel/intern/lib_query.c
+++ b/source/blender/blenkernel/intern/lib_query.c
@@ -439,7 +439,7 @@ bool BKE_library_id_can_use_idtype(ID *id_owner, const short id_type_used)
case ID_LA:
return (ELEM(id_type_used, ID_TE));
case ID_CA:
- return ELEM(id_type_used, ID_OB);
+ return ELEM(id_type_used, ID_OB, ID_IM);
case ID_KE:
/* Warning! key->from, could be more types in future? */
return ELEM(id_type_used, ID_ME, ID_CU, ID_LT);
diff --git a/source/blender/blenkernel/intern/lib_remap.c b/source/blender/blenkernel/intern/lib_remap.c
index 3cea0de32ee..c3ccedb9608 100644
--- a/source/blender/blenkernel/intern/lib_remap.c
+++ b/source/blender/blenkernel/intern/lib_remap.c
@@ -91,6 +91,97 @@ enum {
ID_REMAP_IS_USER_ONE_SKIPPED = 1 << 1, /* There was some skipped 'user_one' usages of old_id. */
};
+static void foreach_libblock_remap_callback_skip(const ID *id_owner,
+ ID **id_ptr,
+ IDRemap *id_remap_data,
+ const int cb_flag,
+ const bool is_indirect,
+ const bool is_reference,
+ const bool is_never_null,
+ const bool is_obj,
+ const bool is_obj_editmode)
+{
+ if (is_indirect) {
+ id_remap_data->skipped_indirect++;
+ if (is_obj) {
+ Object *ob = (Object *)id_owner;
+ if (ob->data == *id_ptr && ob->proxy != NULL) {
+ /* And another 'Proudly brought to you by Proxy Hell' hack!
+ * This will allow us to avoid clearing 'LIB_EXTERN' flag of obdata of proxies... */
+ id_remap_data->skipped_direct++;
+ }
+ }
+ }
+ else if (is_never_null || is_obj_editmode || is_reference) {
+ id_remap_data->skipped_direct++;
+ }
+ else {
+ BLI_assert(0);
+ }
+ if (cb_flag & IDWALK_CB_USER) {
+ id_remap_data->skipped_refcounted++;
+ }
+ else if (cb_flag & IDWALK_CB_USER_ONE) {
+ /* No need to count number of times this happens, just a flag is enough. */
+ id_remap_data->status |= ID_REMAP_IS_USER_ONE_SKIPPED;
+ }
+}
+
+static void foreach_libblock_remap_callback_apply(ID *id_owner,
+ ID *id_self,
+ ID *old_id,
+ ID *new_id,
+ ID **id_ptr,
+ IDRemap *id_remap_data,
+ const int cb_flag,
+ const bool is_indirect,
+ const bool is_never_null,
+ const bool force_user_refcount,
+ const bool is_obj_proxy)
+{
+ if (!is_never_null) {
+ *id_ptr = new_id;
+ DEG_id_tag_update_ex(id_remap_data->bmain,
+ id_self,
+ ID_RECALC_COPY_ON_WRITE | ID_RECALC_TRANSFORM | ID_RECALC_GEOMETRY);
+ if (id_self != id_owner) {
+ DEG_id_tag_update_ex(id_remap_data->bmain,
+ id_owner,
+ ID_RECALC_COPY_ON_WRITE | ID_RECALC_TRANSFORM | ID_RECALC_GEOMETRY);
+ }
+ }
+ if (cb_flag & IDWALK_CB_USER) {
+ /* NOTE: by default we don't user-count IDs which are not in the main database.
+ * This is because in certain conditions we can have data-blocks in
+ * the main which are referencing data-blocks outside of it.
+ * For example, BKE_mesh_new_from_object() called on an evaluated
+ * object will cause such situation.
+ */
+ if (force_user_refcount || (old_id->tag & LIB_TAG_NO_MAIN) == 0) {
+ id_us_min(old_id);
+ }
+ if (new_id != NULL && (force_user_refcount || (new_id->tag & LIB_TAG_NO_MAIN) == 0)) {
+ /* We do not want to handle LIB_TAG_INDIRECT/LIB_TAG_EXTERN here. */
+ new_id->us++;
+ }
+ }
+ else if (cb_flag & IDWALK_CB_USER_ONE) {
+ id_us_ensure_real(new_id);
+ /* We cannot affect old_id->us directly, LIB_TAG_EXTRAUSER(_SET)
+ * are assumed to be set as needed, that extra user is processed in final handling. */
+ }
+ if (!is_indirect || is_obj_proxy) {
+ id_remap_data->status |= ID_REMAP_IS_LINKED_DIRECT;
+ }
+ /* We need to remap proxy_from pointer of remapped proxy... sigh. */
+ if (is_obj_proxy && new_id != NULL) {
+ Object *ob = (Object *)id_owner;
+ if (ob->proxy == (Object *)new_id) {
+ ob->proxy->proxy_from = ob;
+ }
+ }
+}
+
static int foreach_libblock_remap_callback(LibraryIDLinkCallbackData *cb_data)
{
const int cb_flag = cb_data->cb_flag;
@@ -116,125 +207,82 @@ static int foreach_libblock_remap_callback(LibraryIDLinkCallbackData *cb_data)
old_id = *id_p;
}
- if (*id_p && (*id_p == old_id)) {
- /* Better remap to NULL than not remapping at all,
- * then we can handle it as a regular remap-to-NULL case. */
- if ((cb_flag & IDWALK_CB_NEVER_SELF) && (new_id == id_self)) {
- new_id = NULL;
- }
+ /* Early exit when id pointer isn't set to an expected value. */
+ if (*id_p == NULL || *id_p != old_id) {
+ return IDWALK_RET_NOP;
+ }
+
+ /* Better remap to NULL than not remapping at all,
+ * then we can handle it as a regular remap-to-NULL case. */
+ if ((cb_flag & IDWALK_CB_NEVER_SELF) && (new_id == id_self)) {
+ new_id = NULL;
+ }
- const bool is_reference = (cb_flag & IDWALK_CB_OVERRIDE_LIBRARY_REFERENCE) != 0;
- const bool is_indirect = (cb_flag & IDWALK_CB_INDIRECT_USAGE) != 0;
- const bool skip_indirect = (id_remap_data->flag & ID_REMAP_SKIP_INDIRECT_USAGE) != 0;
- /* NOTE: proxy usage implies LIB_TAG_EXTERN, so on this aspect it is direct,
- * on the other hand since they get reset to lib data on file open/reload it is indirect too.
- * Edit Mode is also a 'skip direct' case. */
- const bool is_obj = (GS(id_owner->name) == ID_OB);
- const bool is_obj_proxy = (is_obj &&
- (((Object *)id_owner)->proxy || ((Object *)id_owner)->proxy_group));
- const bool is_obj_editmode = (is_obj && BKE_object_is_in_editmode((Object *)id_owner) &&
- (id_remap_data->flag & ID_REMAP_FORCE_OBDATA_IN_EDITMODE) == 0);
- const bool is_never_null = ((cb_flag & IDWALK_CB_NEVER_NULL) && (new_id == NULL) &&
- (id_remap_data->flag & ID_REMAP_FORCE_NEVER_NULL_USAGE) == 0);
- const bool skip_reference = (id_remap_data->flag & ID_REMAP_SKIP_OVERRIDE_LIBRARY) != 0;
- const bool skip_never_null = (id_remap_data->flag & ID_REMAP_SKIP_NEVER_NULL_USAGE) != 0;
- const bool force_user_refcount = (id_remap_data->flag & ID_REMAP_FORCE_USER_REFCOUNT) != 0;
+ const bool is_reference = (cb_flag & IDWALK_CB_OVERRIDE_LIBRARY_REFERENCE) != 0;
+ const bool is_indirect = (cb_flag & IDWALK_CB_INDIRECT_USAGE) != 0;
+ const bool skip_indirect = (id_remap_data->flag & ID_REMAP_SKIP_INDIRECT_USAGE) != 0;
+ /* NOTE: proxy usage implies LIB_TAG_EXTERN, so on this aspect it is direct,
+ * on the other hand since they get reset to lib data on file open/reload it is indirect too.
+ * Edit Mode is also a 'skip direct' case. */
+ const bool is_obj = (GS(id_owner->name) == ID_OB);
+ const bool is_obj_proxy = (is_obj &&
+ (((Object *)id_owner)->proxy || ((Object *)id_owner)->proxy_group));
+ const bool is_obj_editmode = (is_obj && BKE_object_is_in_editmode((Object *)id_owner) &&
+ (id_remap_data->flag & ID_REMAP_FORCE_OBDATA_IN_EDITMODE) == 0);
+ const bool is_never_null = ((cb_flag & IDWALK_CB_NEVER_NULL) && (new_id == NULL) &&
+ (id_remap_data->flag & ID_REMAP_FORCE_NEVER_NULL_USAGE) == 0);
+ const bool skip_reference = (id_remap_data->flag & ID_REMAP_SKIP_OVERRIDE_LIBRARY) != 0;
+ const bool skip_never_null = (id_remap_data->flag & ID_REMAP_SKIP_NEVER_NULL_USAGE) != 0;
+ const bool force_user_refcount = (id_remap_data->flag & ID_REMAP_FORCE_USER_REFCOUNT) != 0;
#ifdef DEBUG_PRINT
- printf(
- "In %s (lib %p): Remapping %s (%p) to %s (%p) "
- "(is_indirect: %d, skip_indirect: %d, is_reference: %d, skip_reference: %d)\n",
- id->name,
- id->lib,
- old_id->name,
- old_id,
- new_id ? new_id->name : "<NONE>",
- new_id,
- is_indirect,
- skip_indirect,
- is_reference,
- skip_reference);
+ printf(
+ "In %s (lib %p): Remapping %s (%p) to %s (%p) "
+ "(is_indirect: %d, skip_indirect: %d, is_reference: %d, skip_reference: %d)\n",
+ id->name,
+ id->lib,
+ old_id->name,
+ old_id,
+ new_id ? new_id->name : "<NONE>",
+ new_id,
+ is_indirect,
+ skip_indirect,
+ is_reference,
+ skip_reference);
#endif
- if ((id_remap_data->flag & ID_REMAP_FLAG_NEVER_NULL_USAGE) &&
- (cb_flag & IDWALK_CB_NEVER_NULL)) {
- id_owner->tag |= LIB_TAG_DOIT;
- }
+ if ((id_remap_data->flag & ID_REMAP_FLAG_NEVER_NULL_USAGE) && (cb_flag & IDWALK_CB_NEVER_NULL)) {
+ id_owner->tag |= LIB_TAG_DOIT;
+ }
- /* Special hack in case it's Object->data and we are in edit mode, and new_id is not NULL
- * (otherwise, we follow common NEVER_NULL flags).
- * (skipped_indirect too). */
- if ((is_never_null && skip_never_null) ||
- (is_obj_editmode && (((Object *)id_owner)->data == *id_p) && new_id != NULL) ||
- (skip_indirect && is_indirect) || (is_reference && skip_reference)) {
- if (is_indirect) {
- id_remap_data->skipped_indirect++;
- if (is_obj) {
- Object *ob = (Object *)id_owner;
- if (ob->data == *id_p && ob->proxy != NULL) {
- /* And another 'Proudly brought to you by Proxy Hell' hack!
- * This will allow us to avoid clearing 'LIB_EXTERN' flag of obdata of proxies... */
- id_remap_data->skipped_direct++;
- }
- }
- }
- else if (is_never_null || is_obj_editmode || is_reference) {
- id_remap_data->skipped_direct++;
- }
- else {
- BLI_assert(0);
- }
- if (cb_flag & IDWALK_CB_USER) {
- id_remap_data->skipped_refcounted++;
- }
- else if (cb_flag & IDWALK_CB_USER_ONE) {
- /* No need to count number of times this happens, just a flag is enough. */
- id_remap_data->status |= ID_REMAP_IS_USER_ONE_SKIPPED;
- }
- }
- else {
- if (!is_never_null) {
- *id_p = new_id;
- DEG_id_tag_update_ex(id_remap_data->bmain,
- id_self,
- ID_RECALC_COPY_ON_WRITE | ID_RECALC_TRANSFORM | ID_RECALC_GEOMETRY);
- if (id_self != id_owner) {
- DEG_id_tag_update_ex(id_remap_data->bmain,
- id_owner,
- ID_RECALC_COPY_ON_WRITE | ID_RECALC_TRANSFORM | ID_RECALC_GEOMETRY);
- }
- }
- if (cb_flag & IDWALK_CB_USER) {
- /* NOTE: by default we don't user-count IDs which are not in the main database.
- * This is because in certain conditions we can have data-blocks in
- * the main which are referencing data-blocks outside of it.
- * For example, BKE_mesh_new_from_object() called on an evaluated
- * object will cause such situation.
- */
- if (force_user_refcount || (old_id->tag & LIB_TAG_NO_MAIN) == 0) {
- id_us_min(old_id);
- }
- if (new_id != NULL && (force_user_refcount || (new_id->tag & LIB_TAG_NO_MAIN) == 0)) {
- /* We do not want to handle LIB_TAG_INDIRECT/LIB_TAG_EXTERN here. */
- new_id->us++;
- }
- }
- else if (cb_flag & IDWALK_CB_USER_ONE) {
- id_us_ensure_real(new_id);
- /* We cannot affect old_id->us directly, LIB_TAG_EXTRAUSER(_SET)
- * are assumed to be set as needed, that extra user is processed in final handling. */
- }
- if (!is_indirect || is_obj_proxy) {
- id_remap_data->status |= ID_REMAP_IS_LINKED_DIRECT;
- }
- /* We need to remap proxy_from pointer of remapped proxy... sigh. */
- if (is_obj_proxy && new_id != NULL) {
- Object *ob = (Object *)id_owner;
- if (ob->proxy == (Object *)new_id) {
- ob->proxy->proxy_from = ob;
- }
- }
- }
+ /* Special hack in case it's Object->data and we are in edit mode, and new_id is not NULL
+ * (otherwise, we follow common NEVER_NULL flags).
+ * (skipped_indirect too). */
+ if ((is_never_null && skip_never_null) ||
+ (is_obj_editmode && (((Object *)id_owner)->data == *id_p) && new_id != NULL) ||
+ (skip_indirect && is_indirect) || (is_reference && skip_reference)) {
+ foreach_libblock_remap_callback_skip(id_owner,
+ id_p,
+ id_remap_data,
+ cb_flag,
+ is_indirect,
+ is_reference,
+ is_never_null,
+ is_obj,
+ is_obj_editmode);
+ }
+ else {
+ foreach_libblock_remap_callback_apply(id_owner,
+ id_self,
+ old_id,
+ new_id,
+ id_p,
+ id_remap_data,
+ cb_flag,
+ is_indirect,
+ is_never_null,
+ force_user_refcount,
+ is_obj_proxy);
}
return IDWALK_RET_NOP;
@@ -282,6 +330,11 @@ static void libblock_remap_data_postprocess_object_update(Main *bmain,
* to remove the NULL children from collections not used in any scene. */
BKE_collections_object_remove_nulls(bmain);
}
+ else {
+ /* Remapping may have created duplicates of CollectionObject pointing to the same object within
+ * the same collection. */
+ BKE_collections_object_remove_duplicates(bmain);
+ }
BKE_main_collection_sync_remap(bmain);
@@ -319,6 +372,7 @@ static void libblock_remap_data_postprocess_collection_update(Main *bmain,
else {
/* Temp safe fix, but a "tad" brute force... We should probably be able to use parents from
* old_collection instead? */
+ /* NOTE: Also takes care of duplicated child collections that remapping may have created. */
BKE_main_collections_parent_relations_rebuild(bmain);
}
@@ -346,7 +400,7 @@ static void libblock_remap_data_postprocess_obdata_relink(Main *bmain, Object *o
static void libblock_remap_data_postprocess_nodetree_update(Main *bmain, ID *new_id)
{
/* Update all group nodes using a node group. */
- ntreeUpdateAllUsers(bmain, new_id, 0);
+ ntreeUpdateAllUsers(bmain, new_id);
}
/**
@@ -456,11 +510,18 @@ static void libblock_remap_data(
#endif
}
-void BKE_libblock_remap_locked(Main *bmain, void *old_idv, void *new_idv, const short remap_flags)
+typedef struct LibblockRemapMultipleUserData {
+ Main *bmain;
+ short remap_flags;
+} LibBlockRemapMultipleUserData;
+
+static void libblock_remap_foreach_idpair_cb(ID *old_id, ID *new_id, void *user_data)
{
+ LibBlockRemapMultipleUserData *data = user_data;
+ Main *bmain = data->bmain;
+ const short remap_flags = data->remap_flags;
+
IDRemap id_remap_data;
- ID *old_id = old_idv;
- ID *new_id = new_idv;
int skipped_direct, skipped_refcounted;
BLI_assert(old_id != NULL);
@@ -473,13 +534,6 @@ void BKE_libblock_remap_locked(Main *bmain, void *old_idv, void *new_idv, const
free_notifier_reference_cb(old_id);
}
- /* We assume editors do not hold references to their IDs... This is false in some cases
- * (Image is especially tricky here),
- * editors' code is to handle refcount (id->us) itself then. */
- if (remap_editor_id_reference_cb) {
- remap_editor_id_reference_cb(old_id, new_id);
- }
-
skipped_direct = id_remap_data.skipped_direct;
skipped_refcounted = id_remap_data.skipped_refcounted;
@@ -552,6 +606,41 @@ void BKE_libblock_remap_locked(Main *bmain, void *old_idv, void *new_idv, const
DEG_relations_tag_update(bmain);
}
+void BKE_libblock_remap_multiple_locked(Main *bmain,
+ const struct IDRemapper *mappings,
+ const short remap_flags)
+{
+ if (BKE_id_remapper_is_empty(mappings)) {
+ /* Early exit, nothing to do. */
+ return;
+ }
+
+ LibBlockRemapMultipleUserData user_data;
+ user_data.bmain = bmain;
+ user_data.remap_flags = remap_flags;
+ BKE_id_remapper_iter(mappings, libblock_remap_foreach_idpair_cb, &user_data);
+
+ /* We assume editors do not hold references to their IDs... This is false in some cases
+ * (Image is especially tricky here); in those cases the editor code is expected to handle
+ * the reference count (id->us) itself. */
+ if (remap_editor_id_reference_cb) {
+ remap_editor_id_reference_cb(mappings);
+ }
+
+ /* Full rebuild of DEG! */
+ DEG_relations_tag_update(bmain);
+}
+
+void BKE_libblock_remap_locked(Main *bmain, void *old_idv, void *new_idv, const short remap_flags)
+{
+ struct IDRemapper *remapper = BKE_id_remapper_create();
+ ID *old_id = old_idv;
+ ID *new_id = new_idv;
+ BKE_id_remapper_add(remapper, old_id, new_id);
+ BKE_libblock_remap_multiple_locked(bmain, remapper, remap_flags);
+ BKE_id_remapper_free(remapper);
+}
+
void BKE_libblock_remap(Main *bmain, void *old_idv, void *new_idv, const short remap_flags)
{
BKE_main_lock(bmain);
@@ -561,6 +650,17 @@ void BKE_libblock_remap(Main *bmain, void *old_idv, void *new_idv, const short r
BKE_main_unlock(bmain);
}
+void BKE_libblock_remap_multiple(Main *bmain,
+ const struct IDRemapper *mappings,
+ const short remap_flags)
+{
+ BKE_main_lock(bmain);
+
+ BKE_libblock_remap_multiple_locked(bmain, mappings, remap_flags);
+
+ BKE_main_unlock(bmain);
+}
+
void BKE_libblock_unlink(Main *bmain,
void *idv,
const bool do_flag_never_null,
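
The lib_remap.c hunks above add a batched entry point, BKE_libblock_remap_multiple(), driven by the new IDRemapper collection, with BKE_libblock_remap_locked() reduced to a single-pair wrapper around it. A minimal caller-side sketch of the batched path, using only functions visible in this patch; the helper name remap_many and its ID arguments are illustrative, not part of the change:

    #include "DNA_ID.h"
    #include "BKE_lib_remap.h"
    #include "BKE_main.h"

    /* Sketch only: collect several old -> new ID pairs and apply them in one pass. */
    static void remap_many(Main *bmain, ID *old_a, ID *new_a, ID *old_b, ID *new_b)
    {
      struct IDRemapper *remapper = BKE_id_remapper_create();
      BKE_id_remapper_add(remapper, old_a, new_a);
      BKE_id_remapper_add(remapper, old_b, new_b);
      /* One traversal of bmain, one editor notification, one depsgraph relations tag. */
      BKE_libblock_remap_multiple(bmain, remapper, 0);
      BKE_id_remapper_free(remapper);
    }

This mirrors what BKE_libblock_remap_locked() now does internally for a single old/new pair.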
diff --git a/source/blender/blenkernel/intern/lib_remap_test.cc b/source/blender/blenkernel/intern/lib_remap_test.cc
new file mode 100644
index 00000000000..266ada3663d
--- /dev/null
+++ b/source/blender/blenkernel/intern/lib_remap_test.cc
@@ -0,0 +1,369 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2022 by Blender Foundation.
+ */
+#include "testing/testing.h"
+
+#include "BLI_utildefines.h"
+
+#include "CLG_log.h"
+
+#include "DNA_mesh_types.h"
+#include "DNA_node_types.h"
+#include "DNA_object_types.h"
+#include "DNA_scene_types.h"
+
+#include "RNA_define.h"
+
+#include "BKE_appdir.h"
+#include "BKE_context.h"
+#include "BKE_global.h"
+#include "BKE_idtype.h"
+#include "BKE_lib_id.h"
+#include "BKE_lib_remap.h"
+#include "BKE_main.h"
+#include "BKE_mesh.h"
+#include "BKE_node.h"
+#include "BKE_object.h"
+#include "BKE_scene.h"
+
+#include "IMB_imbuf.h"
+
+#include "ED_node.h"
+
+#include "MEM_guardedalloc.h"
+
+namespace blender::bke::tests {
+
+class TestData {
+ public:
+ Main *bmain = nullptr;
+ struct bContext *C = nullptr;
+
+ virtual void setup()
+ {
+ if (bmain == nullptr) {
+ bmain = BKE_main_new();
+ G.main = bmain;
+ }
+
+ if (C == nullptr) {
+ C = CTX_create();
+ CTX_data_main_set(C, bmain);
+ }
+ }
+
+ virtual void teardown()
+ {
+ if (bmain != nullptr) {
+ BKE_main_free(bmain);
+ bmain = nullptr;
+ G.main = nullptr;
+ }
+
+ if (C != nullptr) {
+ CTX_free(C);
+ C = nullptr;
+ }
+ }
+};
+
+class SceneTestData : public TestData {
+ public:
+ Scene *scene = nullptr;
+ void setup() override
+ {
+ TestData::setup();
+ scene = BKE_scene_add(bmain, "IDRemapScene");
+ CTX_data_scene_set(C, scene);
+ }
+};
+
+class CompositorTestData : public SceneTestData {
+ public:
+ bNodeTree *compositor_nodetree = nullptr;
+ void setup() override
+ {
+ SceneTestData::setup();
+ ED_node_composit_default(C, scene);
+ compositor_nodetree = scene->nodetree;
+ }
+};
+
+class MeshTestData : public TestData {
+ public:
+ Mesh *mesh = nullptr;
+
+ void setup() override
+ {
+ TestData::setup();
+ mesh = BKE_mesh_add(bmain, nullptr);
+ }
+};
+
+class TwoMeshesTestData : public MeshTestData {
+ public:
+ Mesh *other_mesh = nullptr;
+
+ void setup() override
+ {
+ MeshTestData::setup();
+ other_mesh = BKE_mesh_add(bmain, nullptr);
+ }
+};
+
+class MeshObjectTestData : public MeshTestData {
+ public:
+ Object *object;
+ void setup() override
+ {
+ MeshTestData::setup();
+
+ object = BKE_object_add_only_object(bmain, OB_MESH, nullptr);
+ object->data = mesh;
+ }
+};
+
+template<typename TestData> class Context {
+ public:
+ TestData test_data;
+
+ Context()
+ {
+ CLG_init();
+ BKE_idtype_init();
+ RNA_init();
+ BKE_node_system_init();
+ BKE_appdir_init();
+ IMB_init();
+
+ test_data.setup();
+ }
+
+ ~Context()
+ {
+ test_data.teardown();
+
+ BKE_node_system_exit();
+ RNA_exit();
+ IMB_exit();
+ BKE_appdir_exit();
+ CLG_exit();
+ }
+};
+
+/* -------------------------------------------------------------------- */
+/** \name Embedded IDs
+ * \{ */
+
+TEST(lib_remap, embedded_ids_can_not_be_remapped)
+{
+ Context<CompositorTestData> context;
+ bNodeTree *other_tree = static_cast<bNodeTree *>(BKE_id_new_nomain(ID_NT, nullptr));
+
+ EXPECT_NE(context.test_data.scene, nullptr);
+ EXPECT_NE(context.test_data.compositor_nodetree, nullptr);
+ EXPECT_EQ(context.test_data.compositor_nodetree, context.test_data.scene->nodetree);
+
+ BKE_libblock_remap(
+ context.test_data.bmain, context.test_data.compositor_nodetree, other_tree, 0);
+
+ EXPECT_EQ(context.test_data.compositor_nodetree, context.test_data.scene->nodetree);
+ EXPECT_NE(context.test_data.scene->nodetree, other_tree);
+
+ BKE_id_free(nullptr, other_tree);
+}
+
+TEST(lib_remap, embedded_ids_can_not_be_deleted)
+{
+ Context<CompositorTestData> context;
+
+ EXPECT_NE(context.test_data.scene, nullptr);
+ EXPECT_NE(context.test_data.compositor_nodetree, nullptr);
+ EXPECT_EQ(context.test_data.compositor_nodetree, context.test_data.scene->nodetree);
+
+ BKE_libblock_remap(context.test_data.bmain,
+ context.test_data.compositor_nodetree,
+ nullptr,
+ ID_REMAP_SKIP_NEVER_NULL_USAGE);
+
+ EXPECT_EQ(context.test_data.compositor_nodetree, context.test_data.scene->nodetree);
+ EXPECT_NE(context.test_data.scene->nodetree, nullptr);
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Remap to self
+ * \{ */
+
+TEST(lib_remap, delete_when_remap_to_self_not_allowed)
+{
+ Context<TwoMeshesTestData> context;
+
+ EXPECT_NE(context.test_data.mesh, nullptr);
+ EXPECT_NE(context.test_data.other_mesh, nullptr);
+ context.test_data.mesh->texcomesh = context.test_data.other_mesh;
+
+ BKE_libblock_remap(
+ context.test_data.bmain, context.test_data.other_mesh, context.test_data.mesh, 0);
+
+ EXPECT_EQ(context.test_data.mesh->texcomesh, nullptr);
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name User Reference Counting
+ * \{ */
+
+TEST(lib_remap, users_are_decreased_when_not_skipping_never_null)
+{
+ Context<MeshObjectTestData> context;
+
+ EXPECT_NE(context.test_data.object, nullptr);
+ EXPECT_EQ(context.test_data.object->data, context.test_data.mesh);
+ EXPECT_EQ(context.test_data.object->id.tag & LIB_TAG_DOIT, 0);
+ EXPECT_EQ(context.test_data.mesh->id.us, 1);
+
+ /* This is an invalid situation; the test checks this in-between state until we have a better
+ * solution. */
+ BKE_libblock_remap(context.test_data.bmain, context.test_data.mesh, nullptr, 0);
+ EXPECT_EQ(context.test_data.mesh->id.us, 0);
+ EXPECT_EQ(context.test_data.object->data, context.test_data.mesh);
+ EXPECT_NE(context.test_data.object->data, nullptr);
+ EXPECT_EQ(context.test_data.object->id.tag & LIB_TAG_DOIT, 0);
+}
+
+TEST(lib_remap, users_are_same_when_skipping_never_null)
+{
+ Context<MeshObjectTestData> context;
+
+ EXPECT_NE(context.test_data.object, nullptr);
+ EXPECT_EQ(context.test_data.object->data, context.test_data.mesh);
+ EXPECT_EQ(context.test_data.object->id.tag & LIB_TAG_DOIT, 0);
+ EXPECT_EQ(context.test_data.mesh->id.us, 1);
+
+ BKE_libblock_remap(
+ context.test_data.bmain, context.test_data.mesh, nullptr, ID_REMAP_SKIP_NEVER_NULL_USAGE);
+ EXPECT_EQ(context.test_data.mesh->id.us, 1);
+ EXPECT_EQ(context.test_data.object->data, context.test_data.mesh);
+ EXPECT_NE(context.test_data.object->data, nullptr);
+ EXPECT_EQ(context.test_data.object->id.tag & LIB_TAG_DOIT, 0);
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Never Null
+ * \{ */
+
+TEST(lib_remap, do_not_delete_when_cannot_unset)
+{
+ Context<MeshObjectTestData> context;
+
+ EXPECT_NE(context.test_data.object, nullptr);
+ EXPECT_EQ(context.test_data.object->data, context.test_data.mesh);
+
+ BKE_libblock_remap(
+ context.test_data.bmain, context.test_data.mesh, nullptr, ID_REMAP_SKIP_NEVER_NULL_USAGE);
+ EXPECT_EQ(context.test_data.object->data, context.test_data.mesh);
+ EXPECT_NE(context.test_data.object->data, nullptr);
+}
+
+TEST(lib_remap, force_never_null_usage)
+{
+ Context<MeshObjectTestData> context;
+
+ EXPECT_NE(context.test_data.object, nullptr);
+ EXPECT_EQ(context.test_data.object->data, context.test_data.mesh);
+
+ BKE_libblock_remap(
+ context.test_data.bmain, context.test_data.mesh, nullptr, ID_REMAP_FORCE_NEVER_NULL_USAGE);
+ EXPECT_EQ(context.test_data.object->data, nullptr);
+}
+
+TEST(lib_remap, never_null_usage_flag_not_requested_on_delete)
+{
+ Context<MeshObjectTestData> context;
+
+ EXPECT_NE(context.test_data.object, nullptr);
+ EXPECT_EQ(context.test_data.object->data, context.test_data.mesh);
+ EXPECT_EQ(context.test_data.object->id.tag & LIB_TAG_DOIT, 0);
+
+ /* Never null usage isn't requested so the flag should not be set. */
+ BKE_libblock_remap(
+ context.test_data.bmain, context.test_data.mesh, nullptr, ID_REMAP_SKIP_NEVER_NULL_USAGE);
+ EXPECT_EQ(context.test_data.object->data, context.test_data.mesh);
+ EXPECT_NE(context.test_data.object->data, nullptr);
+ EXPECT_EQ(context.test_data.object->id.tag & LIB_TAG_DOIT, 0);
+}
+
+TEST(lib_remap, never_null_usage_flag_requested_on_delete)
+{
+ Context<MeshObjectTestData> context;
+
+ EXPECT_NE(context.test_data.object, nullptr);
+ EXPECT_EQ(context.test_data.object->data, context.test_data.mesh);
+ EXPECT_EQ(context.test_data.object->id.tag & LIB_TAG_DOIT, 0);
+
+ /* Never null usage is requested so the flag should be set. */
+ BKE_libblock_remap(context.test_data.bmain,
+ context.test_data.mesh,
+ nullptr,
+ ID_REMAP_SKIP_NEVER_NULL_USAGE | ID_REMAP_FLAG_NEVER_NULL_USAGE);
+ EXPECT_EQ(context.test_data.object->data, context.test_data.mesh);
+ EXPECT_NE(context.test_data.object->data, nullptr);
+ EXPECT_EQ(context.test_data.object->id.tag & LIB_TAG_DOIT, LIB_TAG_DOIT);
+}
+
+TEST(lib_remap, never_null_usage_flag_not_requested_on_remap)
+{
+ Context<MeshObjectTestData> context;
+ Mesh *other_mesh = BKE_mesh_add(context.test_data.bmain, nullptr);
+
+ EXPECT_NE(context.test_data.object, nullptr);
+ EXPECT_EQ(context.test_data.object->data, context.test_data.mesh);
+ EXPECT_EQ(context.test_data.object->id.tag & LIB_TAG_DOIT, 0);
+
+ /* Never null usage isn't requested so the flag should not be set. */
+ BKE_libblock_remap(
+ context.test_data.bmain, context.test_data.mesh, other_mesh, ID_REMAP_SKIP_NEVER_NULL_USAGE);
+ EXPECT_EQ(context.test_data.object->data, other_mesh);
+ EXPECT_EQ(context.test_data.object->id.tag & LIB_TAG_DOIT, 0);
+}
+
+TEST(lib_remap, never_null_usage_flag_requested_on_remap)
+{
+ Context<MeshObjectTestData> context;
+ Mesh *other_mesh = BKE_mesh_add(context.test_data.bmain, nullptr);
+
+ EXPECT_NE(context.test_data.object, nullptr);
+ EXPECT_EQ(context.test_data.object->data, context.test_data.mesh);
+ EXPECT_EQ(context.test_data.object->id.tag & LIB_TAG_DOIT, 0);
+
+ /* Never null usage is requested so the flag should be set. */
+ BKE_libblock_remap(context.test_data.bmain,
+ context.test_data.mesh,
+ other_mesh,
+ ID_REMAP_SKIP_NEVER_NULL_USAGE | ID_REMAP_FLAG_NEVER_NULL_USAGE);
+ EXPECT_EQ(context.test_data.object->data, other_mesh);
+ EXPECT_EQ(context.test_data.object->id.tag & LIB_TAG_DOIT, LIB_TAG_DOIT);
+}
+
+/** \} */
+
+} // namespace blender::bke::tests
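
The fixture classes above (TestData, its subclasses, and the Context<> wrapper) are written to be reusable. A hypothetical additional test built on the same fixture, combining it with the batched remapping API from lib_remap.c; the test name and assertion are a sketch, not part of the patch:

    TEST(lib_remap, remap_multiple_sketch)
    {
      Context<MeshObjectTestData> context;
      Mesh *other_mesh = BKE_mesh_add(context.test_data.bmain, nullptr);

      /* Batch a single mesh -> other_mesh mapping and apply it. */
      struct IDRemapper *remapper = BKE_id_remapper_create();
      BKE_id_remapper_add(remapper, &context.test_data.mesh->id, &other_mesh->id);
      BKE_libblock_remap_multiple(context.test_data.bmain, remapper, 0);
      BKE_id_remapper_free(remapper);

      /* The object's obdata pointer is expected to follow the mapping. */
      EXPECT_EQ(context.test_data.object->data, other_mesh);
    }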
diff --git a/source/blender/blenkernel/intern/linestyle.c b/source/blender/blenkernel/intern/linestyle.c
index ac0dbcb715d..95f41ab4b39 100644
--- a/source/blender/blenkernel/intern/linestyle.c
+++ b/source/blender/blenkernel/intern/linestyle.c
@@ -50,6 +50,7 @@
#include "BKE_linestyle.h"
#include "BKE_main.h"
#include "BKE_node.h"
+#include "BKE_node_tree_update.h"
#include "BKE_texture.h"
#include "BLO_read_write.h"
@@ -2085,5 +2086,5 @@ void BKE_linestyle_default_shader(const bContext *C, FreestyleLineStyle *linesty
tosock = BLI_findlink(&output_linestyle->inputs, 0); /* Color */
nodeAddLink(ntree, input_texure, fromsock, output_linestyle, tosock);
- ntreeUpdateTree(CTX_data_main(C), ntree);
+ BKE_ntree_update_main_tree(CTX_data_main(C), ntree, NULL);
}
diff --git a/source/blender/blenkernel/intern/mask.c b/source/blender/blenkernel/intern/mask.c
index 6f498c5c9e7..12bbab57cf2 100644
--- a/source/blender/blenkernel/intern/mask.c
+++ b/source/blender/blenkernel/intern/mask.c
@@ -38,6 +38,7 @@
#include "BLT_translation.h"
+#include "DNA_defaults.h"
#include "DNA_mask_types.h"
#include "BKE_animsys.h"
@@ -1308,7 +1309,7 @@ void BKE_mask_point_parent_matrix_get(MaskSplinePoint *point,
MovieTrackingObject *ob = BKE_tracking_object_get_named(tracking, parent->parent);
if (ob) {
- MovieClipUser user = {0};
+ MovieClipUser user = *DNA_struct_default_get(MovieClipUser);
float clip_framenr = BKE_movieclip_remap_scene_to_clip_frame(clip, ctime);
BKE_movieclip_user_set_frame(&user, ctime);
diff --git a/source/blender/blenkernel/intern/material.c b/source/blender/blenkernel/intern/material.c
index d6035887790..15469f910b4 100644
--- a/source/blender/blenkernel/intern/material.c
+++ b/source/blender/blenkernel/intern/material.c
@@ -720,8 +720,9 @@ static ID *get_evaluated_object_data_with_materials(Object *ob)
/* Meshes in edit mode need special handling. */
if (ob->type == OB_MESH && ob->mode == OB_MODE_EDIT) {
Mesh *mesh = ob->data;
- if (mesh->edit_mesh && mesh->edit_mesh->mesh_eval_final) {
- data = &mesh->edit_mesh->mesh_eval_final->id;
+ Mesh *editmesh_eval_final = BKE_object_get_editmesh_eval_final(ob);
+ if (mesh->edit_mesh && editmesh_eval_final) {
+ data = &editmesh_eval_final->id;
}
}
return data;
diff --git a/source/blender/blenkernel/intern/mesh.cc b/source/blender/blenkernel/intern/mesh.cc
index 05aa9111fa3..73fe279552d 100644
--- a/source/blender/blenkernel/intern/mesh.cc
+++ b/source/blender/blenkernel/intern/mesh.cc
@@ -38,11 +38,14 @@
#include "BLI_endian_switch.h"
#include "BLI_ghash.h"
#include "BLI_hash.h"
+#include "BLI_index_range.hh"
#include "BLI_linklist.h"
#include "BLI_listbase.h"
#include "BLI_math.h"
+#include "BLI_math_vec_types.hh"
#include "BLI_memarena.h"
#include "BLI_string.h"
+#include "BLI_task.hh"
#include "BLI_utildefines.h"
#include "BLT_translation.h"
@@ -91,6 +94,10 @@ static void mesh_init_data(ID *id)
BKE_mesh_runtime_init_data(mesh);
+ /* A newly created mesh does not have normals, so tag them dirty. This will be cleared
+ * by #BKE_mesh_vertex_normals_clear_dirty or #BKE_mesh_poly_normals_ensure. */
+ BKE_mesh_normals_tag_dirty(mesh);
+
mesh->face_sets_color_seed = BLI_hash_int(PIL_check_seconds_timer_i() & UINT_MAX);
}
@@ -143,16 +150,40 @@ static void mesh_copy_data(Main *bmain, ID *id_dst, const ID *id_src, const int
BKE_mesh_update_customdata_pointers(mesh_dst, do_tessface);
+ mesh_dst->cd_flag = mesh_src->cd_flag;
+
mesh_dst->edit_mesh = nullptr;
mesh_dst->mselect = (MSelect *)MEM_dupallocN(mesh_dst->mselect);
+ /* Set normal layers dirty, since they aren't included in CD_MASK_MESH and are therefore not
+ * copied to the destination mesh. Alternatively normal layers could be copied if they aren't
+ * dirty, avoiding recomputation in some cases. However, a copied mesh is often changed anyway,
+ * so that idea is not clearly better. With proper reference counting, all custom data layers
+ * could be copied as the cost would be much lower. */
+ BKE_mesh_normals_tag_dirty(mesh_dst);
+
/* TODO: Do we want to add flag to prevent this? */
if (mesh_src->key && (flag & LIB_ID_COPY_SHAPEKEY)) {
BKE_id_copy_ex(bmain, &mesh_src->key->id, (ID **)&mesh_dst->key, flag);
/* XXX This is not nice, we need to make BKE_id_copy_ex fully re-entrant... */
mesh_dst->key->from = &mesh_dst->id;
}
+
+ BKE_mesh_assert_normals_dirty_or_calculated(mesh_dst);
+}
+
+void BKE_mesh_free_editmesh(struct Mesh *mesh)
+{
+ if (mesh->edit_mesh == nullptr) {
+ return;
+ }
+
+ if (mesh->edit_mesh->is_shallow_copy == false) {
+ BKE_editmesh_free_data(mesh->edit_mesh);
+ }
+ MEM_freeN(mesh->edit_mesh);
+ mesh->edit_mesh = nullptr;
}
static void mesh_free_data(ID *id)
@@ -161,13 +192,7 @@ static void mesh_free_data(ID *id)
BLI_freelistN(&mesh->vertex_group_names);
- if (mesh->edit_mesh) {
- if (mesh->edit_mesh->is_shallow_copy == false) {
- BKE_editmesh_free_data(mesh->edit_mesh);
- }
- MEM_freeN(mesh->edit_mesh);
- mesh->edit_mesh = nullptr;
- }
+ BKE_mesh_free_editmesh(mesh);
BKE_mesh_runtime_free_data(mesh);
mesh_clear_geometry(mesh);
@@ -332,6 +357,10 @@ static void mesh_blend_read_data(BlendDataReader *reader, ID *id)
BLI_endian_switch_uint32_array(tf->col, 4);
}
}
+
+ /* We don't expect to load normals from files, since they are derived data. */
+ BKE_mesh_normals_tag_dirty(mesh);
+ BKE_mesh_assert_normals_dirty_or_calculated(mesh);
}
static void mesh_blend_read_lib(BlendLibReader *reader, ID *id)
@@ -453,14 +482,14 @@ static int customdata_compare(
for (int i = 0; i < c1->totlayer; i++) {
l1 = &c1->layers[i];
- if (CD_TYPE_AS_MASK(l1->type) & cd_mask_all_attr && l1->anonymous_id != nullptr) {
+ if ((CD_TYPE_AS_MASK(l1->type) & cd_mask_all_attr) && l1->anonymous_id == nullptr) {
layer_count1++;
}
}
for (int i = 0; i < c2->totlayer; i++) {
l2 = &c2->layers[i];
- if (CD_TYPE_AS_MASK(l1->type) & cd_mask_all_attr && l2->anonymous_id != nullptr) {
+ if ((CD_TYPE_AS_MASK(l2->type) & cd_mask_all_attr) && l2->anonymous_id == nullptr) {
layer_count2++;
}
}
@@ -1033,6 +1062,8 @@ void BKE_mesh_copy_parameters_for_eval(Mesh *me_dst, const Mesh *me_src)
BKE_mesh_copy_parameters(me_dst, me_src);
+ BKE_mesh_assert_normals_dirty_or_calculated(me_dst);
+
/* Copy vertex group names. */
BLI_assert(BLI_listbase_is_empty(&me_dst->vertex_group_names));
BKE_defgroup_copy_list(&me_dst->vertex_group_names, &me_src->vertex_group_names);
@@ -1080,6 +1111,18 @@ Mesh *BKE_mesh_new_nomain_from_template_ex(const Mesh *me_src,
mesh_tessface_clear_intern(me_dst, false);
}
+ me_dst->runtime.cd_dirty_poly = me_src->runtime.cd_dirty_poly;
+ me_dst->runtime.cd_dirty_vert = me_src->runtime.cd_dirty_vert;
+
+ /* Ensure that when no normal layers exist, they are marked dirty, because
+ * normals might not have been included in the mask of copied layers. */
+ if (!CustomData_has_layer(&me_dst->vdata, CD_NORMAL)) {
+ me_dst->runtime.cd_dirty_vert |= CD_MASK_NORMAL;
+ }
+ if (!CustomData_has_layer(&me_dst->pdata, CD_NORMAL)) {
+ me_dst->runtime.cd_dirty_poly |= CD_MASK_NORMAL;
+ }
+
/* The destination mesh should at least have valid primary CD layers,
* even in cases where the source mesh does not. */
mesh_ensure_cdlayers_primary(me_dst, do_tessface);
@@ -1577,13 +1620,35 @@ void BKE_mesh_looptri_get_real_edges(const Mesh *mesh, const MLoopTri *looptri,
bool BKE_mesh_minmax(const Mesh *me, float r_min[3], float r_max[3])
{
- int i = me->totvert;
- MVert *mvert;
- for (mvert = me->mvert; i--; mvert++) {
- minmax_v3v3_v3(r_min, r_max, mvert->co);
+ using namespace blender;
+ if (me->totvert == 0) {
+ return false;
}
- return (me->totvert != 0);
+ struct Result {
+ float3 min;
+ float3 max;
+ };
+
+ const Result minmax = threading::parallel_reduce(
+ IndexRange(me->totvert),
+ 1024,
+ Result{float3(FLT_MAX), float3(-FLT_MAX)},
+ [&](IndexRange range, const Result &init) {
+ Result result = init;
+ for (const int i : range) {
+ math::min_max(float3(me->mvert[i].co), result.min, result.max);
+ }
+ return result;
+ },
+ [](const Result &a, const Result &b) {
+ return Result{math::min(a.min, b.min), math::max(a.max, b.max)};
+ });
+
+ copy_v3_v3(r_min, math::min(minmax.min, float3(r_min)));
+ copy_v3_v3(r_max, math::max(minmax.max, float3(r_max)));
+
+ return true;
}
void BKE_mesh_transform(Mesh *me, const float mat[4][4], bool do_keys)
@@ -1857,24 +1922,10 @@ void BKE_mesh_vert_coords_apply_with_mat4(Mesh *mesh,
BKE_mesh_normals_tag_dirty(mesh);
}
-void BKE_mesh_vert_normals_apply(Mesh *mesh, const short (*vert_normals)[3])
-{
- /* This will just return the pointer if it wasn't a referenced layer. */
- MVert *mv = (MVert *)CustomData_duplicate_referenced_layer(
- &mesh->vdata, CD_MVERT, mesh->totvert);
- mesh->mvert = mv;
- for (int i = 0; i < mesh->totvert; i++, mv++) {
- copy_v3_v3_short(mv->no, vert_normals[i]);
- }
- mesh->runtime.cd_dirty_vert &= ~CD_MASK_NORMAL;
-}
-
void BKE_mesh_calc_normals_split_ex(Mesh *mesh, MLoopNorSpaceArray *r_lnors_spacearr)
{
float(*r_loopnors)[3];
- float(*polynors)[3];
short(*clnors)[2] = nullptr;
- bool free_polynors = false;
/* Note that we enforce computing clnors when the clnor space array is requested by caller here.
* However, we obviously only use the auto-smooth angle threshold
@@ -1896,26 +1947,8 @@ void BKE_mesh_calc_normals_split_ex(Mesh *mesh, MLoopNorSpaceArray *r_lnors_spac
/* may be nullptr */
clnors = (short(*)[2])CustomData_get_layer(&mesh->ldata, CD_CUSTOMLOOPNORMAL);
- if (CustomData_has_layer(&mesh->pdata, CD_NORMAL)) {
- /* This assume that layer is always up to date, not sure this is the case
- * (esp. in Edit mode?)... */
- polynors = (float(*)[3])CustomData_get_layer(&mesh->pdata, CD_NORMAL);
- free_polynors = false;
- }
- else {
- polynors = (float(*)[3])MEM_malloc_arrayN(mesh->totpoly, sizeof(float[3]), __func__);
- BKE_mesh_calc_normals_poly_and_vertex(mesh->mvert,
- mesh->totvert,
- mesh->mloop,
- mesh->totloop,
- mesh->mpoly,
- mesh->totpoly,
- polynors,
- nullptr);
- free_polynors = true;
- }
-
BKE_mesh_normals_loop_split(mesh->mvert,
+ BKE_mesh_vertex_normals_ensure(mesh),
mesh->totvert,
mesh->medge,
mesh->totedge,
@@ -1923,7 +1956,7 @@ void BKE_mesh_calc_normals_split_ex(Mesh *mesh, MLoopNorSpaceArray *r_lnors_spac
r_loopnors,
mesh->totloop,
mesh->mpoly,
- (const float(*)[3])polynors,
+ BKE_mesh_poly_normals_ensure(mesh),
mesh->totpoly,
use_split_normals,
split_angle,
@@ -1931,12 +1964,8 @@ void BKE_mesh_calc_normals_split_ex(Mesh *mesh, MLoopNorSpaceArray *r_lnors_spac
clnors,
nullptr);
- if (free_polynors) {
- MEM_freeN(polynors);
- }
+ BKE_mesh_assert_normals_dirty_or_calculated(mesh);
- mesh->runtime.cd_dirty_vert &= ~CD_MASK_NORMAL;
- mesh->runtime.cd_dirty_poly &= ~CD_MASK_NORMAL;
mesh->runtime.cd_dirty_loop &= ~CD_MASK_NORMAL;
}
@@ -1964,7 +1993,7 @@ struct SplitFaceNewEdge {
/* Detect needed new vertices, and update accordingly loops' vertex indices.
* WARNING! Leaves mesh in invalid state. */
-static int split_faces_prepare_new_verts(const Mesh *mesh,
+static int split_faces_prepare_new_verts(Mesh *mesh,
MLoopNorSpaceArray *lnors_spacearr,
SplitFaceNewVert **new_verts,
MemArena *memarena)
@@ -1976,8 +2005,9 @@ static int split_faces_prepare_new_verts(const Mesh *mesh,
const int loops_len = mesh->totloop;
int verts_len = mesh->totvert;
- MVert *mvert = mesh->mvert;
MLoop *mloop = mesh->mloop;
+ BKE_mesh_vertex_normals_ensure(mesh);
+ float(*vert_normals)[3] = BKE_mesh_vertex_normals_for_write(mesh);
BLI_bitmap *verts_used = BLI_BITMAP_NEW(verts_len, __func__);
BLI_bitmap *done_loops = BLI_BITMAP_NEW(loops_len, __func__);
@@ -2021,7 +2051,7 @@ static int split_faces_prepare_new_verts(const Mesh *mesh,
* vnor should always be defined to 'automatic normal' value computed from its polys,
* not some custom normal.
* Fortunately, that's the loop normal space's 'lnor' reference vector. ;) */
- normal_float_to_short_v3(mvert[vert_idx].no, (*lnor_space)->vec_lnor);
+ copy_v3_v3(vert_normals[vert_idx], (*lnor_space)->vec_lnor);
}
else {
/* Add new vert to list. */
@@ -2112,6 +2142,7 @@ static void split_faces_split_new_verts(Mesh *mesh,
{
const int verts_len = mesh->totvert - num_new_verts;
MVert *mvert = mesh->mvert;
+ float(*vert_normals)[3] = BKE_mesh_vertex_normals_for_write(mesh);
/* Remember new_verts is a single linklist, so its items are in reversed order... */
MVert *new_mv = &mvert[mesh->totvert - 1];
@@ -2120,9 +2151,10 @@ static void split_faces_split_new_verts(Mesh *mesh,
BLI_assert(new_verts->new_index != new_verts->orig_index);
CustomData_copy_data(&mesh->vdata, &mesh->vdata, new_verts->orig_index, i, 1);
if (new_verts->vnor) {
- normal_float_to_short_v3(new_mv->no, new_verts->vnor);
+ copy_v3_v3(vert_normals[i], new_verts->vnor);
}
}
+ BKE_mesh_vertex_normals_clear_dirty(mesh);
}
/* Perform actual split of edges. */
@@ -2210,6 +2242,7 @@ void BKE_mesh_split_faces(Mesh *mesh, bool free_loop_normals)
/* Also frees new_verts/edges temp data, since we used its memarena to allocate them. */
BKE_lnor_spacearr_free(&lnors_spacearr);
+ BKE_mesh_assert_normals_dirty_or_calculated(mesh);
#ifdef VALIDATE_MESH
BKE_mesh_validate(mesh, true, true);
#endif
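
Several of the mesh.cc changes above only tag normal layers dirty and defer the actual computation to the on-demand API added in mesh_normals.cc further below. A minimal sketch of the intended caller pattern, assuming the declarations live in BKE_mesh.h alongside the rest of the mesh API:

    #include "BLI_utildefines.h"
    #include "BKE_mesh.h"

    static void normals_usage_sketch(Mesh *mesh)
    {
      /* Read access: lazily computes and caches the CD_NORMAL layers when dirty. */
      const float(*vert_normals)[3] = BKE_mesh_vertex_normals_ensure(mesh);
      const float(*poly_normals)[3] = BKE_mesh_poly_normals_ensure(mesh);
      UNUSED_VARS(vert_normals, poly_normals);

      /* After deforming vertices, only tag; the next *_ensure() call recomputes. */
      BKE_mesh_normals_tag_dirty(mesh);

      /* Direct writes: take the mutable layer, fill it, then clear the dirty flag so that
       * BKE_mesh_assert_normals_dirty_or_calculated() stays satisfied. */
      float(*vert_normals_mut)[3] = BKE_mesh_vertex_normals_for_write(mesh);
      UNUSED_VARS(vert_normals_mut);
      BKE_mesh_vertex_normals_clear_dirty(mesh);
    }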
diff --git a/source/blender/blenkernel/intern/mesh_boolean_convert.cc b/source/blender/blenkernel/intern/mesh_boolean_convert.cc
index 771d79a0445..a4a5fe2be2e 100644
--- a/source/blender/blenkernel/intern/mesh_boolean_convert.cc
+++ b/source/blender/blenkernel/intern/mesh_boolean_convert.cc
@@ -32,9 +32,9 @@
#include "BLI_alloca.h"
#include "BLI_array.hh"
-#include "BLI_float2.hh"
#include "BLI_float4x4.hh"
#include "BLI_math.h"
+#include "BLI_math_vec_types.hh"
#include "BLI_mesh_boolean.hh"
#include "BLI_mesh_intersect.hh"
#include "BLI_span.hh"
diff --git a/source/blender/blenkernel/intern/mesh_convert.cc b/source/blender/blenkernel/intern/mesh_convert.cc
index e8054884f26..7d5f156040d 100644
--- a/source/blender/blenkernel/intern/mesh_convert.cc
+++ b/source/blender/blenkernel/intern/mesh_convert.cc
@@ -32,6 +32,7 @@
#include "DNA_scene_types.h"
#include "BLI_edgehash.h"
+#include "BLI_index_range.hh"
#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_string.h"
@@ -65,6 +66,8 @@
#include "DEG_depsgraph.h"
#include "DEG_depsgraph_query.h"
+using blender::IndexRange;
+
/* Define for cases when you want extra validation of mesh
* after certain modifications.
*/
@@ -85,7 +88,6 @@ void BKE_mesh_from_metaball(ListBase *lb, Mesh *me)
MVert *mvert;
MLoop *mloop, *allloop;
MPoly *mpoly;
- const float *nors, *verts;
int a, *index;
dl = (DispList *)lb->first;
@@ -104,15 +106,8 @@ void BKE_mesh_from_metaball(ListBase *lb, Mesh *me)
me->totvert = dl->nr;
me->totpoly = dl->parts;
- a = dl->nr;
- nors = dl->nors;
- verts = dl->verts;
- while (a--) {
- copy_v3_v3(mvert->co, verts);
- normal_float_to_short_v3(mvert->no, nors);
- mvert++;
- nors += 3;
- verts += 3;
+ for (const int i : IndexRange(dl->nr)) {
+ copy_v3_v3(me->mvert[i].co, &dl->verts[3 * i]);
}
a = dl->parts;
@@ -139,7 +134,7 @@ void BKE_mesh_from_metaball(ListBase *lb, Mesh *me)
BKE_mesh_update_customdata_pointers(me, true);
- BKE_mesh_calc_normals(me);
+ BKE_mesh_normals_tag_dirty(me);
BKE_mesh_calc_edges(me, true, false);
}
@@ -589,14 +584,14 @@ struct VertLink {
static void prependPolyLineVert(ListBase *lb, uint index)
{
- VertLink *vl = (VertLink *)MEM_callocN(sizeof(VertLink), "VertLink");
+ VertLink *vl = MEM_cnew<VertLink>("VertLink");
vl->index = index;
BLI_addhead(lb, vl);
}
static void appendPolyLineVert(ListBase *lb, uint index)
{
- VertLink *vl = (VertLink *)MEM_callocN(sizeof(VertLink), "VertLink");
+ VertLink *vl = MEM_cnew<VertLink>("VertLink");
vl->index = index;
BLI_addtail(lb, vl);
}
@@ -632,7 +627,7 @@ void BKE_mesh_to_curve_nurblist(const Mesh *me, ListBase *nurblist, const int ed
med = medge;
for (i = 0; i < medge_len; i++, med++) {
if (edge_users[i] == edge_users_test) {
- EdgeLink *edl = (EdgeLink *)MEM_callocN(sizeof(EdgeLink), "EdgeLink");
+ EdgeLink *edl = MEM_cnew<EdgeLink>("EdgeLink");
edl->edge = med;
BLI_addtail(&edges, edl);
@@ -719,7 +714,7 @@ void BKE_mesh_to_curve_nurblist(const Mesh *me, ListBase *nurblist, const int ed
VertLink *vl;
/* create new 'nurb' within the curve */
- nu = (Nurb *)MEM_callocN(sizeof(Nurb), "MeshNurb");
+ nu = MEM_cnew<Nurb>("MeshNurb");
nu->pntsu = totpoly;
nu->pntsv = 1;
@@ -901,6 +896,20 @@ static Object *object_for_curve_to_mesh_create(const Object *object)
return temp_object;
}
+static void object_for_curve_to_mesh_free(Object *temp_object)
+{
+ /* Clear edit mode pointers that were explicitly copied to the temporary curve. */
+ ID *final_object_data = static_cast<ID *>(temp_object->data);
+ if (GS(final_object_data->name) == ID_CU) {
+ Curve &curve = *reinterpret_cast<Curve *>(final_object_data);
+ curve.editfont = nullptr;
+ curve.editnurb = nullptr;
+ }
+
+ BKE_id_free(nullptr, temp_object->data);
+ BKE_id_free(nullptr, temp_object);
+}
+
/**
* Populate `object->runtime.curve_cache` which is then used to create the mesh.
*/
@@ -917,7 +926,7 @@ static void curve_to_mesh_eval_ensure(Object &object)
* will have no modifiers. */
Object bevel_object = {{nullptr}};
if (curve.bevobj != nullptr) {
- bevel_object = *curve.bevobj;
+ memcpy(&bevel_object, curve.bevobj, sizeof(bevel_object));
BLI_listbase_clear(&bevel_object.modifiers);
BKE_object_runtime_reset(&bevel_object);
curve.bevobj = &bevel_object;
@@ -926,7 +935,7 @@ static void curve_to_mesh_eval_ensure(Object &object)
/* Same thing for taper. */
Object taper_object = {{nullptr}};
if (curve.taperobj != nullptr) {
- taper_object = *curve.taperobj;
+ memcpy(&taper_object, curve.taperobj, sizeof(taper_object));
BLI_listbase_clear(&taper_object.modifiers);
BKE_object_runtime_reset(&taper_object);
curve.taperobj = &taper_object;
@@ -1003,8 +1012,7 @@ static Mesh *mesh_new_from_curve_type_object(const Object *object)
Mesh *mesh = mesh_new_from_evaluated_curve_type_object(temp_object);
- BKE_id_free(nullptr, temp_object->data);
- BKE_id_free(nullptr, temp_object);
+ object_for_curve_to_mesh_free(temp_object);
return mesh;
}
@@ -1065,7 +1073,8 @@ static Mesh *mesh_new_from_mesh_object_with_layers(Depsgraph *depsgraph,
return nullptr;
}
- Object object_for_eval = *object;
+ Object object_for_eval;
+ memcpy(&object_for_eval, object, sizeof(object_for_eval));
if (object_for_eval.runtime.data_orig != nullptr) {
object_for_eval.data = object_for_eval.runtime.data_orig;
}
@@ -1093,8 +1102,11 @@ static Mesh *mesh_new_from_mesh_object(Depsgraph *depsgraph,
Mesh *mesh_input = (Mesh *)object->data;
/* If we are in edit mode, use evaluated mesh from edit structure, matching to what
* viewport is using for visualization. */
- if (mesh_input->edit_mesh != nullptr && mesh_input->edit_mesh->mesh_eval_final) {
- mesh_input = mesh_input->edit_mesh->mesh_eval_final;
+ if (mesh_input->edit_mesh != nullptr) {
+ Mesh *editmesh_eval_final = BKE_object_get_editmesh_eval_final(object);
+ if (editmesh_eval_final != nullptr) {
+ mesh_input = editmesh_eval_final;
+ }
}
return mesh_new_from_mesh(object, mesh_input);
}
@@ -1440,7 +1452,8 @@ void BKE_mesh_nomain_to_mesh(Mesh *mesh_src,
/* mesh_src might depend on mesh_dst, so we need to do everything with a local copy */
/* TODO(Sybren): the above claim came from 2.7x derived-mesh code (DM_to_mesh);
* check whether it is still true with Mesh */
- Mesh tmp = *mesh_dst;
+ Mesh tmp;
+ memcpy(&tmp, mesh_dst, sizeof(tmp));
int totvert, totedge /*, totface */ /* UNUSED */, totloop, totpoly;
bool did_shapekeys = false;
eCDAllocType alloctype = CD_DUPLICATE;
@@ -1461,8 +1474,6 @@ void BKE_mesh_nomain_to_mesh(Mesh *mesh_src,
CustomData_reset(&tmp.ldata);
CustomData_reset(&tmp.pdata);
- BKE_mesh_ensure_normals(mesh_src);
-
totvert = tmp.totvert = mesh_src->totvert;
totedge = tmp.totedge = mesh_src->totedge;
totloop = tmp.totloop = mesh_src->totloop;
@@ -1476,6 +1487,18 @@ void BKE_mesh_nomain_to_mesh(Mesh *mesh_src,
tmp.cd_flag = mesh_src->cd_flag;
tmp.runtime.deformed_only = mesh_src->runtime.deformed_only;
+ tmp.runtime.cd_dirty_poly = mesh_src->runtime.cd_dirty_poly;
+ tmp.runtime.cd_dirty_vert = mesh_src->runtime.cd_dirty_vert;
+
+ /* Ensure that when no normal layers exist, they are marked dirty, because
+ * normals might not have been included in the mask of copied layers. */
+ if (!CustomData_has_layer(&tmp.vdata, CD_NORMAL)) {
+ tmp.runtime.cd_dirty_vert |= CD_MASK_NORMAL;
+ }
+ if (!CustomData_has_layer(&tmp.pdata, CD_NORMAL)) {
+ tmp.runtime.cd_dirty_poly |= CD_MASK_NORMAL;
+ }
+
if (CustomData_has_layer(&mesh_src->vdata, CD_SHAPEKEY)) {
KeyBlock *kb;
int uid;
@@ -1599,6 +1622,8 @@ void BKE_mesh_nomain_to_mesh(Mesh *mesh_src,
}
BKE_id_free(nullptr, mesh_src);
}
+
+ BKE_mesh_assert_normals_dirty_or_calculated(mesh_dst);
}
void BKE_mesh_nomain_to_meshkey(Mesh *mesh_src, Mesh *mesh_dst, KeyBlock *kb)
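
mesh_convert.cc now allocates the small link structs with the typed MEM_cnew<T>() helper instead of MEM_callocN() plus a cast. A short sketch of the pattern with a hypothetical struct, since the behaviour (zero-initialized allocation, freed with MEM_freeN()) is otherwise unchanged:

    #include "MEM_guardedalloc.h"

    struct ExampleLink {
      ExampleLink *next, *prev;
      unsigned int index;
    };

    static ExampleLink *example_link_new(const unsigned int index)
    {
      /* Typed, zero-initialized; replaces (ExampleLink *)MEM_callocN(sizeof(ExampleLink), ...). */
      ExampleLink *link = MEM_cnew<ExampleLink>(__func__);
      link->index = index;
      return link; /* Still freed with MEM_freeN(). */
    }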
diff --git a/source/blender/blenkernel/intern/mesh_debug.cc b/source/blender/blenkernel/intern/mesh_debug.cc
new file mode 100644
index 00000000000..017f96c2ece
--- /dev/null
+++ b/source/blender/blenkernel/intern/mesh_debug.cc
@@ -0,0 +1,115 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/** \file
+ * \ingroup bke
+ *
+ * Evaluated mesh info printing function, to help track down differences in output.
+ *
+ * Output from these functions can be evaluated as Python literals.
+ * See `bmesh_debug.c` for the equivalent #BMesh functionality.
+ */
+
+#ifndef NDEBUG
+
+# include <stdio.h>
+
+# include "MEM_guardedalloc.h"
+
+# include "DNA_mesh_types.h"
+# include "DNA_meshdata_types.h"
+# include "DNA_object_types.h"
+
+# include "BLI_utildefines.h"
+
+# include "BKE_customdata.h"
+
+# include "BKE_mesh.h"
+
+# include "BLI_dynstr.h"
+
+static void mesh_debug_info_from_cd_flag(const Mesh *me, DynStr *dynstr)
+{
+ BLI_dynstr_append(dynstr, "'cd_flag': {");
+ if (me->cd_flag & ME_CDFLAG_VERT_BWEIGHT) {
+ BLI_dynstr_append(dynstr, "'VERT_BWEIGHT', ");
+ }
+ if (me->cd_flag & ME_CDFLAG_EDGE_BWEIGHT) {
+ BLI_dynstr_append(dynstr, "'EDGE_BWEIGHT', ");
+ }
+ if (me->cd_flag & ME_CDFLAG_EDGE_CREASE) {
+ BLI_dynstr_append(dynstr, "'EDGE_CREASE', ");
+ }
+ BLI_dynstr_append(dynstr, "},\n");
+}
+
+char *BKE_mesh_debug_info(const Mesh *me)
+{
+ DynStr *dynstr = BLI_dynstr_new();
+ char *ret;
+
+ const char *indent4 = " ";
+ const char *indent8 = " ";
+
+ BLI_dynstr_append(dynstr, "{\n");
+ BLI_dynstr_appendf(dynstr, " 'ptr': '%p',\n", (void *)me);
+ BLI_dynstr_appendf(dynstr, " 'totvert': %d,\n", me->totvert);
+ BLI_dynstr_appendf(dynstr, " 'totedge': %d,\n", me->totedge);
+ BLI_dynstr_appendf(dynstr, " 'totface': %d,\n", me->totface);
+ BLI_dynstr_appendf(dynstr, " 'totpoly': %d,\n", me->totpoly);
+
+ BLI_dynstr_appendf(dynstr, " 'runtime.deformed_only': %d,\n", me->runtime.deformed_only);
+ BLI_dynstr_appendf(dynstr, " 'runtime.is_original': %d,\n", me->runtime.is_original);
+
+ BLI_dynstr_append(dynstr, " 'vert_layers': (\n");
+ CustomData_debug_info_from_layers(&me->vdata, indent8, dynstr);
+ BLI_dynstr_append(dynstr, " ),\n");
+
+ BLI_dynstr_append(dynstr, " 'edge_layers': (\n");
+ CustomData_debug_info_from_layers(&me->edata, indent8, dynstr);
+ BLI_dynstr_append(dynstr, " ),\n");
+
+ BLI_dynstr_append(dynstr, " 'loop_layers': (\n");
+ CustomData_debug_info_from_layers(&me->ldata, indent8, dynstr);
+ BLI_dynstr_append(dynstr, " ),\n");
+
+ BLI_dynstr_append(dynstr, " 'poly_layers': (\n");
+ CustomData_debug_info_from_layers(&me->pdata, indent8, dynstr);
+ BLI_dynstr_append(dynstr, " ),\n");
+
+ BLI_dynstr_append(dynstr, " 'tessface_layers': (\n");
+ CustomData_debug_info_from_layers(&me->fdata, indent8, dynstr);
+ BLI_dynstr_append(dynstr, " ),\n");
+
+ BLI_dynstr_append(dynstr, indent4);
+ mesh_debug_info_from_cd_flag(me, dynstr);
+
+ BLI_dynstr_append(dynstr, "}\n");
+
+ ret = BLI_dynstr_get_cstring(dynstr);
+ BLI_dynstr_free(dynstr);
+ return ret;
+}
+
+void BKE_mesh_debug_print(const Mesh *me)
+{
+ char *str = BKE_mesh_debug_info(me);
+ puts(str);
+ fflush(stdout);
+ MEM_freeN(str);
+}
+
+#endif /* NDEBUG */
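
The new helpers are only compiled when NDEBUG is undefined, so a typical use is a temporary call dropped in while chasing down a mesh difference; the output is a Python literal, which makes two dumps easy to diff. A tiny usage sketch (the variable name `me` stands for any Mesh pointer in scope):

    #ifndef NDEBUG
      BKE_mesh_debug_print(me); /* Prints to stdout and flushes. */

      /* Or keep the string to compare two meshes programmatically. */
      char *info = BKE_mesh_debug_info(me);
      /* ... store / compare ... */
      MEM_freeN(info);
    #endif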
diff --git a/source/blender/blenkernel/intern/mesh_iterators.c b/source/blender/blenkernel/intern/mesh_iterators.c
index 3b6afc1f47a..ff2ac8ecee9 100644
--- a/source/blender/blenkernel/intern/mesh_iterators.c
+++ b/source/blender/blenkernel/intern/mesh_iterators.c
@@ -34,14 +34,11 @@
#include "MEM_guardedalloc.h"
-void BKE_mesh_foreach_mapped_vert(Mesh *mesh,
- void (*func)(void *userData,
- int index,
- const float co[3],
- const float no_f[3],
- const short no_s[3]),
- void *userData,
- MeshForeachFlag flag)
+void BKE_mesh_foreach_mapped_vert(
+ Mesh *mesh,
+ void (*func)(void *userData, int index, const float co[3], const float no[3]),
+ void *userData,
+ MeshForeachFlag flag)
{
if (mesh->edit_mesh != NULL) {
BMEditMesh *em = mesh->edit_mesh;
@@ -49,7 +46,7 @@ void BKE_mesh_foreach_mapped_vert(Mesh *mesh,
BMIter iter;
BMVert *eve;
int i;
- if (mesh->runtime.edit_data->vertexCos != NULL) {
+ if (mesh->runtime.edit_data != NULL && mesh->runtime.edit_data->vertexCos != NULL) {
const float(*vertexCos)[3] = mesh->runtime.edit_data->vertexCos;
const float(*vertexNos)[3];
if (flag & MESH_FOREACH_USE_NORMAL) {
@@ -61,34 +58,37 @@ void BKE_mesh_foreach_mapped_vert(Mesh *mesh,
}
BM_ITER_MESH_INDEX (eve, &iter, bm, BM_VERTS_OF_MESH, i) {
const float *no = (flag & MESH_FOREACH_USE_NORMAL) ? vertexNos[i] : NULL;
- func(userData, i, vertexCos[i], no, NULL);
+ func(userData, i, vertexCos[i], no);
}
}
else {
BM_ITER_MESH_INDEX (eve, &iter, bm, BM_VERTS_OF_MESH, i) {
const float *no = (flag & MESH_FOREACH_USE_NORMAL) ? eve->no : NULL;
- func(userData, i, eve->co, no, NULL);
+ func(userData, i, eve->co, no);
}
}
}
else {
const MVert *mv = mesh->mvert;
const int *index = CustomData_get_layer(&mesh->vdata, CD_ORIGINDEX);
+ const float(*vert_normals)[3] = (flag & MESH_FOREACH_USE_NORMAL) ?
+ BKE_mesh_vertex_normals_ensure(mesh) :
+ NULL;
if (index) {
for (int i = 0; i < mesh->totvert; i++, mv++) {
- const short *no = (flag & MESH_FOREACH_USE_NORMAL) ? mv->no : NULL;
+ const float *no = (flag & MESH_FOREACH_USE_NORMAL) ? vert_normals[i] : NULL;
const int orig = *index++;
if (orig == ORIGINDEX_NONE) {
continue;
}
- func(userData, orig, mv->co, NULL, no);
+ func(userData, orig, mv->co, no);
}
}
else {
for (int i = 0; i < mesh->totvert; i++, mv++) {
- const short *no = (flag & MESH_FOREACH_USE_NORMAL) ? mv->no : NULL;
- func(userData, i, mv->co, NULL, no);
+ const float *no = (flag & MESH_FOREACH_USE_NORMAL) ? vert_normals[i] : NULL;
+ func(userData, i, mv->co, no);
}
}
}
@@ -106,7 +106,7 @@ void BKE_mesh_foreach_mapped_edge(
BMIter iter;
BMEdge *eed;
int i;
- if (mesh->runtime.edit_data->vertexCos != NULL) {
+ if (mesh->runtime.edit_data != NULL && mesh->runtime.edit_data->vertexCos != NULL) {
const float(*vertexCos)[3] = mesh->runtime.edit_data->vertexCos;
BM_mesh_elem_index_ensure(bm, BM_VERT);
@@ -164,7 +164,8 @@ void BKE_mesh_foreach_mapped_loop(Mesh *mesh,
BMIter iter;
BMFace *efa;
- const float(*vertexCos)[3] = mesh->runtime.edit_data->vertexCos;
+ const float(*vertexCos)[3] = mesh->runtime.edit_data ? mesh->runtime.edit_data->vertexCos :
+ NULL;
/* XXX: investigate using EditMesh data. */
const float(*lnors)[3] = (flag & MESH_FOREACH_USE_NORMAL) ?
@@ -231,7 +232,7 @@ void BKE_mesh_foreach_mapped_face_center(
void *userData,
MeshForeachFlag flag)
{
- if (mesh->edit_mesh != NULL) {
+ if (mesh->edit_mesh != NULL && mesh->runtime.edit_data != NULL) {
BMEditMesh *em = mesh->edit_mesh;
BMesh *bm = em->bm;
const float(*polyCos)[3];
@@ -310,8 +311,9 @@ void BKE_mesh_foreach_mapped_subdiv_face_center(
const MPoly *mp = mesh->mpoly;
const MLoop *ml;
const MVert *mv;
- float _no_buf[3];
- float *no = (flag & MESH_FOREACH_USE_NORMAL) ? _no_buf : NULL;
+ const float(*vert_normals)[3] = (flag & MESH_FOREACH_USE_NORMAL) ?
+ BKE_mesh_vertex_normals_ensure(mesh) :
+ NULL;
const int *index = CustomData_get_layer(&mesh->pdata, CD_ORIGINDEX);
if (index) {
@@ -324,10 +326,11 @@ void BKE_mesh_foreach_mapped_subdiv_face_center(
for (int j = 0; j < mp->totloop; j++, ml++) {
mv = &mesh->mvert[ml->v];
if (mv->flag & ME_VERT_FACEDOT) {
- if (flag & MESH_FOREACH_USE_NORMAL) {
- normal_short_to_float_v3(no, mv->no);
- }
- func(userData, orig, mv->co, no);
+
+ func(userData,
+ orig,
+ mv->co,
+ (flag & MESH_FOREACH_USE_NORMAL) ? vert_normals[ml->v] : NULL);
}
}
}
@@ -338,10 +341,7 @@ void BKE_mesh_foreach_mapped_subdiv_face_center(
for (int j = 0; j < mp->totloop; j++, ml++) {
mv = &mesh->mvert[ml->v];
if (mv->flag & ME_VERT_FACEDOT) {
- if (flag & MESH_FOREACH_USE_NORMAL) {
- normal_short_to_float_v3(no, mv->no);
- }
- func(userData, i, mv->co, no);
+ func(userData, i, mv->co, (flag & MESH_FOREACH_USE_NORMAL) ? vert_normals[ml->v] : NULL);
}
}
}
@@ -358,8 +358,7 @@ typedef struct MappedVCosData {
static void get_vertexcos__mapFunc(void *user_data,
int index,
const float co[3],
- const float UNUSED(no_f[3]),
- const short UNUSED(no_s[3]))
+ const float UNUSED(no[3]))
{
MappedVCosData *mapped_vcos_data = (MappedVCosData *)user_data;
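
BKE_mesh_foreach_mapped_vert() callbacks now receive a single float normal instead of the old float/short pair. A sketch of a callback written against the new signature; the function names are illustrative, and the declaration is assumed to stay in BKE_mesh.h:

    #include <stdio.h>
    #include "BKE_mesh.h"

    static void print_vert_cb(void *user_data, int index, const float co[3], const float no[3])
    {
      (void)user_data;
      printf("v%d: co=(%.3f, %.3f, %.3f)", index, co[0], co[1], co[2]);
      if (no != NULL) { /* NULL unless MESH_FOREACH_USE_NORMAL was passed. */
        printf(" no=(%.3f, %.3f, %.3f)", no[0], no[1], no[2]);
      }
      printf("\n");
    }

    static void dump_mapped_verts(Mesh *mesh)
    {
      /* Iterate mapped vertices, requesting normals from the lazily computed layer. */
      BKE_mesh_foreach_mapped_vert(mesh, print_vert_cb, NULL, MESH_FOREACH_USE_NORMAL);
    }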
diff --git a/source/blender/blenkernel/intern/mesh_merge.c b/source/blender/blenkernel/intern/mesh_merge.c
index 0115a70a52a..134a1344f83 100644
--- a/source/blender/blenkernel/intern/mesh_merge.c
+++ b/source/blender/blenkernel/intern/mesh_merge.c
@@ -615,10 +615,18 @@ Mesh *BKE_mesh_merge_verts(Mesh *mesh,
}
/* Copy over data. #CustomData_add_layer can do this, need to look it up. */
- memcpy(result->mvert, mvert, sizeof(MVert) * STACK_SIZE(mvert));
- memcpy(result->medge, medge, sizeof(MEdge) * STACK_SIZE(medge));
- memcpy(result->mloop, mloop, sizeof(MLoop) * STACK_SIZE(mloop));
- memcpy(result->mpoly, mpoly, sizeof(MPoly) * STACK_SIZE(mpoly));
+ if (STACK_SIZE(mvert)) {
+ memcpy(result->mvert, mvert, sizeof(MVert) * STACK_SIZE(mvert));
+ }
+ if (STACK_SIZE(medge)) {
+ memcpy(result->medge, medge, sizeof(MEdge) * STACK_SIZE(medge));
+ }
+ if (STACK_SIZE(mloop)) {
+ memcpy(result->mloop, mloop, sizeof(MLoop) * STACK_SIZE(mloop));
+ }
+ if (STACK_SIZE(mpoly)) {
+ memcpy(result->mpoly, mpoly, sizeof(MPoly) * STACK_SIZE(mpoly));
+ }
MEM_freeN(mvert);
MEM_freeN(medge);
diff --git a/source/blender/blenkernel/intern/mesh_mirror.c b/source/blender/blenkernel/intern/mesh_mirror.c
index 2d4308945fc..abc0b518d92 100644
--- a/source/blender/blenkernel/intern/mesh_mirror.c
+++ b/source/blender/blenkernel/intern/mesh_mirror.c
@@ -236,7 +236,7 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
}
/* Copy custom-data to new geometry,
- * copy from its self because this data may have been created in the checks above. */
+ * copy from itself because this data may have been created in the checks above. */
CustomData_copy_data(&result->vdata, &result->vdata, 0, maxVerts, maxVerts);
CustomData_copy_data(&result->edata, &result->edata, 0, maxEdges, maxEdges);
/* loops are copied later */
@@ -410,7 +410,6 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
CustomData *ldata = &result->ldata;
short(*clnors)[2] = CustomData_get_layer(ldata, CD_CUSTOMLOOPNORMAL);
MLoopNorSpaceArray lnors_spacearr = {NULL};
- float(*poly_normals)[3] = MEM_mallocN(sizeof(*poly_normals) * totpoly, __func__);
/* The transform matrix of a normal must be
* the transpose of inverse of transform matrix of the geometry... */
@@ -420,16 +419,8 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
/* calculate custom normals into loop_normals, then mirror first half into second half */
- BKE_mesh_calc_normals_poly_and_vertex(result->mvert,
- result->totvert,
- result->mloop,
- totloop,
- result->mpoly,
- totpoly,
- poly_normals,
- NULL);
-
BKE_mesh_normals_loop_split(result->mvert,
+ BKE_mesh_vertex_normals_ensure(mesh),
result->totvert,
result->medge,
result->totedge,
@@ -437,7 +428,7 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
loop_normals,
totloop,
result->mpoly,
- poly_normals,
+ BKE_mesh_poly_normals_ensure(mesh),
totpoly,
true,
mesh->smoothresh,
@@ -463,7 +454,6 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
}
}
- MEM_freeN(poly_normals);
MEM_freeN(loop_normals);
BKE_lnor_spacearr_free(&lnors_spacearr);
}
diff --git a/source/blender/blenkernel/intern/mesh_normals.cc b/source/blender/blenkernel/intern/mesh_normals.cc
index da5b4ccc764..08a17060549 100644
--- a/source/blender/blenkernel/intern/mesh_normals.cc
+++ b/source/blender/blenkernel/intern/mesh_normals.cc
@@ -38,7 +38,9 @@
#include "BLI_linklist.h"
#include "BLI_linklist_stack.h"
#include "BLI_math.h"
+#include "BLI_math_vec_types.hh"
#include "BLI_memarena.h"
+#include "BLI_span.hh"
#include "BLI_stack.h"
#include "BLI_task.h"
#include "BLI_utildefines.h"
@@ -50,6 +52,8 @@
#include "atomic_ops.h"
+using blender::Span;
+
// #define DEBUG_TIME
#ifdef DEBUG_TIME
@@ -109,6 +113,52 @@ void BKE_mesh_normals_tag_dirty(Mesh *mesh)
mesh->runtime.cd_dirty_poly |= CD_MASK_NORMAL;
}
+float (*BKE_mesh_vertex_normals_for_write(Mesh *mesh))[3]
+{
+ CustomData_duplicate_referenced_layer(&mesh->vdata, CD_NORMAL, mesh->totvert);
+ return (float(*)[3])CustomData_add_layer(
+ &mesh->vdata, CD_NORMAL, CD_CALLOC, nullptr, mesh->totvert);
+}
+
+float (*BKE_mesh_poly_normals_for_write(Mesh *mesh))[3]
+{
+ CustomData_duplicate_referenced_layer(&mesh->pdata, CD_NORMAL, mesh->totpoly);
+ return (float(*)[3])CustomData_add_layer(
+ &mesh->pdata, CD_NORMAL, CD_CALLOC, nullptr, mesh->totpoly);
+}
+
+void BKE_mesh_vertex_normals_clear_dirty(Mesh *mesh)
+{
+ mesh->runtime.cd_dirty_vert &= ~CD_MASK_NORMAL;
+ BKE_mesh_assert_normals_dirty_or_calculated(mesh);
+}
+
+void BKE_mesh_poly_normals_clear_dirty(Mesh *mesh)
+{
+ mesh->runtime.cd_dirty_poly &= ~CD_MASK_NORMAL;
+ BKE_mesh_assert_normals_dirty_or_calculated(mesh);
+}
+
+bool BKE_mesh_vertex_normals_are_dirty(const Mesh *mesh)
+{
+ return mesh->runtime.cd_dirty_vert & CD_MASK_NORMAL;
+}
+
+bool BKE_mesh_poly_normals_are_dirty(const Mesh *mesh)
+{
+ return mesh->runtime.cd_dirty_poly & CD_MASK_NORMAL;
+}
+
+void BKE_mesh_assert_normals_dirty_or_calculated(const Mesh *mesh)
+{
+ if (!(mesh->runtime.cd_dirty_vert & CD_MASK_NORMAL)) {
+ BLI_assert(CustomData_has_layer(&mesh->vdata, CD_NORMAL) || mesh->totvert == 0);
+ }
+ if (!(mesh->runtime.cd_dirty_poly & CD_MASK_NORMAL)) {
+ BLI_assert(CustomData_has_layer(&mesh->pdata, CD_NORMAL) || mesh->totpoly == 0);
+ }
+}
+
/** \} */
/* -------------------------------------------------------------------- */
@@ -161,8 +211,6 @@ void BKE_mesh_calc_normals_poly(const MVert *mvert,
/* -------------------------------------------------------------------- */
/** \name Mesh Normal Calculation (Polygons & Vertices)
*
- * Implement #BKE_mesh_calc_normals_poly_and_vertex,
- *
* Take care making optimizations to this function as improvements to low-poly
* meshes can slow down high-poly meshes. For details on performance, see D11993.
* \{ */
@@ -253,18 +301,16 @@ static void mesh_calc_normals_poly_and_vertex_finalize_fn(
/* Following Mesh convention; we use vertex coordinate itself for normal in this case. */
normalize_v3_v3(no, mv->co);
}
-
- normal_float_to_short_v3(mv->no, no);
}
-void BKE_mesh_calc_normals_poly_and_vertex(MVert *mvert,
- const int mvert_len,
- const MLoop *mloop,
- const int UNUSED(mloop_len),
- const MPoly *mpoly,
- const int mpoly_len,
- float (*r_poly_normals)[3],
- float (*r_vert_normals)[3])
+static void mesh_calc_normals_poly_and_vertex(MVert *mvert,
+ const int mvert_len,
+ const MLoop *mloop,
+ const int UNUSED(mloop_len),
+ const MPoly *mpoly,
+ const int mpoly_len,
+ float (*r_poly_normals)[3],
+ float (*r_vert_normals)[3])
{
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
@@ -308,19 +354,90 @@ void BKE_mesh_calc_normals_poly_and_vertex(MVert *mvert,
/** \name Mesh Normal Calculation
* \{ */
-void BKE_mesh_ensure_normals(Mesh *mesh)
+const float (*BKE_mesh_vertex_normals_ensure(const Mesh *mesh))[3]
{
- if (mesh->runtime.cd_dirty_vert & CD_MASK_NORMAL) {
- BKE_mesh_calc_normals(mesh);
+ if (!(BKE_mesh_vertex_normals_are_dirty(mesh) || BKE_mesh_poly_normals_are_dirty(mesh))) {
+ BLI_assert(CustomData_has_layer(&mesh->vdata, CD_NORMAL) || mesh->totvert == 0);
+ return (const float(*)[3])CustomData_get_layer(&mesh->vdata, CD_NORMAL);
+ }
+
+ if (mesh->totvert == 0) {
+ return nullptr;
+ }
+
+ ThreadMutex *normals_mutex = (ThreadMutex *)mesh->runtime.normals_mutex;
+ BLI_mutex_lock(normals_mutex);
+ if (!(BKE_mesh_vertex_normals_are_dirty(mesh) || BKE_mesh_poly_normals_are_dirty(mesh))) {
+ BLI_assert(CustomData_has_layer(&mesh->vdata, CD_NORMAL));
+ BLI_mutex_unlock(normals_mutex);
+ return (const float(*)[3])CustomData_get_layer(&mesh->vdata, CD_NORMAL);
}
- BLI_assert((mesh->runtime.cd_dirty_vert & CD_MASK_NORMAL) == 0);
+
+ Mesh &mesh_mutable = *const_cast<Mesh *>(mesh);
+
+ float(*vert_normals)[3] = BKE_mesh_vertex_normals_for_write(&mesh_mutable);
+ float(*poly_normals)[3] = BKE_mesh_poly_normals_for_write(&mesh_mutable);
+
+ mesh_calc_normals_poly_and_vertex(mesh_mutable.mvert,
+ mesh_mutable.totvert,
+ mesh_mutable.mloop,
+ mesh_mutable.totloop,
+ mesh_mutable.mpoly,
+ mesh_mutable.totpoly,
+ poly_normals,
+ vert_normals);
+
+ BKE_mesh_vertex_normals_clear_dirty(&mesh_mutable);
+ BKE_mesh_poly_normals_clear_dirty(&mesh_mutable);
+
+ BLI_mutex_unlock(normals_mutex);
+ return vert_normals;
+}
+
+const float (*BKE_mesh_poly_normals_ensure(const Mesh *mesh))[3]
+{
+ if (!BKE_mesh_poly_normals_are_dirty(mesh)) {
+ BLI_assert(CustomData_has_layer(&mesh->pdata, CD_NORMAL) || mesh->totpoly == 0);
+ return (const float(*)[3])CustomData_get_layer(&mesh->pdata, CD_NORMAL);
+ }
+
+ if (mesh->totpoly == 0) {
+ return nullptr;
+ }
+
+ ThreadMutex *normals_mutex = (ThreadMutex *)mesh->runtime.normals_mutex;
+ BLI_mutex_lock(normals_mutex);
+ if (!BKE_mesh_poly_normals_are_dirty(mesh)) {
+ BLI_assert(CustomData_has_layer(&mesh->pdata, CD_NORMAL));
+ BLI_mutex_unlock(normals_mutex);
+ return (const float(*)[3])CustomData_get_layer(&mesh->pdata, CD_NORMAL);
+ }
+
+ Mesh &mesh_mutable = *const_cast<Mesh *>(mesh);
+
+ float(*poly_normals)[3] = BKE_mesh_poly_normals_for_write(&mesh_mutable);
+
+ BKE_mesh_calc_normals_poly(mesh_mutable.mvert,
+ mesh_mutable.totvert,
+ mesh_mutable.mloop,
+ mesh_mutable.totloop,
+ mesh_mutable.mpoly,
+ mesh_mutable.totpoly,
+ poly_normals);
+
+ BKE_mesh_poly_normals_clear_dirty(&mesh_mutable);
+
+ BLI_mutex_unlock(normals_mutex);
+ return poly_normals;
}
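
Both `*_ensure` functions above rely on the same lazy, double-checked locking scheme around the new `runtime.normals_mutex`: a cheap dirty-flag check without the lock, then the lock, then a re-check before actually recomputing, so readers never block once the cache is warm. A minimal sketch of that control flow, using hypothetical helper names rather than the real API:

/* Illustrative sketch only; data_is_dirty(), cached_layer(), recompute_into_cache()
 * and clear_dirty_flag() are hypothetical stand-ins for the calls used above. */
const float (*ensure_cached_layer(const Mesh *mesh))[3]
{
  if (!data_is_dirty(mesh)) {
    return cached_layer(mesh); /* Fast path: no locking when the cache is already valid. */
  }
  ThreadMutex *mutex = (ThreadMutex *)mesh->runtime.normals_mutex;
  BLI_mutex_lock(mutex);
  if (data_is_dirty(mesh)) { /* Re-check: another thread may have filled the cache meanwhile. */
    Mesh *mesh_mutable = (Mesh *)mesh; /* Logically const: only runtime caches are written. */
    recompute_into_cache(mesh_mutable);
    clear_dirty_flag(mesh_mutable);
  }
  BLI_mutex_unlock(mutex);
  return cached_layer(mesh);
}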
void BKE_mesh_ensure_normals_for_display(Mesh *mesh)
{
switch ((eMeshWrapperType)mesh->runtime.wrapper_type) {
+ case ME_WRAPPER_TYPE_SUBD:
case ME_WRAPPER_TYPE_MDATA:
- /* Run code below. */
+ BKE_mesh_vertex_normals_ensure(mesh);
+ BKE_mesh_poly_normals_ensure(mesh);
break;
case ME_WRAPPER_TYPE_BMESH: {
struct BMEditMesh *em = mesh->edit_mesh;
@@ -332,47 +449,6 @@ void BKE_mesh_ensure_normals_for_display(Mesh *mesh)
return;
}
}
-
- float(*poly_nors)[3] = (float(*)[3])CustomData_get_layer(&mesh->pdata, CD_NORMAL);
- const bool do_vert_normals = (mesh->runtime.cd_dirty_vert & CD_MASK_NORMAL) != 0;
- const bool do_poly_normals = (mesh->runtime.cd_dirty_poly & CD_MASK_NORMAL ||
- poly_nors == nullptr);
-
- if (do_vert_normals || do_poly_normals) {
- const bool do_add_poly_nors_cddata = (poly_nors == nullptr);
- if (do_add_poly_nors_cddata) {
- poly_nors = (float(*)[3])MEM_malloc_arrayN(
- (size_t)mesh->totpoly, sizeof(*poly_nors), __func__);
- }
-
- /* Calculate poly/vert normals. */
- if (do_vert_normals) {
- BKE_mesh_calc_normals_poly_and_vertex(mesh->mvert,
- mesh->totvert,
- mesh->mloop,
- mesh->totloop,
- mesh->mpoly,
- mesh->totpoly,
- poly_nors,
- nullptr);
- }
- else {
- BKE_mesh_calc_normals_poly(mesh->mvert,
- mesh->totvert,
- mesh->mloop,
- mesh->totloop,
- mesh->mpoly,
- mesh->totpoly,
- poly_nors);
- }
-
- if (do_add_poly_nors_cddata) {
- CustomData_add_layer(&mesh->pdata, CD_NORMAL, CD_ASSIGN, poly_nors, mesh->totpoly);
- }
-
- mesh->runtime.cd_dirty_vert &= ~CD_MASK_NORMAL;
- mesh->runtime.cd_dirty_poly &= ~CD_MASK_NORMAL;
- }
}
void BKE_mesh_calc_normals(Mesh *mesh)
@@ -380,18 +456,10 @@ void BKE_mesh_calc_normals(Mesh *mesh)
#ifdef DEBUG_TIME
TIMEIT_START_AVERAGED(BKE_mesh_calc_normals);
#endif
- BKE_mesh_calc_normals_poly_and_vertex(mesh->mvert,
- mesh->totvert,
- mesh->mloop,
- mesh->totloop,
- mesh->mpoly,
- mesh->totpoly,
- nullptr,
- nullptr);
+ BKE_mesh_vertex_normals_ensure(mesh);
#ifdef DEBUG_TIME
TIMEIT_END_AVERAGED(BKE_mesh_calc_normals);
#endif
- mesh->runtime.cd_dirty_vert &= ~CD_MASK_NORMAL;
}
void BKE_mesh_calc_normals_looptri(MVert *mverts,
@@ -438,8 +506,6 @@ void BKE_mesh_calc_normals_looptri(MVert *mverts,
if (UNLIKELY(normalize_v3(no) == 0.0f)) {
normalize_v3_v3(no, mv->co);
}
-
- normal_float_to_short_v3(mv->no, no);
}
cleanup:
@@ -752,6 +818,7 @@ struct LoopSplitTaskDataCommon {
int (*edge_to_loops)[2];
int *loop_to_poly;
const float (*polynors)[3];
+ const float (*vert_normals)[3];
int numEdges;
int numLoops;
@@ -768,7 +835,6 @@ static void mesh_edges_sharp_tag(LoopSplitTaskDataCommon *data,
const float split_angle,
const bool do_sharp_edges_tag)
{
- const MVert *mverts = data->mverts;
const MEdge *medges = data->medges;
const MLoop *mloops = data->mloops;
@@ -807,7 +873,7 @@ static void mesh_edges_sharp_tag(LoopSplitTaskDataCommon *data,
* this way we don't have to compute those later!
*/
if (loopnors) {
- normal_short_to_float_v3(loopnors[ml_curr_index], mverts[ml_curr->v].no);
+ copy_v3_v3(loopnors[ml_curr_index], data->vert_normals[ml_curr->v]);
}
/* Check whether current edge might be smooth or sharp */
@@ -1532,6 +1598,7 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
}
void BKE_mesh_normals_loop_split(const MVert *mverts,
+ const float (*vert_normals)[3],
const int UNUSED(numVerts),
MEdge *medges,
const int numEdges,
@@ -1574,7 +1641,7 @@ void BKE_mesh_normals_loop_split(const MVert *mverts,
copy_v3_v3(r_loopnors[ml_index], polynors[mp_index]);
}
else {
- normal_short_to_float_v3(r_loopnors[ml_index], mverts[mloops[ml_index].v].no);
+ copy_v3_v3(r_loopnors[ml_index], vert_normals[mloops[ml_index].v]);
}
}
}
@@ -1632,6 +1699,7 @@ void BKE_mesh_normals_loop_split(const MVert *mverts,
common_data.edge_to_loops = edge_to_loops;
common_data.loop_to_poly = loop_to_poly;
common_data.polynors = polynors;
+ common_data.vert_normals = vert_normals;
common_data.numEdges = numEdges;
common_data.numLoops = numLoops;
common_data.numPolys = numPolys;
@@ -1683,6 +1751,7 @@ void BKE_mesh_normals_loop_split(const MVert *mverts,
* in which case they will be replaced by default loop/vertex normal.
*/
static void mesh_normals_loop_custom_set(const MVert *mverts,
+ const float (*vert_normals)[3],
const int numVerts,
MEdge *medges,
const int numEdges,
@@ -1714,6 +1783,7 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
/* Compute current lnor spacearr. */
BKE_mesh_normals_loop_split(mverts,
+ vert_normals,
numVerts,
medges,
numEdges,
@@ -1733,7 +1803,7 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
if (use_vertices) {
for (int i = 0; i < numVerts; i++) {
if (is_zero_v3(r_custom_loopnors[i])) {
- normal_short_to_float_v3(r_custom_loopnors[i], mverts[i].no);
+ copy_v3_v3(r_custom_loopnors[i], vert_normals[i]);
}
}
}
@@ -1836,6 +1906,7 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
/* And now, recompute our new auto lnors and lnor spacearr! */
BKE_lnor_spacearr_clear(&lnors_spacearr);
BKE_mesh_normals_loop_split(mverts,
+ vert_normals,
numVerts,
medges,
numEdges,
@@ -1917,6 +1988,7 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
}
void BKE_mesh_normals_loop_custom_set(const MVert *mverts,
+ const float (*vert_normals)[3],
const int numVerts,
MEdge *medges,
const int numEdges,
@@ -1929,6 +2001,7 @@ void BKE_mesh_normals_loop_custom_set(const MVert *mverts,
short (*r_clnors_data)[2])
{
mesh_normals_loop_custom_set(mverts,
+ vert_normals,
numVerts,
medges,
numEdges,
@@ -1943,6 +2016,7 @@ void BKE_mesh_normals_loop_custom_set(const MVert *mverts,
}
void BKE_mesh_normals_loop_custom_from_vertices_set(const MVert *mverts,
+ const float (*vert_normals)[3],
float (*r_custom_vertnors)[3],
const int numVerts,
MEdge *medges,
@@ -1955,6 +2029,7 @@ void BKE_mesh_normals_loop_custom_from_vertices_set(const MVert *mverts,
short (*r_clnors_data)[2])
{
mesh_normals_loop_custom_set(mverts,
+ vert_normals,
numVerts,
medges,
numEdges,
@@ -1982,22 +2057,8 @@ static void mesh_set_custom_normals(Mesh *mesh, float (*r_custom_nors)[3], const
&mesh->ldata, CD_CUSTOMLOOPNORMAL, CD_CALLOC, nullptr, numloops);
}
- float(*polynors)[3] = (float(*)[3])CustomData_get_layer(&mesh->pdata, CD_NORMAL);
- bool free_polynors = false;
- if (polynors == nullptr) {
- polynors = (float(*)[3])MEM_mallocN(sizeof(float[3]) * (size_t)mesh->totpoly, __func__);
- BKE_mesh_calc_normals_poly_and_vertex(mesh->mvert,
- mesh->totvert,
- mesh->mloop,
- mesh->totloop,
- mesh->mpoly,
- mesh->totpoly,
- polynors,
- nullptr);
- free_polynors = true;
- }
-
mesh_normals_loop_custom_set(mesh->mvert,
+ BKE_mesh_vertex_normals_ensure(mesh),
mesh->totvert,
mesh->medge,
mesh->totedge,
@@ -2005,14 +2066,10 @@ static void mesh_set_custom_normals(Mesh *mesh, float (*r_custom_nors)[3], const
r_custom_nors,
mesh->totloop,
mesh->mpoly,
- polynors,
+ BKE_mesh_poly_normals_ensure(mesh),
mesh->totpoly,
clnors,
use_vertices);
-
- if (free_polynors) {
- MEM_freeN(polynors);
- }
}
void BKE_mesh_set_custom_normals(Mesh *mesh, float (*r_custom_loopnors)[3])
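
For callers, the conversions in the rest of this commit all follow one pattern: instead of reading the packed short normal stored in `MVert`, code asks the mesh for its lazily computed float normal layer. A representative before/after, assuming a `Mesh *mesh`, its `MVert *mvert` array and a vertex index `i` are in scope (illustrative only):

/* Before: per-vertex short normals packed into MVert. */
float no[3];
normal_short_to_float_v3(no, mvert[i].no);

/* After: float normals cached on the mesh, computed on demand. */
const float (*vert_normals)[3] = BKE_mesh_vertex_normals_ensure(mesh);
copy_v3_v3(no, vert_normals[i]);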
diff --git a/source/blender/blenkernel/intern/mesh_remap.c b/source/blender/blenkernel/intern/mesh_remap.c
index 5b5378bd829..a9f61e9827b 100644
--- a/source/blender/blenkernel/intern/mesh_remap.c
+++ b/source/blender/blenkernel/intern/mesh_remap.c
@@ -594,6 +594,7 @@ void BKE_mesh_remap_calc_verts_from_mesh(const int mode,
MPoly *polys_src = me_src->mpoly;
MLoop *loops_src = me_src->mloop;
float(*vcos_src)[3] = BKE_mesh_vert_coords_alloc(me_src, NULL);
+ const float(*vert_normals_src)[3] = BKE_mesh_vertex_normals_ensure(me_src);
size_t tmp_buff_size = MREMAP_DEFAULT_BUFSIZE;
float(*vcos)[3] = MEM_mallocN(sizeof(*vcos) * tmp_buff_size, __func__);
@@ -605,7 +606,7 @@ void BKE_mesh_remap_calc_verts_from_mesh(const int mode,
if (mode == MREMAP_MODE_VERT_POLYINTERP_VNORPROJ) {
for (i = 0; i < numverts_dst; i++) {
copy_v3_v3(tmp_co, verts_dst[i].co);
- normal_short_to_float_v3(tmp_no, verts_dst[i].no);
+ copy_v3_v3(tmp_no, vert_normals_src[i]);
/* Convert the vertex to tree coordinates, if needed. */
if (space_transform) {
@@ -951,6 +952,8 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
BKE_bvhtree_from_mesh_get(&treedata, me_src, BVHTREE_FROM_EDGES, 2);
+ const float(*vert_normals_dst)[3] = BKE_mesh_vertex_normals_ensure(me_src);
+
for (i = 0; i < numedges_dst; i++) {
/* For each dst edge, we sample some rays from it (interpolated from its vertices)
* and use their hits to interpolate from source edges. */
@@ -970,8 +973,8 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
copy_v3_v3(v1_co, verts_dst[me->v1].co);
copy_v3_v3(v2_co, verts_dst[me->v2].co);
- normal_short_to_float_v3(v1_no, verts_dst[me->v1].no);
- normal_short_to_float_v3(v2_no, verts_dst[me->v2].no);
+ copy_v3_v3(v1_no, vert_normals_dst[me->v1]);
+ copy_v3_v3(v2_no, vert_normals_dst[me->v2]);
/* We do our transform here, allows to interpolate from normals already in src space. */
if (space_transform) {
@@ -1242,6 +1245,7 @@ void BKE_mesh_remap_calc_loops_from_mesh(const int mode,
const SpaceTransform *space_transform,
const float max_dist,
const float ray_radius,
+ Mesh *mesh_dst,
MVert *verts_dst,
const int numverts_dst,
MEdge *edges_dst,
@@ -1251,7 +1255,6 @@ void BKE_mesh_remap_calc_loops_from_mesh(const int mode,
MPoly *polys_dst,
const int numpolys_dst,
CustomData *ldata_dst,
- CustomData *pdata_dst,
const bool use_split_nors_dst,
const float split_angle_dst,
const bool dirty_nors_dst,
@@ -1297,9 +1300,9 @@ void BKE_mesh_remap_calc_loops_from_mesh(const int mode,
1) :
0);
- float(*poly_nors_src)[3] = NULL;
- float(*loop_nors_src)[3] = NULL;
- float(*poly_nors_dst)[3] = NULL;
+ const float(*poly_nors_src)[3] = NULL;
+ const float(*loop_nors_src)[3] = NULL;
+ const float(*poly_nors_dst)[3] = NULL;
float(*loop_nors_dst)[3] = NULL;
float(*poly_cents_src)[3] = NULL;
@@ -1356,23 +1359,7 @@ void BKE_mesh_remap_calc_loops_from_mesh(const int mode,
const bool need_pnors_dst = need_lnors_dst || need_pnors_src;
if (need_pnors_dst) {
- /* Cache poly nors into a temp CDLayer. */
- poly_nors_dst = CustomData_get_layer(pdata_dst, CD_NORMAL);
- const bool do_poly_nors_dst = (poly_nors_dst == NULL);
- if (!poly_nors_dst) {
- poly_nors_dst = CustomData_add_layer(
- pdata_dst, CD_NORMAL, CD_CALLOC, NULL, numpolys_dst);
- CustomData_set_layer_flag(pdata_dst, CD_NORMAL, CD_FLAG_TEMPORARY);
- }
- if (dirty_nors_dst || do_poly_nors_dst) {
- BKE_mesh_calc_normals_poly(verts_dst,
- numverts_dst,
- loops_dst,
- numloops_dst,
- polys_dst,
- numpolys_dst,
- poly_nors_dst);
- }
+ poly_nors_dst = BKE_mesh_poly_normals_ensure(mesh_dst);
}
if (need_lnors_dst) {
short(*custom_nors_dst)[2] = CustomData_get_layer(ldata_dst, CD_CUSTOMLOOPNORMAL);
@@ -1387,6 +1374,7 @@ void BKE_mesh_remap_calc_loops_from_mesh(const int mode,
}
if (dirty_nors_dst || do_loop_nors_dst) {
BKE_mesh_normals_loop_split(verts_dst,
+ BKE_mesh_vertex_normals_ensure(mesh_dst),
numverts_dst,
edges_dst,
numedges_dst,
@@ -1394,7 +1382,7 @@ void BKE_mesh_remap_calc_loops_from_mesh(const int mode,
loop_nors_dst,
numloops_dst,
polys_dst,
- (const float(*)[3])poly_nors_dst,
+ poly_nors_dst,
numpolys_dst,
use_split_nors_dst,
split_angle_dst,
@@ -1405,8 +1393,7 @@ void BKE_mesh_remap_calc_loops_from_mesh(const int mode,
}
if (need_pnors_src || need_lnors_src) {
if (need_pnors_src) {
- poly_nors_src = CustomData_get_layer(&me_src->pdata, CD_NORMAL);
- BLI_assert(poly_nors_src != NULL);
+ poly_nors_src = BKE_mesh_poly_normals_ensure(me_src);
}
if (need_lnors_src) {
loop_nors_src = CustomData_get_layer(&me_src->ldata, CD_NORMAL);
@@ -1648,7 +1635,7 @@ void BKE_mesh_remap_calc_loops_from_mesh(const int mode,
if (mesh_remap_bvhtree_query_nearest(
tdata, &nearest, tmp_co, max_dist_sq, &hit_dist)) {
float(*nor_dst)[3];
- float(*nors_src)[3];
+ const float(*nors_src)[3];
float best_nor_dot = -2.0f;
float best_sqdist_fallback = FLT_MAX;
int best_index_src = -1;
@@ -2188,41 +2175,24 @@ void BKE_mesh_remap_calc_polys_from_mesh(const int mode,
const SpaceTransform *space_transform,
const float max_dist,
const float ray_radius,
+ Mesh *mesh_dst,
MVert *verts_dst,
- const int numverts_dst,
MLoop *loops_dst,
- const int numloops_dst,
MPoly *polys_dst,
const int numpolys_dst,
- CustomData *pdata_dst,
- const bool dirty_nors_dst,
Mesh *me_src,
MeshPairRemap *r_map)
{
const float full_weight = 1.0f;
const float max_dist_sq = max_dist * max_dist;
- float(*poly_nors_dst)[3] = NULL;
+ const float(*poly_nors_dst)[3] = NULL;
float tmp_co[3], tmp_no[3];
int i;
BLI_assert(mode & MREMAP_MODE_POLY);
if (mode & (MREMAP_USE_NORMAL | MREMAP_USE_NORPROJ)) {
- /* Cache poly nors into a temp CDLayer. */
- poly_nors_dst = CustomData_get_layer(pdata_dst, CD_NORMAL);
- if (!poly_nors_dst) {
- poly_nors_dst = CustomData_add_layer(pdata_dst, CD_NORMAL, CD_CALLOC, NULL, numpolys_dst);
- CustomData_set_layer_flag(pdata_dst, CD_NORMAL, CD_FLAG_TEMPORARY);
- }
- if (dirty_nors_dst) {
- BKE_mesh_calc_normals_poly(verts_dst,
- numverts_dst,
- loops_dst,
- numloops_dst,
- polys_dst,
- numpolys_dst,
- poly_nors_dst);
- }
+ poly_nors_dst = BKE_mesh_poly_normals_ensure(mesh_dst);
}
BKE_mesh_remap_init(r_map, numpolys_dst);
diff --git a/source/blender/blenkernel/intern/mesh_remesh_voxel.cc b/source/blender/blenkernel/intern/mesh_remesh_voxel.cc
index 3447185089d..50464da86e9 100644
--- a/source/blender/blenkernel/intern/mesh_remesh_voxel.cc
+++ b/source/blender/blenkernel/intern/mesh_remesh_voxel.cc
@@ -31,8 +31,8 @@
#include "MEM_guardedalloc.h"
#include "BLI_array.hh"
-#include "BLI_float3.hh"
#include "BLI_index_range.hh"
+#include "BLI_math_vec_types.hh"
#include "BLI_span.hh"
#include "DNA_mesh_types.h"
diff --git a/source/blender/blenkernel/intern/mesh_runtime.c b/source/blender/blenkernel/intern/mesh_runtime.c
index 45c84ed0862..e7e5064df7c 100644
--- a/source/blender/blenkernel/intern/mesh_runtime.c
+++ b/source/blender/blenkernel/intern/mesh_runtime.c
@@ -53,6 +53,8 @@ static void mesh_runtime_init_mutexes(Mesh *mesh)
{
mesh->runtime.eval_mutex = MEM_mallocN(sizeof(ThreadMutex), "mesh runtime eval_mutex");
BLI_mutex_init(mesh->runtime.eval_mutex);
+ mesh->runtime.normals_mutex = MEM_mallocN(sizeof(ThreadMutex), "mesh runtime normals_mutex");
+ BLI_mutex_init(mesh->runtime.normals_mutex);
mesh->runtime.render_mutex = MEM_mallocN(sizeof(ThreadMutex), "mesh runtime render_mutex");
BLI_mutex_init(mesh->runtime.render_mutex);
}
@@ -67,6 +69,11 @@ static void mesh_runtime_free_mutexes(Mesh *mesh)
MEM_freeN(mesh->runtime.eval_mutex);
mesh->runtime.eval_mutex = NULL;
}
+ if (mesh->runtime.normals_mutex != NULL) {
+ BLI_mutex_end(mesh->runtime.normals_mutex);
+ MEM_freeN(mesh->runtime.normals_mutex);
+ mesh->runtime.normals_mutex = NULL;
+ }
if (mesh->runtime.render_mutex != NULL) {
BLI_mutex_end(mesh->runtime.render_mutex);
MEM_freeN(mesh->runtime.render_mutex);
@@ -291,129 +298,10 @@ void BKE_mesh_batch_cache_free(Mesh *me)
/** \} */
/* -------------------------------------------------------------------- */
-/** \name Mesh Runtime Debug Helpers
+/** \name Mesh Runtime Validation
* \{ */
-/* Evaluated mesh info printing function, to help track down differences output. */
-
#ifndef NDEBUG
-# include "BLI_dynstr.h"
-
-static void mesh_runtime_debug_info_layers(DynStr *dynstr, CustomData *cd)
-{
- int type;
-
- for (type = 0; type < CD_NUMTYPES; type++) {
- if (CustomData_has_layer(cd, type)) {
- /* NOTE: doesn't account for multiple layers. */
- const char *name = CustomData_layertype_name(type);
- const int size = CustomData_sizeof(type);
- const void *pt = CustomData_get_layer(cd, type);
- const int pt_size = pt ? (int)(MEM_allocN_len(pt) / size) : 0;
- const char *structname;
- int structnum;
- CustomData_file_write_info(type, &structname, &structnum);
- BLI_dynstr_appendf(
- dynstr,
- " dict(name='%s', struct='%s', type=%d, ptr='%p', elem=%d, length=%d),\n",
- name,
- structname,
- type,
- (const void *)pt,
- size,
- pt_size);
- }
- }
-}
-
-char *BKE_mesh_runtime_debug_info(Mesh *me_eval)
-{
- DynStr *dynstr = BLI_dynstr_new();
- char *ret;
-
- BLI_dynstr_append(dynstr, "{\n");
- BLI_dynstr_appendf(dynstr, " 'ptr': '%p',\n", (void *)me_eval);
-# if 0
- const char *tstr;
- switch (me_eval->type) {
- case DM_TYPE_CDDM:
- tstr = "DM_TYPE_CDDM";
- break;
- case DM_TYPE_CCGDM:
- tstr = "DM_TYPE_CCGDM";
- break;
- default:
- tstr = "UNKNOWN";
- break;
- }
- BLI_dynstr_appendf(dynstr, " 'type': '%s',\n", tstr);
-# endif
- BLI_dynstr_appendf(dynstr, " 'totvert': %d,\n", me_eval->totvert);
- BLI_dynstr_appendf(dynstr, " 'totedge': %d,\n", me_eval->totedge);
- BLI_dynstr_appendf(dynstr, " 'totface': %d,\n", me_eval->totface);
- BLI_dynstr_appendf(dynstr, " 'totpoly': %d,\n", me_eval->totpoly);
- BLI_dynstr_appendf(dynstr, " 'deformed_only': %d,\n", me_eval->runtime.deformed_only);
-
- BLI_dynstr_append(dynstr, " 'vertexLayers': (\n");
- mesh_runtime_debug_info_layers(dynstr, &me_eval->vdata);
- BLI_dynstr_append(dynstr, " ),\n");
-
- BLI_dynstr_append(dynstr, " 'edgeLayers': (\n");
- mesh_runtime_debug_info_layers(dynstr, &me_eval->edata);
- BLI_dynstr_append(dynstr, " ),\n");
-
- BLI_dynstr_append(dynstr, " 'loopLayers': (\n");
- mesh_runtime_debug_info_layers(dynstr, &me_eval->ldata);
- BLI_dynstr_append(dynstr, " ),\n");
-
- BLI_dynstr_append(dynstr, " 'polyLayers': (\n");
- mesh_runtime_debug_info_layers(dynstr, &me_eval->pdata);
- BLI_dynstr_append(dynstr, " ),\n");
-
- BLI_dynstr_append(dynstr, " 'tessFaceLayers': (\n");
- mesh_runtime_debug_info_layers(dynstr, &me_eval->fdata);
- BLI_dynstr_append(dynstr, " ),\n");
-
- BLI_dynstr_append(dynstr, "}\n");
-
- ret = BLI_dynstr_get_cstring(dynstr);
- BLI_dynstr_free(dynstr);
- return ret;
-}
-
-void BKE_mesh_runtime_debug_print(Mesh *me_eval)
-{
- char *str = BKE_mesh_runtime_debug_info(me_eval);
- puts(str);
- fflush(stdout);
- MEM_freeN(str);
-}
-
-void BKE_mesh_runtime_debug_print_cdlayers(CustomData *data)
-{
- int i;
- const CustomDataLayer *layer;
-
- printf("{\n");
-
- for (i = 0, layer = data->layers; i < data->totlayer; i++, layer++) {
-
- const char *name = CustomData_layertype_name(layer->type);
- const int size = CustomData_sizeof(layer->type);
- const char *structname;
- int structnum;
- CustomData_file_write_info(layer->type, &structname, &structnum);
- printf(" dict(name='%s', struct='%s', type=%d, ptr='%p', elem=%d, length=%d),\n",
- name,
- structname,
- layer->type,
- (const void *)layer->data,
- size,
- (int)(MEM_allocN_len(layer->data) / size));
- }
-
- printf("}\n");
-}
bool BKE_mesh_runtime_is_valid(Mesh *me_eval)
{
diff --git a/source/blender/blenkernel/intern/mesh_tangent.c b/source/blender/blenkernel/intern/mesh_tangent.c
index c7a1b22dad1..73cef6b925b 100644
--- a/source/blender/blenkernel/intern/mesh_tangent.c
+++ b/source/blender/blenkernel/intern/mesh_tangent.c
@@ -224,7 +224,8 @@ typedef struct {
MLoopUV *mloopuv; /* texture coordinates */
const MPoly *mpoly; /* indices */
const MLoop *mloop; /* indices */
- const MVert *mvert; /* vertices & normals */
+ const MVert *mvert; /* vertex coordinates */
+ const float (*vert_normals)[3];
const float (*orco)[3];
float (*tangent)[4]; /* destination */
int numTessFaces;
@@ -398,8 +399,7 @@ finally:
}
}
else {
- const short *no = pMesh->mvert[pMesh->mloop[loop_index].v].no;
- normal_short_to_float_v3(r_no, no);
+ copy_v3_v3(r_no, pMesh->vert_normals[pMesh->mloop[loop_index].v]);
}
}
@@ -557,6 +557,7 @@ void BKE_mesh_calc_loop_tangent_ex(const MVert *mvert,
bool calc_active_tangent,
const char (*tangent_names)[MAX_NAME],
int tangent_names_len,
+ const float (*vert_normals)[3],
const float (*poly_normals)[3],
const float (*loop_normals)[3],
const float (*vert_orco)[3],
@@ -651,6 +652,7 @@ void BKE_mesh_calc_loop_tangent_ex(const MVert *mvert,
mesh2tangent->num_face_as_quad_map = num_face_as_quad_map;
#endif
mesh2tangent->mvert = mvert;
+ mesh2tangent->vert_normals = vert_normals;
mesh2tangent->mpoly = mpoly;
mesh2tangent->mloop = mloop;
mesh2tangent->looptri = looptri;
@@ -743,7 +745,8 @@ void BKE_mesh_calc_loop_tangents(Mesh *me_eval,
calc_active_tangent,
tangent_names,
tangent_names_len,
- CustomData_get_layer(&me_eval->pdata, CD_NORMAL),
+ BKE_mesh_vertex_normals_ensure(me_eval),
+ BKE_mesh_poly_normals_ensure(me_eval),
CustomData_get_layer(&me_eval->ldata, CD_NORMAL),
CustomData_get_layer(&me_eval->vdata, CD_ORCO), /* may be NULL */
/* result */
diff --git a/source/blender/blenkernel/intern/mesh_validate.c b/source/blender/blenkernel/intern/mesh_validate.c
index ba86c0fd449..005c916b4e0 100644
--- a/source/blender/blenkernel/intern/mesh_validate.c
+++ b/source/blender/blenkernel/intern/mesh_validate.c
@@ -303,6 +303,12 @@ bool BKE_mesh_validate_arrays(Mesh *mesh,
recalc_flag.edges = do_fixes;
}
+ const float(*vert_normals)[3] = NULL;
+ BKE_mesh_assert_normals_dirty_or_calculated(mesh);
+ if (!BKE_mesh_vertex_normals_are_dirty(mesh)) {
+ vert_normals = BKE_mesh_vertex_normals_ensure(mesh);
+ }
+
for (i = 0; i < totvert; i++, mv++) {
bool fix_normal = true;
@@ -317,13 +323,13 @@ bool BKE_mesh_validate_arrays(Mesh *mesh,
}
}
- if (mv->no[j] != 0) {
+ if (vert_normals && vert_normals[i][j] != 0.0f) {
fix_normal = false;
break;
}
}
- if (fix_normal) {
+ if (vert_normals && fix_normal) {
/* If the vertex normal accumulates to zero or isn't part of a face, the location is used.
* When the location is also zero, a zero normal warning should not be raised,
* since this is the expected behavior of normal calculation.
@@ -336,7 +342,8 @@ bool BKE_mesh_validate_arrays(Mesh *mesh,
if (!is_zero_v3(mv->co)) {
PRINT_ERR("\tVertex %u: has zero normal, assuming Z-up normal", i);
if (do_fixes) {
- mv->no[2] = SHRT_MAX;
+ float *normal = (float *)vert_normals[i];
+ normal[2] = 1.0f;
fix_flag.verts = true;
}
}
@@ -1001,6 +1008,10 @@ bool BKE_mesh_validate_all_customdata(CustomData *vdata,
CustomData_MeshMasks mask = {0};
if (check_meshmask) {
mask = CD_MASK_MESH;
+ /* Normal data isn't in the mask since it is derived data,
+ * but it is valid and should not be removed. */
+ mask.vmask |= CD_MASK_NORMAL;
+ mask.pmask |= CD_MASK_NORMAL;
}
is_valid &= mesh_validate_customdata(
@@ -1098,6 +1109,8 @@ bool BKE_mesh_is_valid(Mesh *me)
bool is_valid = true;
bool changed = true;
+ BKE_mesh_assert_normals_dirty_or_calculated(me);
+
is_valid &= BKE_mesh_validate_all_customdata(
&me->vdata,
me->totvert,
diff --git a/source/blender/blenkernel/intern/mesh_wrapper.c b/source/blender/blenkernel/intern/mesh_wrapper.c
index bc1ffeb8cf4..d1f15cf9007 100644
--- a/source/blender/blenkernel/intern/mesh_wrapper.c
+++ b/source/blender/blenkernel/intern/mesh_wrapper.c
@@ -36,6 +36,7 @@
#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"
+#include "DNA_modifier_types.h"
#include "DNA_object_types.h"
#include "BLI_ghash.h"
@@ -50,8 +51,14 @@
#include "BKE_mesh.h"
#include "BKE_mesh_runtime.h"
#include "BKE_mesh_wrapper.h"
+#include "BKE_modifier.h"
+#include "BKE_object.h"
+#include "BKE_subdiv.h"
+#include "BKE_subdiv_mesh.h"
+#include "BKE_subdiv_modifier.h"
#include "DEG_depsgraph.h"
+#include "DEG_depsgraph_query.h"
Mesh *BKE_mesh_wrapper_from_editmesh_with_coords(BMEditMesh *em,
const CustomData_MeshMasks *cd_mask_extra,
@@ -106,7 +113,8 @@ static void mesh_wrapper_ensure_mdata_isolated(void *userdata)
me->runtime.wrapper_type = ME_WRAPPER_TYPE_MDATA;
switch (geom_type_orig) {
- case ME_WRAPPER_TYPE_MDATA: {
+ case ME_WRAPPER_TYPE_MDATA:
+ case ME_WRAPPER_TYPE_SUBD: {
break; /* Quiet warning. */
}
case ME_WRAPPER_TYPE_BMESH: {
@@ -157,6 +165,7 @@ bool BKE_mesh_wrapper_minmax(const Mesh *me, float min[3], float max[3])
case ME_WRAPPER_TYPE_BMESH:
return BKE_editmesh_cache_calc_minmax(me->edit_mesh, me->runtime.edit_data, min, max);
case ME_WRAPPER_TYPE_MDATA:
+ case ME_WRAPPER_TYPE_SUBD:
return BKE_mesh_minmax(me, min, max);
}
BLI_assert_unreachable();
@@ -191,7 +200,8 @@ void BKE_mesh_wrapper_vert_coords_copy(const Mesh *me,
}
return;
}
- case ME_WRAPPER_TYPE_MDATA: {
+ case ME_WRAPPER_TYPE_MDATA:
+ case ME_WRAPPER_TYPE_SUBD: {
BLI_assert(vert_coords_len <= me->totvert);
const MVert *mvert = me->mvert;
for (int i = 0; i < vert_coords_len; i++) {
@@ -228,7 +238,8 @@ void BKE_mesh_wrapper_vert_coords_copy_with_mat4(const Mesh *me,
}
return;
}
- case ME_WRAPPER_TYPE_MDATA: {
+ case ME_WRAPPER_TYPE_MDATA:
+ case ME_WRAPPER_TYPE_SUBD: {
BLI_assert(vert_coords_len == me->totvert);
const MVert *mvert = me->mvert;
for (int i = 0; i < vert_coords_len; i++) {
@@ -252,6 +263,7 @@ int BKE_mesh_wrapper_vert_len(const Mesh *me)
case ME_WRAPPER_TYPE_BMESH:
return me->edit_mesh->bm->totvert;
case ME_WRAPPER_TYPE_MDATA:
+ case ME_WRAPPER_TYPE_SUBD:
return me->totvert;
}
BLI_assert_unreachable();
@@ -264,6 +276,7 @@ int BKE_mesh_wrapper_edge_len(const Mesh *me)
case ME_WRAPPER_TYPE_BMESH:
return me->edit_mesh->bm->totedge;
case ME_WRAPPER_TYPE_MDATA:
+ case ME_WRAPPER_TYPE_SUBD:
return me->totedge;
}
BLI_assert_unreachable();
@@ -276,6 +289,7 @@ int BKE_mesh_wrapper_loop_len(const Mesh *me)
case ME_WRAPPER_TYPE_BMESH:
return me->edit_mesh->bm->totloop;
case ME_WRAPPER_TYPE_MDATA:
+ case ME_WRAPPER_TYPE_SUBD:
return me->totloop;
}
BLI_assert_unreachable();
@@ -288,6 +302,7 @@ int BKE_mesh_wrapper_poly_len(const Mesh *me)
case ME_WRAPPER_TYPE_BMESH:
return me->edit_mesh->bm->totface;
case ME_WRAPPER_TYPE_MDATA:
+ case ME_WRAPPER_TYPE_SUBD:
return me->totpoly;
}
BLI_assert_unreachable();
@@ -295,3 +310,73 @@ int BKE_mesh_wrapper_poly_len(const Mesh *me)
}
/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name CPU Subdivision Evaluation
+ * \{ */
+
+Mesh *BKE_mesh_wrapper_ensure_subdivision(const Object *ob, Mesh *me)
+{
+ ThreadMutex *mesh_eval_mutex = (ThreadMutex *)me->runtime.eval_mutex;
+ BLI_mutex_lock(mesh_eval_mutex);
+
+ if (me->runtime.wrapper_type == ME_WRAPPER_TYPE_SUBD) {
+ BLI_mutex_unlock(mesh_eval_mutex);
+ return me->runtime.mesh_eval;
+ }
+
+ SubsurfModifierData *smd = BKE_object_get_last_subsurf_modifier(ob);
+ if (!smd) {
+ BLI_mutex_unlock(mesh_eval_mutex);
+ return me;
+ }
+
+ /* Initialize the settings before ensuring the descriptor, since the settings are what decide
+ * whether subdivision is needed at all, and checking the descriptor status might involve checking
+ * whether the data is out of date, which is a very expensive operation. */
+ SubdivToMeshSettings mesh_settings;
+ mesh_settings.resolution = me->runtime.subsurf_resolution;
+ mesh_settings.use_optimal_display = me->runtime.subsurf_use_optimal_display;
+
+ if (mesh_settings.resolution < 3) {
+ BLI_mutex_unlock(mesh_eval_mutex);
+ return me;
+ }
+
+ const bool apply_render = me->runtime.subsurf_apply_render;
+
+ SubdivSettings subdiv_settings;
+ BKE_subsurf_modifier_subdiv_settings_init(&subdiv_settings, smd, apply_render);
+ if (subdiv_settings.level == 0) {
+ BLI_mutex_unlock(mesh_eval_mutex);
+ return me;
+ }
+
+ SubsurfRuntimeData *runtime_data = BKE_subsurf_modifier_ensure_runtime(smd);
+
+ Subdiv *subdiv = BKE_subsurf_modifier_subdiv_descriptor_ensure(smd, &subdiv_settings, me, false);
+ if (subdiv == NULL) {
+ /* Happens on bad topology, but also on empty input mesh. */
+ BLI_mutex_unlock(mesh_eval_mutex);
+ return me;
+ }
+
+ Mesh *subdiv_mesh = BKE_subdiv_to_mesh(subdiv, &mesh_settings, me);
+
+ if (subdiv != runtime_data->subdiv) {
+ BKE_subdiv_free(subdiv);
+ }
+
+ if (subdiv_mesh != me) {
+ if (me->runtime.mesh_eval != NULL) {
+ BKE_id_free(NULL, me->runtime.mesh_eval);
+ }
+ me->runtime.mesh_eval = subdiv_mesh;
+ me->runtime.wrapper_type = ME_WRAPPER_TYPE_SUBD;
+ }
+
+ BLI_mutex_unlock(mesh_eval_mutex);
+ return me->runtime.mesh_eval;
+}
+
+/** \} */
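
The new entry point is intended to be called on an evaluated mesh; it returns the CPU-subdivided result when a subdivision wrapper could be built and cached in `runtime.mesh_eval`, and otherwise falls back to returning the input mesh unchanged. A hedged call-site sketch (the actual draw/evaluation callers are not part of this hunk):

/* Illustrative usage only, not taken from this diff. */
Mesh *me_final = BKE_mesh_wrapper_ensure_subdivision(ob_eval, me_eval);
if (me_final != me_eval) {
  /* Subdivision was evaluated on the CPU and cached in me_eval->runtime.mesh_eval. */
}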
diff --git a/source/blender/blenkernel/intern/modifier.c b/source/blender/blenkernel/intern/modifier.c
index 5ad8f143b2b..e1fd8ff45d1 100644
--- a/source/blender/blenkernel/intern/modifier.c
+++ b/source/blender/blenkernel/intern/modifier.c
@@ -933,7 +933,7 @@ const char *BKE_modifier_path_relbase(Main *bmain, Object *ob)
* - Else if the file has been saved return the blend file path.
* - Else if the file isn't saved and the ID isn't from a library, return the temp dir.
*/
- if (G.relbase_valid || ID_IS_LINKED(ob)) {
+ if ((bmain->filepath[0] != '\0') || ID_IS_LINKED(ob)) {
return ID_BLEND_PATH(bmain, &ob->id);
}
@@ -948,7 +948,8 @@ const char *BKE_modifier_path_relbase_from_global(Object *ob)
void BKE_modifier_path_init(char *path, int path_maxlen, const char *name)
{
- BLI_join_dirfile(path, path_maxlen, G.relbase_valid ? "//" : BKE_tempdir_session(), name);
+ const char *blendfile_path = BKE_main_blendfile_path_from_global();
+ BLI_join_dirfile(path, path_maxlen, blendfile_path[0] ? "//" : BKE_tempdir_session(), name);
}
/**
@@ -969,6 +970,7 @@ static void modwrap_dependsOnNormals(Mesh *me)
}
break;
}
+ case ME_WRAPPER_TYPE_SUBD:
case ME_WRAPPER_TYPE_MDATA:
BKE_mesh_calc_normals(me);
break;
@@ -982,7 +984,6 @@ struct Mesh *BKE_modifier_modify_mesh(ModifierData *md,
struct Mesh *me)
{
const ModifierTypeInfo *mti = BKE_modifier_get_info(md->type);
- BLI_assert(CustomData_has_layer(&me->pdata, CD_NORMAL) == false);
if (me->runtime.wrapper_type == ME_WRAPPER_TYPE_BMESH) {
if ((mti->flags & eModifierTypeFlag_AcceptsBMesh) == 0) {
@@ -1003,8 +1004,6 @@ void BKE_modifier_deform_verts(ModifierData *md,
int numVerts)
{
const ModifierTypeInfo *mti = BKE_modifier_get_info(md->type);
- BLI_assert(!me || CustomData_has_layer(&me->pdata, CD_NORMAL) == false);
-
if (me && mti->dependsOnNormals && mti->dependsOnNormals(md)) {
modwrap_dependsOnNormals(me);
}
@@ -1019,8 +1018,6 @@ void BKE_modifier_deform_vertsEM(ModifierData *md,
int numVerts)
{
const ModifierTypeInfo *mti = BKE_modifier_get_info(md->type);
- BLI_assert(!me || CustomData_has_layer(&me->pdata, CD_NORMAL) == false);
-
if (me && mti->dependsOnNormals && mti->dependsOnNormals(md)) {
BKE_mesh_calc_normals(me);
}
@@ -1039,8 +1036,11 @@ Mesh *BKE_modifier_get_evaluated_mesh_from_evaluated_object(Object *ob_eval,
BMEditMesh *em = BKE_editmesh_from_object(ob_eval);
/* 'em' might not exist yet in some cases, just after loading a .blend file, see T57878. */
if (em != NULL) {
- me = (get_cage_mesh && em->mesh_eval_cage != NULL) ? em->mesh_eval_cage :
- em->mesh_eval_final;
+ Mesh *editmesh_eval_final = BKE_object_get_editmesh_eval_final(ob_eval);
+ Mesh *editmesh_eval_cage = BKE_object_get_editmesh_eval_cage(ob_eval);
+
+ me = (get_cage_mesh && editmesh_eval_cage != NULL) ? editmesh_eval_cage :
+ editmesh_eval_final;
}
}
if (me == NULL) {
diff --git a/source/blender/blenkernel/intern/movieclip.c b/source/blender/blenkernel/intern/movieclip.c
index fc2e7d0a6a3..88da789cdc4 100644
--- a/source/blender/blenkernel/intern/movieclip.c
+++ b/source/blender/blenkernel/intern/movieclip.c
@@ -70,6 +70,7 @@
#include "BKE_main.h"
#include "BKE_movieclip.h"
#include "BKE_node.h"
+#include "BKE_node_tree_update.h"
#include "BKE_tracking.h"
#include "IMB_imbuf.h"
@@ -105,7 +106,7 @@ static void movie_clip_copy_data(Main *UNUSED(bmain), ID *id_dst, const ID *id_s
MovieClip *movie_clip_dst = (MovieClip *)id_dst;
const MovieClip *movie_clip_src = (const MovieClip *)id_src;
- /* We never handle usercount here for own data. */
+ /* We never handle user-count here for own data. */
const int flag_subdata = flag | LIB_ID_CREATE_NO_USER_REFCOUNT;
movie_clip_dst->anim = NULL;
@@ -952,7 +953,7 @@ static MovieClip *movieclip_alloc(Main *bmain, const char *name)
static void movieclip_load_get_size(MovieClip *clip)
{
int width, height;
- MovieClipUser user = {0};
+ MovieClipUser user = *DNA_struct_default_get(MovieClipUser);
user.framenr = BKE_movieclip_remap_clip_to_scene_frame(clip, 1);
BKE_movieclip_get_size(clip, &user, &width, &height);
@@ -1177,7 +1178,7 @@ static ImBuf *get_postprocessed_cached_frame(const MovieClip *clip,
return NULL;
}
- /* postprocessing happened for other frame */
+ /* Postprocessing happened for other frame. */
if (cache->postprocessed.framenr != framenr) {
return NULL;
}
@@ -1695,17 +1696,7 @@ void BKE_movieclip_reload(Main *bmain, MovieClip *clip)
movieclip_calc_length(clip);
- /* same as for image update -- don't use notifiers because they are not 100% sure to succeeded
- * (node trees which are not currently visible wouldn't be refreshed)
- */
- {
- Scene *scene;
- for (scene = bmain->scenes.first; scene; scene = scene->id.next) {
- if (scene->nodetree) {
- nodeUpdateID(scene->nodetree, &clip->id);
- }
- }
- }
+ BKE_ntree_update_tag_id_changed(bmain, &clip->id);
}
void BKE_movieclip_update_scopes(MovieClip *clip, MovieClipUser *user, MovieClipScopes *scopes)
diff --git a/source/blender/blenkernel/intern/multires_reshape.h b/source/blender/blenkernel/intern/multires_reshape.h
index db419418998..a038ce5f108 100644
--- a/source/blender/blenkernel/intern/multires_reshape.h
+++ b/source/blender/blenkernel/intern/multires_reshape.h
@@ -106,6 +106,9 @@ typedef struct MultiresReshapeContext {
/* Indexed by base face index, returns first ptex face index corresponding
* to that base face. */
int *face_ptex_offset;
+
+ /* Vertex crease custom data layer, null if none is present. */
+ const float *cd_vertex_crease;
} MultiresReshapeContext;
/**
@@ -225,8 +228,8 @@ GridCoord multires_reshape_ptex_coord_to_grid(const MultiresReshapeContext *resh
* Is calculated for the given surface derivatives at a given base face corner.
*/
void multires_reshape_tangent_matrix_for_corner(const MultiresReshapeContext *reshape_context,
- const int face_index,
- const int corner,
+ int face_index,
+ int corner,
const float dPdu[3],
const float dPdv[3],
float r_tangent_matrix[3][3]);
@@ -266,7 +269,7 @@ void multires_reshape_evaluate_limit_at_grid(const MultiresReshapeContext *resha
/**
* Make sure custom data is allocated for the given level.
*/
-void multires_reshape_ensure_grids(struct Mesh *mesh, const int level);
+void multires_reshape_ensure_grids(struct Mesh *mesh, int level);
/* --------------------------------------------------------------------
* Functions specific to reshaping from a set of vertices in a object position.
@@ -283,7 +286,7 @@ void multires_reshape_ensure_grids(struct Mesh *mesh, const int level);
bool multires_reshape_assign_final_coords_from_vertcos(
const MultiresReshapeContext *reshape_context,
const float (*vert_coords)[3],
- const int num_vert_coords);
+ int num_vert_coords);
/* --------------------------------------------------------------------
* Functions specific to reshaping from CCG.
@@ -338,7 +341,7 @@ void multires_reshape_smooth_object_grids_with_details(
* Makes it so surface on top level looks smooth. Details are not preserved
*/
void multires_reshape_smooth_object_grids(const MultiresReshapeContext *reshape_context,
- const enum eMultiresSubdivideModeType mode);
+ enum eMultiresSubdivideModeType mode);
/* --------------------------------------------------------------------
* Displacement, space conversion.
diff --git a/source/blender/blenkernel/intern/multires_reshape_smooth.c b/source/blender/blenkernel/intern/multires_reshape_smooth.c
index 3665d01926b..839c457dd84 100644
--- a/source/blender/blenkernel/intern/multires_reshape_smooth.c
+++ b/source/blender/blenkernel/intern/multires_reshape_smooth.c
@@ -75,6 +75,7 @@ typedef struct Vertex {
int num_grid_coords;
GridCoord *grid_coords;
+ float sharpness;
bool is_infinite_sharp;
} Vertex;
@@ -489,19 +490,33 @@ static int get_reshape_level_resolution(const MultiresReshapeContext *reshape_co
return (1 << reshape_context->reshape.level) + 1;
}
+static bool is_crease_supported(const MultiresReshapeSmoothContext *reshape_smooth_context)
+{
+ return !ELEM(reshape_smooth_context->smoothing_type,
+ MULTIRES_SUBDIVIDE_LINEAR,
+ MULTIRES_SUBDIVIDE_SIMPLE);
+}
+
/* Get crease which will be used for communication to OpenSubdiv topology.
* Note that simple subdivision treats all base edges as infinitely sharp. */
-static char get_effective_edge_crease_char(
- const MultiresReshapeSmoothContext *reshape_smooth_context, const MEdge *base_edge)
+static char get_effective_crease_char(const MultiresReshapeSmoothContext *reshape_smooth_context,
+ const MEdge *base_edge)
{
- if (ELEM(reshape_smooth_context->smoothing_type,
- MULTIRES_SUBDIVIDE_LINEAR,
- MULTIRES_SUBDIVIDE_SIMPLE)) {
+ if (!is_crease_supported(reshape_smooth_context)) {
return 255;
}
return base_edge->crease;
}
+static float get_effective_crease_float(const MultiresReshapeSmoothContext *reshape_smooth_context,
+ const float crease)
+{
+ if (!is_crease_supported(reshape_smooth_context)) {
+ return 1.0f;
+ }
+ return crease;
+}
+
static void context_init(MultiresReshapeSmoothContext *reshape_smooth_context,
const MultiresReshapeContext *reshape_context,
const eMultiresSubdivideModeType mode)
@@ -566,7 +581,8 @@ static bool foreach_topology_info(const SubdivForeachContext *foreach_context,
const int num_vertices,
const int num_edges,
const int num_loops,
- const int num_polygons)
+ const int num_polygons,
+ const int *UNUSED(subdiv_polygon_offset))
{
MultiresReshapeSmoothContext *reshape_smooth_context = foreach_context->user_data;
const int max_edges = reshape_smooth_context->smoothing_type == MULTIRES_SUBDIVIDE_LINEAR ?
@@ -595,6 +611,7 @@ static bool foreach_topology_info(const SubdivForeachContext *foreach_context,
static void foreach_single_vertex(const SubdivForeachContext *foreach_context,
const GridCoord *grid_coord,
+ const int coarse_vertex_index,
const int subdiv_vertex_index)
{
const MultiresReshapeSmoothContext *reshape_smooth_context = foreach_context->user_data;
@@ -607,11 +624,32 @@ static void foreach_single_vertex(const SubdivForeachContext *foreach_context,
sizeof(Vertex) * (vertex->num_grid_coords + 1));
vertex->grid_coords[vertex->num_grid_coords] = *grid_coord;
++vertex->num_grid_coords;
+
+ if (coarse_vertex_index == -1) {
+ return;
+ }
+
+ const MultiresReshapeContext *reshape_context = reshape_smooth_context->reshape_context;
+ const float *cd_vertex_crease = reshape_context->cd_vertex_crease;
+
+ if (cd_vertex_crease == NULL) {
+ return;
+ }
+
+ float crease = cd_vertex_crease[coarse_vertex_index];
+
+ if (crease == 0.0f) {
+ return;
+ }
+
+ crease = get_effective_crease_float(reshape_smooth_context, crease);
+ vertex->sharpness = BKE_subdiv_crease_to_sharpness_f(crease);
}
/* TODO(sergey): De-duplicate with similar function in multires_reshape_vertcos.c */
static void foreach_vertex(const SubdivForeachContext *foreach_context,
const PTexCoord *ptex_coord,
+ const int coarse_vertex_index,
const int subdiv_vertex_index)
{
const MultiresReshapeSmoothContext *reshape_smooth_context = foreach_context->user_data;
@@ -631,12 +669,13 @@ static void foreach_vertex(const SubdivForeachContext *foreach_context,
for (int current_corner = 0; current_corner < num_corners; ++current_corner) {
GridCoord corner_grid_coord = grid_coord;
corner_grid_coord.grid_index = start_grid_index + current_corner;
- foreach_single_vertex(foreach_context, &corner_grid_coord, subdiv_vertex_index);
+ foreach_single_vertex(
+ foreach_context, &corner_grid_coord, coarse_vertex_index, subdiv_vertex_index);
}
return;
}
- foreach_single_vertex(foreach_context, &grid_coord, subdiv_vertex_index);
+ foreach_single_vertex(foreach_context, &grid_coord, coarse_vertex_index, subdiv_vertex_index);
if (grid_coord.u == 0.0f) {
GridCoord prev_grid_coord;
@@ -644,7 +683,8 @@ static void foreach_vertex(const SubdivForeachContext *foreach_context,
prev_grid_coord.u = grid_coord.v;
prev_grid_coord.v = 0.0f;
- foreach_single_vertex(foreach_context, &prev_grid_coord, subdiv_vertex_index);
+ foreach_single_vertex(
+ foreach_context, &prev_grid_coord, coarse_vertex_index, subdiv_vertex_index);
}
if (grid_coord.v == 0.0f) {
@@ -653,7 +693,8 @@ static void foreach_vertex(const SubdivForeachContext *foreach_context,
next_grid_coord.u = 0.0f;
next_grid_coord.v = grid_coord.u;
- foreach_single_vertex(foreach_context, &next_grid_coord, subdiv_vertex_index);
+ foreach_single_vertex(
+ foreach_context, &next_grid_coord, coarse_vertex_index, subdiv_vertex_index);
}
}
@@ -671,7 +712,7 @@ static void foreach_vertex_inner(const struct SubdivForeachContext *foreach_cont
.u = ptex_face_u,
.v = ptex_face_v,
};
- foreach_vertex(foreach_context, &ptex_coord, subdiv_vertex_index);
+ foreach_vertex(foreach_context, &ptex_coord, -1, subdiv_vertex_index);
}
static void foreach_vertex_every_corner(const struct SubdivForeachContext *foreach_context,
@@ -679,7 +720,7 @@ static void foreach_vertex_every_corner(const struct SubdivForeachContext *forea
const int ptex_face_index,
const float ptex_face_u,
const float ptex_face_v,
- const int UNUSED(coarse_vertex_index),
+ const int coarse_vertex_index,
const int UNUSED(coarse_face_index),
const int UNUSED(coarse_face_corner),
const int subdiv_vertex_index)
@@ -689,7 +730,7 @@ static void foreach_vertex_every_corner(const struct SubdivForeachContext *forea
.u = ptex_face_u,
.v = ptex_face_v,
};
- foreach_vertex(foreach_context, &ptex_coord, subdiv_vertex_index);
+ foreach_vertex(foreach_context, &ptex_coord, coarse_vertex_index, subdiv_vertex_index);
}
static void foreach_vertex_every_edge(const struct SubdivForeachContext *foreach_context,
@@ -707,7 +748,7 @@ static void foreach_vertex_every_edge(const struct SubdivForeachContext *foreach
.u = ptex_face_u,
.v = ptex_face_v,
};
- foreach_vertex(foreach_context, &ptex_coord, subdiv_vertex_index);
+ foreach_vertex(foreach_context, &ptex_coord, -1, subdiv_vertex_index);
}
static void foreach_loop(const struct SubdivForeachContext *foreach_context,
@@ -777,7 +818,7 @@ static void store_edge(MultiresReshapeSmoothContext *reshape_smooth_context,
Edge *edge = &reshape_smooth_context->geometry.edges[edge_index];
edge->v1 = subdiv_v1;
edge->v2 = subdiv_v2;
- edge->sharpness = BKE_subdiv_edge_crease_to_sharpness_char(crease);
+ edge->sharpness = BKE_subdiv_crease_to_sharpness_char(crease);
}
static void foreach_edge(const struct SubdivForeachContext *foreach_context,
@@ -808,7 +849,7 @@ static void foreach_edge(const struct SubdivForeachContext *foreach_context,
/* Edges without crease are to be ignored as well. */
const Mesh *base_mesh = reshape_context->base_mesh;
const MEdge *base_edge = &base_mesh->medge[coarse_edge_index];
- const char crease = get_effective_edge_crease_char(reshape_smooth_context, base_edge);
+ const char crease = get_effective_crease_char(reshape_smooth_context, base_edge);
if (crease == 0) {
return;
}
@@ -834,8 +875,7 @@ static void geometry_init_loose_information(MultiresReshapeSmoothContext *reshap
if (!BLI_BITMAP_TEST_BOOL(reshape_smooth_context->non_loose_base_edge_map, loop->e)) {
BLI_BITMAP_ENABLE(reshape_smooth_context->non_loose_base_edge_map, loop->e);
- const char crease = get_effective_edge_crease_char(reshape_smooth_context,
- &base_edge[loop->e]);
+ const char crease = get_effective_crease_char(reshape_smooth_context, &base_edge[loop->e]);
if (crease != 0) {
++num_used_edges;
}
@@ -978,6 +1018,15 @@ static float get_edge_sharpness(const OpenSubdiv_Converter *converter, const int
return edge->sharpness;
}
+static float get_vertex_sharpness(const OpenSubdiv_Converter *converter, const int vertex_index)
+{
+ const MultiresReshapeSmoothContext *reshape_smooth_context = converter->user_data;
+ BLI_assert(vertex_index < reshape_smooth_context->geometry.num_vertices);
+
+ const Vertex *vertex = &reshape_smooth_context->geometry.vertices[vertex_index];
+ return vertex->sharpness;
+}
+
static bool is_infinite_sharp_vertex(const OpenSubdiv_Converter *converter, int vertex_index)
{
const MultiresReshapeSmoothContext *reshape_smooth_context = converter->user_data;
@@ -1014,7 +1063,7 @@ static void converter_init(const MultiresReshapeSmoothContext *reshape_smooth_co
converter->getNumVertexFaces = NULL;
converter->getVertexFaces = NULL;
converter->isInfiniteSharpVertex = is_infinite_sharp_vertex;
- converter->getVertexSharpness = NULL;
+ converter->getVertexSharpness = get_vertex_sharpness;
converter->getNumUVLayers = NULL;
converter->precalcUVLayer = NULL;
@@ -1037,7 +1086,7 @@ static void reshape_subdiv_create(MultiresReshapeSmoothContext *reshape_smooth_c
converter_init(reshape_smooth_context, &converter);
Subdiv *reshape_subdiv = BKE_subdiv_new_from_converter(settings, &converter);
- BKE_subdiv_eval_begin(reshape_subdiv);
+ BKE_subdiv_eval_begin(reshape_subdiv, SUBDIV_EVALUATOR_TYPE_CPU, NULL);
reshape_smooth_context->reshape_subdiv = reshape_subdiv;
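
Vertex creases now take the same route that edge creases already did: the optional `CD_CREASE` layer is read per coarse vertex, remapped through `get_effective_crease_float()` (simple and linear subdivision disable creasing), converted with `BKE_subdiv_crease_to_sharpness_f()` and reported to OpenSubdiv through the previously unset `getVertexSharpness` callback. Condensed from the hunks above, for orientation only:

/* Condensed data flow; see foreach_single_vertex() above for the real code. */
const float *cd_vertex_crease = reshape_context->cd_vertex_crease; /* May be NULL. */
if (cd_vertex_crease != NULL && coarse_vertex_index != -1) {
  float crease = cd_vertex_crease[coarse_vertex_index];
  if (crease != 0.0f) {
    crease = get_effective_crease_float(reshape_smooth_context, crease);
    vertex->sharpness = BKE_subdiv_crease_to_sharpness_f(crease);
  }
}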
diff --git a/source/blender/blenkernel/intern/multires_reshape_util.c b/source/blender/blenkernel/intern/multires_reshape_util.c
index b7572204182..810cf328531 100644
--- a/source/blender/blenkernel/intern/multires_reshape_util.c
+++ b/source/blender/blenkernel/intern/multires_reshape_util.c
@@ -65,7 +65,7 @@ Subdiv *multires_reshape_create_subdiv(Depsgraph *depsgraph,
SubdivSettings subdiv_settings;
BKE_multires_subdiv_settings_init(&subdiv_settings, mmd);
Subdiv *subdiv = BKE_subdiv_new_from_mesh(&subdiv_settings, base_mesh);
- if (!BKE_subdiv_eval_begin_from_mesh(subdiv, base_mesh, NULL)) {
+ if (!BKE_subdiv_eval_begin_from_mesh(subdiv, base_mesh, NULL, SUBDIV_EVALUATOR_TYPE_CPU, NULL)) {
BKE_subdiv_free(subdiv);
return NULL;
}
@@ -135,7 +135,7 @@ static void context_init_commoon(MultiresReshapeContext *reshape_context)
static bool context_is_valid(MultiresReshapeContext *reshape_context)
{
if (reshape_context->mdisps == NULL) {
- /* Multires displacement has been removed before current changes were applies. */
+ /* Multi-resolution displacement has been removed before current changes were applied. */
return false;
}
return true;
@@ -211,6 +211,8 @@ bool multires_reshape_context_create_from_object(MultiresReshapeContext *reshape
reshape_context->top.level = mmd->totlvl;
reshape_context->top.grid_size = BKE_subdiv_grid_size_from_level(reshape_context->top.level);
+ reshape_context->cd_vertex_crease = CustomData_get_layer(&base_mesh->vdata, CD_CREASE);
+
context_init_commoon(reshape_context);
return context_verify_or_free(reshape_context);
diff --git a/source/blender/blenkernel/intern/multires_reshape_vertcos.c b/source/blender/blenkernel/intern/multires_reshape_vertcos.c
index ed2df1ba8c5..c009349ff1b 100644
--- a/source/blender/blenkernel/intern/multires_reshape_vertcos.c
+++ b/source/blender/blenkernel/intern/multires_reshape_vertcos.c
@@ -114,7 +114,8 @@ static bool multires_reshape_vertcos_foreach_topology_info(
const int num_vertices,
const int UNUSED(num_edges),
const int UNUSED(num_loops),
- const int UNUSED(num_polygons))
+ const int UNUSED(num_polygons),
+ const int *UNUSED(subdiv_polygon_offset))
{
MultiresReshapeAssignVertcosContext *reshape_vertcos_context = foreach_context->user_data;
if (num_vertices != reshape_vertcos_context->num_vert_coords) {
diff --git a/source/blender/blenkernel/intern/multires_versioning.c b/source/blender/blenkernel/intern/multires_versioning.c
index 4c0d7165cd0..18708c43f26 100644
--- a/source/blender/blenkernel/intern/multires_versioning.c
+++ b/source/blender/blenkernel/intern/multires_versioning.c
@@ -61,7 +61,7 @@ static Subdiv *subdiv_for_simple_to_catmull_clark(Object *object, MultiresModifi
Subdiv *subdiv = BKE_subdiv_new_from_converter(&subdiv_settings, &converter);
BKE_subdiv_converter_free(&converter);
- if (!BKE_subdiv_eval_begin_from_mesh(subdiv, base_mesh, NULL)) {
+ if (!BKE_subdiv_eval_begin_from_mesh(subdiv, base_mesh, NULL, SUBDIV_EVALUATOR_TYPE_CPU, NULL)) {
BKE_subdiv_free(subdiv);
return NULL;
}
diff --git a/source/blender/blenkernel/intern/node.cc b/source/blender/blenkernel/intern/node.cc
index a4de6730f8f..40d0c24c9af 100644
--- a/source/blender/blenkernel/intern/node.cc
+++ b/source/blender/blenkernel/intern/node.cc
@@ -65,6 +65,7 @@
#include "BKE_animsys.h"
#include "BKE_bpath.h"
#include "BKE_colortools.h"
+#include "BKE_context.h"
#include "BKE_cryptomatte.h"
#include "BKE_global.h"
#include "BKE_icons.h"
@@ -74,6 +75,7 @@
#include "BKE_lib_query.h"
#include "BKE_main.h"
#include "BKE_node.h"
+#include "BKE_node_tree_update.h"
#include "RNA_access.h"
#include "RNA_define.h"
@@ -98,10 +100,12 @@
#define NODE_DEFAULT_MAX_WIDTH 700
using blender::Array;
+using blender::Map;
using blender::MutableSpan;
using blender::Set;
using blender::Span;
using blender::Stack;
+using blender::StringRef;
using blender::Vector;
using blender::VectorSet;
using blender::nodes::FieldInferencingInterface;
@@ -150,62 +154,42 @@ static void ntree_copy_data(Main *UNUSED(bmain), ID *id_dst, const ID *id_src, c
BLI_listbase_clear(&ntree_dst->nodes);
BLI_listbase_clear(&ntree_dst->links);
- /* Since source nodes and sockets are unique pointers we can put everything in a single map. */
- GHash *new_pointers = BLI_ghash_ptr_new(__func__);
+ Map<const bNode *, bNode *> node_map;
+ Map<const bNodeSocket *, bNodeSocket *> socket_map;
- LISTBASE_FOREACH (const bNode *, node_src, &ntree_src->nodes) {
- bNode *new_node = BKE_node_copy_ex(ntree_dst, node_src, flag_subdata, true);
- BLI_ghash_insert(new_pointers, (void *)node_src, new_node);
- /* Store mapping to inputs. */
- bNodeSocket *new_input_sock = (bNodeSocket *)new_node->inputs.first;
- const bNodeSocket *input_sock_src = (const bNodeSocket *)node_src->inputs.first;
- while (new_input_sock != nullptr) {
- BLI_ghash_insert(new_pointers, (void *)input_sock_src, new_input_sock);
- new_input_sock = new_input_sock->next;
- input_sock_src = input_sock_src->next;
- }
- /* Store mapping to outputs. */
- bNodeSocket *new_output_sock = (bNodeSocket *)new_node->outputs.first;
- const bNodeSocket *output_sock_src = (const bNodeSocket *)node_src->outputs.first;
- while (new_output_sock != nullptr) {
- BLI_ghash_insert(new_pointers, (void *)output_sock_src, new_output_sock);
- new_output_sock = new_output_sock->next;
- output_sock_src = output_sock_src->next;
- }
+ BLI_listbase_clear(&ntree_dst->nodes);
+ LISTBASE_FOREACH (const bNode *, src_node, &ntree_src->nodes) {
+ /* Don't find a unique name for every node, since they should have valid names already. */
+ bNode *new_node = blender::bke::node_copy_with_mapping(
+ ntree_dst, *src_node, flag_subdata, false, socket_map);
+ node_map.add(src_node, new_node);
}
/* copy links */
- BLI_duplicatelist(&ntree_dst->links, &ntree_src->links);
- LISTBASE_FOREACH (bNodeLink *, link_dst, &ntree_dst->links) {
- link_dst->fromnode = (bNode *)BLI_ghash_lookup_default(
- new_pointers, link_dst->fromnode, nullptr);
- link_dst->fromsock = (bNodeSocket *)BLI_ghash_lookup_default(
- new_pointers, link_dst->fromsock, nullptr);
- link_dst->tonode = (bNode *)BLI_ghash_lookup_default(new_pointers, link_dst->tonode, nullptr);
- link_dst->tosock = (bNodeSocket *)BLI_ghash_lookup_default(
- new_pointers, link_dst->tosock, nullptr);
- /* update the link socket's pointer */
- if (link_dst->tosock) {
- link_dst->tosock->link = link_dst;
- }
+ BLI_listbase_clear(&ntree_dst->links);
+ LISTBASE_FOREACH (const bNodeLink *, src_link, &ntree_src->links) {
+ bNodeLink *dst_link = (bNodeLink *)MEM_dupallocN(src_link);
+ dst_link->fromnode = node_map.lookup(src_link->fromnode);
+ dst_link->fromsock = socket_map.lookup(src_link->fromsock);
+ dst_link->tonode = node_map.lookup(src_link->tonode);
+ dst_link->tosock = socket_map.lookup(src_link->tosock);
+ BLI_assert(dst_link->tosock);
+ dst_link->tosock->link = dst_link;
+ BLI_addtail(&ntree_dst->links, dst_link);
}
/* copy interface sockets */
- BLI_duplicatelist(&ntree_dst->inputs, &ntree_src->inputs);
- bNodeSocket *sock_dst, *sock_src;
- for (sock_dst = (bNodeSocket *)ntree_dst->inputs.first,
- sock_src = (bNodeSocket *)ntree_src->inputs.first;
- sock_dst != nullptr;
- sock_dst = (bNodeSocket *)sock_dst->next, sock_src = (bNodeSocket *)sock_src->next) {
- node_socket_copy(sock_dst, sock_src, flag_subdata);
+ BLI_listbase_clear(&ntree_dst->inputs);
+ LISTBASE_FOREACH (const bNodeSocket *, src_socket, &ntree_src->inputs) {
+ bNodeSocket *dst_socket = (bNodeSocket *)MEM_dupallocN(src_socket);
+ node_socket_copy(dst_socket, src_socket, flag_subdata);
+ BLI_addtail(&ntree_dst->inputs, dst_socket);
}
-
- BLI_duplicatelist(&ntree_dst->outputs, &ntree_src->outputs);
- for (sock_dst = (bNodeSocket *)ntree_dst->outputs.first,
- sock_src = (bNodeSocket *)ntree_src->outputs.first;
- sock_dst != nullptr;
- sock_dst = (bNodeSocket *)sock_dst->next, sock_src = (bNodeSocket *)sock_src->next) {
- node_socket_copy(sock_dst, sock_src, flag_subdata);
+ BLI_listbase_clear(&ntree_dst->outputs);
+ LISTBASE_FOREACH (const bNodeSocket *, src_socket, &ntree_src->outputs) {
+ bNodeSocket *dst_socket = (bNodeSocket *)MEM_dupallocN(src_socket);
+ node_socket_copy(dst_socket, src_socket, flag_subdata);
+ BLI_addtail(&ntree_dst->outputs, dst_socket);
}
/* copy preview hash */
@@ -225,18 +209,11 @@ static void ntree_copy_data(Main *UNUSED(bmain), ID *id_dst, const ID *id_src, c
}
/* update node->parent pointers */
- for (bNode *node_dst = (bNode *)ntree_dst->nodes.first,
- *node_src = (bNode *)ntree_src->nodes.first;
- node_dst;
- node_dst = (bNode *)node_dst->next, node_src = (bNode *)node_src->next) {
- if (node_dst->parent) {
- node_dst->parent = (bNode *)BLI_ghash_lookup_default(
- new_pointers, node_dst->parent, nullptr);
+ LISTBASE_FOREACH (bNode *, new_node, &ntree_dst->nodes) {
+ if (new_node->parent) {
+ new_node->parent = node_map.lookup(new_node->parent);
}
}
-
- BLI_ghash_free(new_pointers, nullptr, nullptr);
-
/* node tree will generate its own interface type */
ntree_dst->interface_type = nullptr;
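
The rewritten copy above replaces the single untyped `GHash` of node and socket pointers with two typed `blender::Map`s; `node_copy_with_mapping()` fills the socket map as a side effect, so links and parent pointers can then be remapped with plain `lookup()` calls, which assert (rather than silently return null) when a pointer is missing. The general shape of the idiom, simplified and with hypothetical container names:

/* Illustrative C++ sketch; src_nodes and dst_links stand in for the ListBase iteration above. */
blender::Map<const bNode *, bNode *> node_map;
for (const bNode *src_node : src_nodes) {
  node_map.add(src_node, copy_node(src_node)); /* copy_node() stands in for node_copy_with_mapping(). */
}
for (bNodeLink *link : dst_links) {
  link->fromnode = node_map.lookup(link->fromnode); /* lookup() asserts that the key exists. */
  link->tonode = node_map.lookup(link->tonode);
}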
@@ -260,8 +237,7 @@ static void ntree_free_data(ID *id)
/* XXX hack! node trees should not store execution graphs at all.
* This should be removed when old tree types no longer require it.
* Currently the execution data for texture nodes remains in the tree
- * after execution, until the node tree is updated or freed.
- */
+ * after execution, until the node tree is updated or freed. */
if (ntree->execdata) {
switch (ntree->type) {
case NTREE_SHADER:
@@ -277,10 +253,10 @@ static void ntree_free_data(ID *id)
/* XXX not nice, but needed to free localized node groups properly */
free_localized_node_groups(ntree);
- /* unregister associated RNA types */
+ /* Unregister associated RNA types. */
ntreeInterfaceTypeFree(ntree);
- BLI_freelistN(&ntree->links); /* do first, then unlink_node goes fast */
+ BLI_freelistN(&ntree->links);
LISTBASE_FOREACH_MUTABLE (bNode *, node, &ntree->nodes) {
node_free_node(ntree, node);
@@ -521,7 +497,6 @@ static void write_node_socket_default_value(BlendWriter *writer, bNodeSocket *so
static void write_node_socket(BlendWriter *writer, bNodeSocket *sock)
{
- /* actual socket writing */
BLO_write_struct(writer, bNodeSocket, sock);
if (sock->prop) {
@@ -532,7 +507,6 @@ static void write_node_socket(BlendWriter *writer, bNodeSocket *sock)
}
static void write_node_socket_interface(BlendWriter *writer, bNodeSocket *sock)
{
- /* actual socket writing */
BLO_write_struct(writer, bNodeSocket, sock);
if (sock->prop) {
@@ -546,8 +520,6 @@ void ntreeBlendWrite(BlendWriter *writer, bNodeTree *ntree)
{
BKE_id_blend_write(writer, &ntree->id);
- /* for link_list() speed, we write per list */
-
if (ntree->adt) {
BKE_animdata_blend_write(writer, ntree->adt);
}
@@ -571,7 +543,6 @@ void ntreeBlendWrite(BlendWriter *writer, bNodeTree *ntree)
}
if (node->storage) {
- /* could be handlerized at some point, now only 1 exception still */
if (ELEM(ntree->type, NTREE_SHADER, NTREE_GEOMETRY) &&
ELEM(node->type, SH_NODE_CURVE_VEC, SH_NODE_CURVE_RGB, SH_NODE_CURVE_FLOAT)) {
BKE_curvemapping_blend_write(writer, (const CurveMapping *)node->storage);
@@ -645,13 +616,13 @@ void ntreeBlendWrite(BlendWriter *writer, bNodeTree *ntree)
}
if (node->type == CMP_NODE_OUTPUT_FILE) {
- /* inputs have own storage data */
+ /* Inputs have their own storage data. */
LISTBASE_FOREACH (bNodeSocket *, sock, &node->inputs) {
BLO_write_struct(writer, NodeImageMultiFileSocket, sock->storage);
}
}
if (ELEM(node->type, CMP_NODE_IMAGE, CMP_NODE_R_LAYERS)) {
- /* write extra socket info */
+ /* Write extra socket info. */
LISTBASE_FOREACH (bNodeSocket *, sock, &node->outputs) {
BLO_write_struct(writer, NodeImageLayer, sock->storage);
}
@@ -715,6 +686,7 @@ void ntreeBlendReadData(BlendDataReader *reader, bNodeTree *ntree)
ntree->execdata = nullptr;
ntree->field_inferencing_interface = nullptr;
+ BKE_ntree_update_tag_missing_runtime_data(ntree);
BLO_read_data_address(reader, &ntree->adt);
BKE_animdata_blend_read_data(reader, ntree->adt);
@@ -747,7 +719,6 @@ void ntreeBlendReadData(BlendDataReader *reader, bNodeTree *ntree)
}
if (node->storage) {
- /* could be handlerized at some point */
switch (node->type) {
case SH_NODE_CURVE_VEC:
case SH_NODE_CURVE_RGB:
@@ -858,11 +829,6 @@ void ntreeBlendReadData(BlendDataReader *reader, bNodeTree *ntree)
/* TODO: should be dealt by new generic cache handling of IDs... */
ntree->previews = nullptr;
- if (ntree->type == NTREE_GEOMETRY) {
- /* Update field referencing for the geometry nodes modifier. */
- ntree->update |= NTREE_UPDATE_FIELD_INFERENCING;
- }
-
BLO_read_data_address(reader, &ntree->preview);
BKE_previewimg_blend_read(reader, ntree->preview);
@@ -1095,21 +1061,18 @@ static void node_add_sockets_from_type(bNodeTree *ntree, bNode *node, bNodeType
return;
}
bNodeSocketTemplate *sockdef;
- /* bNodeSocket *sock; */ /* UNUSED */
if (ntype->inputs) {
sockdef = ntype->inputs;
while (sockdef->type != -1) {
- /* sock = */ node_add_socket_from_template(ntree, node, sockdef, SOCK_IN);
-
+ node_add_socket_from_template(ntree, node, sockdef, SOCK_IN);
sockdef++;
}
}
if (ntype->outputs) {
sockdef = ntype->outputs;
while (sockdef->type != -1) {
- /* sock = */ node_add_socket_from_template(ntree, node, sockdef, SOCK_OUT);
-
+ node_add_socket_from_template(ntree, node, sockdef, SOCK_OUT);
sockdef++;
}
}
@@ -1167,8 +1130,7 @@ static void node_init(const struct bContext *C, bNodeTree *ntree, bNode *node)
RNA_pointer_create((ID *)ntree, &RNA_Node, node, &ptr);
/* XXX Warning: context can be nullptr in case nodes are added in do_versions.
- * Delayed init is not supported for nodes with context-based `initfunc_api` at the moment.
- */
+ * Delayed init is not supported for nodes with context-based `initfunc_api` at the moment. */
BLI_assert(C != nullptr);
ntype->initfunc_api(C, &ptr);
}
@@ -1189,6 +1151,7 @@ static void ntree_set_typeinfo(bNodeTree *ntree, bNodeTreeType *typeinfo)
/* Deprecated integer type. */
ntree->type = ntree->typeinfo->type;
+ BKE_ntree_update_tag_all(ntree);
}
static void node_set_typeinfo(const struct bContext *C,
@@ -1239,6 +1202,7 @@ static void node_socket_set_typeinfo(bNodeTree *ntree,
ntree->init &= ~NTREE_TYPE_INIT;
}
+ BKE_ntree_update_tag_socket_type(ntree, sock);
}
/* Set specific typeinfo pointers in all node trees on register/unregister */
@@ -1382,18 +1346,6 @@ bNodeType *nodeTypeFind(const char *idname)
return nullptr;
}
-static void free_dynamic_typeinfo(bNodeType *ntype)
-{
- if (ntype->type == NODE_DYNAMIC) {
- if (ntype->inputs) {
- MEM_freeN(ntype->inputs);
- }
- if (ntype->outputs) {
- MEM_freeN(ntype->outputs);
- }
- }
-}
-
/* callback for hash value free function */
static void node_free_type(void *nodetype_v)
{
@@ -1403,11 +1355,6 @@ static void node_free_type(void *nodetype_v)
* or we'd want to update *all* active Mains, which we cannot do anyway currently. */
update_typeinfo(G_MAIN, nullptr, nullptr, nodetype, nullptr, true);
- /* XXX deprecated */
- if (nodetype->type == NODE_DYNAMIC) {
- free_dynamic_typeinfo(nodetype);
- }
-
delete nodetype->fixed_declaration;
nodetype->fixed_declaration = nullptr;
@@ -1522,6 +1469,33 @@ struct bNodeSocket *nodeFindSocket(const bNode *node,
return nullptr;
}
+namespace blender::bke {
+
+bNodeSocket *node_find_enabled_socket(bNode &node,
+ const eNodeSocketInOut in_out,
+ const StringRef name)
+{
+ ListBase *sockets = (in_out == SOCK_IN) ? &node.inputs : &node.outputs;
+ LISTBASE_FOREACH (bNodeSocket *, socket, sockets) {
+ if (!(socket->flag & SOCK_UNAVAIL) && socket->name == name) {
+ return socket;
+ }
+ }
+ return nullptr;
+}
+
+bNodeSocket *node_find_enabled_input_socket(bNode &node, StringRef name)
+{
+ return node_find_enabled_socket(node, SOCK_IN, name);
+}
+
+bNodeSocket *node_find_enabled_output_socket(bNode &node, StringRef name)
+{
+ return node_find_enabled_socket(node, SOCK_OUT, name);
+}
+
+} // namespace blender::bke
+
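
The helpers added above give callers a name-based lookup that skips unavailable sockets. A minimal usage sketch, assuming Blender's BKE_node.h and the blender::bke namespace from this diff (the "Geometry" socket name is hypothetical):

static bool geometry_input_is_linked(bNode &node)
{
  using namespace blender::bke;
  bNodeSocket *socket = node_find_enabled_input_socket(node, "Geometry");
  /* `link` is only set on linked input sockets, so this doubles as a link check. */
  return socket != nullptr && socket->link != nullptr;
}
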
/* find unique socket identifier */
static bool unique_identifier_check(void *arg, const char *identifier)
{
@@ -1552,11 +1526,11 @@ static bNodeSocket *make_socket(bNodeTree *ntree,
/* if no explicit identifier is given, assign a unique identifier based on the name */
BLI_strncpy(auto_identifier, name, sizeof(auto_identifier));
}
- /* make the identifier unique */
+ /* Make the identifier unique. */
BLI_uniquename_cb(
unique_identifier_check, lb, "socket", '_', auto_identifier, sizeof(auto_identifier));
- bNodeSocket *sock = (bNodeSocket *)MEM_callocN(sizeof(bNodeSocket), "sock");
+ bNodeSocket *sock = MEM_cnew<bNodeSocket>("sock");
sock->in_out = in_out;
BLI_strncpy(sock->identifier, auto_identifier, NODE_MAXSTR);
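
Several allocations in this commit move from MEM_callocN() plus a cast to the typed MEM_cnew<T>() helper. A small sketch of the equivalence, assuming MEM_guardedalloc.h (MEM_cnew returns zero-initialized storage sized for T):

static bNodeSocket *alloc_socket_sketch()
{
  bNodeSocket *sock = MEM_cnew<bNodeSocket>(__func__);
  /* Equivalent to the older pattern being replaced here:
   * bNodeSocket *sock = (bNodeSocket *)MEM_callocN(sizeof(bNodeSocket), __func__); */
  return sock;
}
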
@@ -1721,26 +1695,7 @@ bNodeSocket *nodeAddSocket(bNodeTree *ntree,
BLI_remlink(lb, sock); /* does nothing for new socket */
BLI_addtail(lb, sock);
- node->update |= NODE_UPDATE;
-
- return sock;
-}
-
-bNodeSocket *nodeInsertSocket(bNodeTree *ntree,
- bNode *node,
- eNodeSocketInOut in_out,
- const char *idname,
- bNodeSocket *next_sock,
- const char *identifier,
- const char *name)
-{
- ListBase *lb = (in_out == SOCK_IN ? &node->inputs : &node->outputs);
- bNodeSocket *sock = make_socket(ntree, node, in_out, lb, idname, identifier, name);
-
- BLI_remlink(lb, sock); /* does nothing for new socket */
- BLI_insertlinkbefore(lb, next_sock, sock);
-
- node->update |= NODE_UPDATE;
+ BKE_ntree_update_tag_socket_new(ntree, sock);
return sock;
}
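
Throughout this commit, direct writes to the node->update and ntree->update bit flags are replaced by explicit BKE_ntree_update_tag_*() calls, with a single flush applying the queued updates later. A sketch of that pattern, using only functions that appear in this diff (the wrapper name is hypothetical):

static void touch_node_and_flush(Main *bmain, bNodeTree *ntree, bNode *node)
{
  /* Record what changed on this tree... */
  BKE_ntree_update_tag_node_property(ntree, node);
  /* ...then process every tagged tree in the given Main in one pass. */
  BKE_ntree_update_main(bmain, nullptr);
}
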
@@ -1961,31 +1916,7 @@ bNodeSocket *nodeAddStaticSocket(bNodeTree *ntree,
return sock;
}
-bNodeSocket *nodeInsertStaticSocket(bNodeTree *ntree,
- bNode *node,
- eNodeSocketInOut in_out,
- int type,
- int subtype,
- bNodeSocket *next_sock,
- const char *identifier,
- const char *name)
-{
- const char *idname = nodeStaticSocketType(type, subtype);
-
- if (!idname) {
- CLOG_ERROR(&LOG, "static node socket type %d undefined", type);
- return nullptr;
- }
-
- bNodeSocket *sock = nodeInsertSocket(ntree, node, in_out, idname, next_sock, identifier, name);
- sock->type = type;
- return sock;
-}
-
-static void node_socket_free(bNodeTree *UNUSED(ntree),
- bNodeSocket *sock,
- bNode *UNUSED(node),
- const bool do_id_user)
+static void node_socket_free(bNodeSocket *sock, const bool do_id_user)
{
if (sock->prop) {
IDP_FreePropertyContent_ex(sock->prop, do_id_user);
@@ -2016,14 +1947,22 @@ void nodeRemoveSocketEx(struct bNodeTree *ntree,
}
}
+ LISTBASE_FOREACH_MUTABLE (bNodeLink *, link, &node->internal_links) {
+ if (link->fromsock == sock || link->tosock == sock) {
+ BLI_remlink(&node->internal_links, link);
+ MEM_freeN(link);
+ BKE_ntree_update_tag_node_internal_link(ntree, node);
+ }
+ }
+
/* this is fast, this way we don't need an in_out argument */
BLI_remlink(&node->inputs, sock);
BLI_remlink(&node->outputs, sock);
- node_socket_free(ntree, sock, node, do_id_user);
+ node_socket_free(sock, do_id_user);
MEM_freeN(sock);
- node->update |= NODE_UPDATE;
+ BKE_ntree_update_tag_socket_removed(ntree);
}
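
The internal-link cleanup added above relies on LISTBASE_FOREACH_MUTABLE, which caches the next pointer before the loop body runs so the current element can be unlinked and freed safely. A minimal sketch of the same pattern, assuming BLI_listbase.h:

static void free_all_internal_links(bNode *node)
{
  LISTBASE_FOREACH_MUTABLE (bNodeLink *, link, &node->internal_links) {
    BLI_remlink(&node->internal_links, link);
    MEM_freeN(link);
  }
}
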
void nodeRemoveAllSockets(bNodeTree *ntree, bNode *node)
@@ -2035,18 +1974,18 @@ void nodeRemoveAllSockets(bNodeTree *ntree, bNode *node)
}
LISTBASE_FOREACH_MUTABLE (bNodeSocket *, sock, &node->inputs) {
- node_socket_free(ntree, sock, node, true);
+ node_socket_free(sock, true);
MEM_freeN(sock);
}
BLI_listbase_clear(&node->inputs);
LISTBASE_FOREACH_MUTABLE (bNodeSocket *, sock, &node->outputs) {
- node_socket_free(ntree, sock, node, true);
+ node_socket_free(sock, true);
MEM_freeN(sock);
}
BLI_listbase_clear(&node->outputs);
- node->update |= NODE_UPDATE;
+ BKE_ntree_update_tag_socket_removed(ntree);
}
bNode *nodeFindNodebyName(bNodeTree *ntree, const char *name)
@@ -2192,13 +2131,17 @@ void nodeUniqueName(bNodeTree *ntree, bNode *node)
bNode *nodeAddNode(const struct bContext *C, bNodeTree *ntree, const char *idname)
{
- bNode *node = (bNode *)MEM_callocN(sizeof(bNode), "new node");
+ bNode *node = MEM_cnew<bNode>("new node");
BLI_addtail(&ntree->nodes, node);
BLI_strncpy(node->idname, idname, sizeof(node->idname));
node_set_typeinfo(C, ntree, node, nodeTypeFind(idname));
- ntree->update |= NTREE_UPDATE_NODES;
+ BKE_ntree_update_tag_node_new(ntree, node);
+
+ if (node->type == GEO_NODE_INPUT_SCENE_TIME) {
+ DEG_relations_tag_update(CTX_data_main(C));
+ }
return node;
}
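
nodeAddNode() now tags the tree itself and, for the scene-time input node, also tags depsgraph relations so the new time dependency gets rebuilt. A usage sketch (the idname is assumed; C must be a valid context):

static bNode *add_scene_time_node(const bContext *C, bNodeTree *ntree)
{
  return nodeAddNode(C, ntree, "GeometryNodeInputSceneTime");
}
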
@@ -2208,9 +2151,8 @@ bNode *nodeAddStaticNode(const struct bContext *C, bNodeTree *ntree, int type)
const char *idname = nullptr;
NODE_TYPES_BEGIN (ntype) {
- /* do an extra poll here, because some int types are used
- * for multiple node types, this helps find the desired type
- */
+ /* Do an extra poll here, because some int types are used
+ * for multiple node types, this helps find the desired type. */
const char *disabled_hint;
if (ntype->type == type && (!ntype->poll || ntype->poll(ntype, ntree, &disabled_hint))) {
idname = ntype->idname;
@@ -2240,143 +2182,98 @@ static void node_socket_copy(bNodeSocket *sock_dst, const bNodeSocket *sock_src,
}
sock_dst->stack_index = 0;
- /* XXX some compositor node (e.g. image, render layers) still store
- * some persistent buffer data here, need to clear this to avoid dangling pointers.
- */
+ /* XXX some compositor nodes (e.g. image, render layers) still store
+ * some persistent buffer data here, need to clear this to avoid dangling pointers. */
sock_dst->cache = nullptr;
}
-bNode *BKE_node_copy_ex(bNodeTree *ntree,
- const bNode *node_src,
- const int flag,
- const bool unique_name)
-{
- bNode *node_dst = (bNode *)MEM_callocN(sizeof(bNode), "dupli node");
- bNodeSocket *sock_dst, *sock_src;
- bNodeLink *link_dst, *link_src;
+namespace blender::bke {
- *node_dst = *node_src;
+bNode *node_copy_with_mapping(bNodeTree *dst_tree,
+ const bNode &node_src,
+ const int flag,
+ const bool unique_name,
+ Map<const bNodeSocket *, bNodeSocket *> &socket_map)
+{
+ bNode *node_dst = (bNode *)MEM_mallocN(sizeof(bNode), __func__);
+ *node_dst = node_src;
- /* can be called for nodes outside a node tree (e.g. clipboard) */
- if (ntree) {
+ /* Can be called for nodes outside a node tree (e.g. clipboard). */
+ if (dst_tree) {
if (unique_name) {
- nodeUniqueName(ntree, node_dst);
+ nodeUniqueName(dst_tree, node_dst);
}
-
- BLI_addtail(&ntree->nodes, node_dst);
+ BLI_addtail(&dst_tree->nodes, node_dst);
}
- BLI_duplicatelist(&node_dst->inputs, &node_src->inputs);
- for (sock_dst = (bNodeSocket *)node_dst->inputs.first,
- sock_src = (bNodeSocket *)node_src->inputs.first;
- sock_dst != nullptr;
- sock_dst = (bNodeSocket *)sock_dst->next, sock_src = (bNodeSocket *)sock_src->next) {
- node_socket_copy(sock_dst, sock_src, flag);
+ BLI_listbase_clear(&node_dst->inputs);
+ LISTBASE_FOREACH (const bNodeSocket *, src_socket, &node_src.inputs) {
+ bNodeSocket *dst_socket = (bNodeSocket *)MEM_dupallocN(src_socket);
+ node_socket_copy(dst_socket, src_socket, flag);
+ BLI_addtail(&node_dst->inputs, dst_socket);
+ socket_map.add_new(src_socket, dst_socket);
}
- BLI_duplicatelist(&node_dst->outputs, &node_src->outputs);
- for (sock_dst = (bNodeSocket *)node_dst->outputs.first,
- sock_src = (bNodeSocket *)node_src->outputs.first;
- sock_dst != nullptr;
- sock_dst = (bNodeSocket *)sock_dst->next, sock_src = (bNodeSocket *)sock_src->next) {
- node_socket_copy(sock_dst, sock_src, flag);
+ BLI_listbase_clear(&node_dst->outputs);
+ LISTBASE_FOREACH (const bNodeSocket *, src_socket, &node_src.outputs) {
+ bNodeSocket *dst_socket = (bNodeSocket *)MEM_dupallocN(src_socket);
+ node_socket_copy(dst_socket, src_socket, flag);
+ BLI_addtail(&node_dst->outputs, dst_socket);
+ socket_map.add_new(src_socket, dst_socket);
}
- if (node_src->prop) {
- node_dst->prop = IDP_CopyProperty_ex(node_src->prop, flag);
+ if (node_src.prop) {
+ node_dst->prop = IDP_CopyProperty_ex(node_src.prop, flag);
}
- BLI_duplicatelist(&node_dst->internal_links, &node_src->internal_links);
- for (link_dst = (bNodeLink *)node_dst->internal_links.first,
- link_src = (bNodeLink *)node_src->internal_links.first;
- link_dst != nullptr;
- link_dst = (bNodeLink *)link_dst->next, link_src = (bNodeLink *)link_src->next) {
- /* This is a bit annoying to do index lookups in a list, but is likely to be faster than
- * trying to create a hash-map. At least for usual nodes, which only have so many sockets
- * and internal links. */
- const int from_sock_index = BLI_findindex(&node_src->inputs, link_src->fromsock);
- const int to_sock_index = BLI_findindex(&node_src->outputs, link_src->tosock);
- BLI_assert(from_sock_index != -1);
- BLI_assert(to_sock_index != -1);
- link_dst->fromnode = node_dst;
- link_dst->tonode = node_dst;
- link_dst->fromsock = (bNodeSocket *)BLI_findlink(&node_dst->inputs, from_sock_index);
- link_dst->tosock = (bNodeSocket *)BLI_findlink(&node_dst->outputs, to_sock_index);
+ BLI_listbase_clear(&node_dst->internal_links);
+ LISTBASE_FOREACH (const bNodeLink *, src_link, &node_src.internal_links) {
+ bNodeLink *dst_link = (bNodeLink *)MEM_dupallocN(src_link);
+ dst_link->fromnode = node_dst;
+ dst_link->tonode = node_dst;
+ dst_link->fromsock = socket_map.lookup(src_link->fromsock);
+ dst_link->tosock = socket_map.lookup(src_link->tosock);
+ BLI_addtail(&node_dst->internal_links, dst_link);
}
if ((flag & LIB_ID_CREATE_NO_USER_REFCOUNT) == 0) {
id_us_plus(node_dst->id);
}
- if (node_src->typeinfo->copyfunc) {
- node_src->typeinfo->copyfunc(ntree, node_dst, node_src);
+ if (node_src.typeinfo->copyfunc) {
+ node_src.typeinfo->copyfunc(dst_tree, node_dst, &node_src);
}
- node_dst->new_node = nullptr;
-
/* Only call copy function when a copy is made for the main database, not
* for cases like the dependency graph and localization. */
if (node_dst->typeinfo->copyfunc_api && !(flag & LIB_ID_CREATE_NO_MAIN)) {
PointerRNA ptr;
- RNA_pointer_create((ID *)ntree, &RNA_Node, node_dst, &ptr);
+ RNA_pointer_create((ID *)dst_tree, &RNA_Node, node_dst, &ptr);
- node_dst->typeinfo->copyfunc_api(&ptr, node_src);
+ node_dst->typeinfo->copyfunc_api(&ptr, &node_src);
}
- if (ntree) {
- ntree->update |= NTREE_UPDATE_NODES;
+ if (dst_tree) {
+ BKE_ntree_update_tag_node_new(dst_tree, node_dst);
}
/* Reset the declaration of the new node. */
node_dst->declaration = nullptr;
- nodeDeclarationEnsure(ntree, node_dst);
+ nodeDeclarationEnsure(dst_tree, node_dst);
return node_dst;
}
-static void node_set_new_pointers(bNode *node_src, bNode *new_node)
+bNode *node_copy(bNodeTree *dst_tree,
+ const bNode &src_node,
+ const int flag,
+ const bool unique_name)
{
- /* Store mapping to the node itself. */
- node_src->new_node = new_node;
- /* Store mapping to inputs. */
- bNodeSocket *new_input_sock = (bNodeSocket *)new_node->inputs.first;
- bNodeSocket *input_sock_src = (bNodeSocket *)node_src->inputs.first;
- while (new_input_sock != nullptr) {
- input_sock_src->new_sock = new_input_sock;
- new_input_sock = new_input_sock->next;
- input_sock_src = input_sock_src->next;
- }
- /* Store mapping to outputs. */
- bNodeSocket *new_output_sock = (bNodeSocket *)new_node->outputs.first;
- bNodeSocket *output_sock_src = (bNodeSocket *)node_src->outputs.first;
- while (new_output_sock != nullptr) {
- output_sock_src->new_sock = new_output_sock;
- new_output_sock = new_output_sock->next;
- output_sock_src = output_sock_src->next;
- }
+ Map<const bNodeSocket *, bNodeSocket *> socket_map;
+ return node_copy_with_mapping(dst_tree, src_node, flag, unique_name, socket_map);
}
-bNode *BKE_node_copy_store_new_pointers(bNodeTree *ntree, bNode *node_src, const int flag)
-{
- bNode *new_node = BKE_node_copy_ex(ntree, node_src, flag, true);
- node_set_new_pointers(node_src, new_node);
- return new_node;
-}
-
-bNodeTree *ntreeCopyTree_ex_new_pointers(const bNodeTree *ntree,
- Main *bmain,
- const bool do_id_user)
-{
- bNodeTree *new_ntree = ntreeCopyTree_ex(ntree, bmain, do_id_user);
- bNode *new_node = (bNode *)new_ntree->nodes.first;
- bNode *node_src = (bNode *)ntree->nodes.first;
- while (new_node != nullptr) {
- node_set_new_pointers(node_src, new_node);
- new_node = new_node->next;
- node_src = node_src->next;
- }
- return new_ntree;
-}
+} // namespace blender::bke
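
The rewritten copy path above hands the socket mapping to the caller instead of stashing new_node/new_sock pointers on the originals. A usage sketch, assuming BLI_map.hh (flag 0 means no special ID-management behavior):

static bNode *duplicate_node_with_mapping(bNodeTree *ntree, bNode &src)
{
  blender::Map<const bNodeSocket *, bNodeSocket *> socket_map;
  bNode *copy = blender::bke::node_copy_with_mapping(ntree, src, 0, true, socket_map);
  /* socket_map.lookup(src_socket) yields the matching socket on the copy. */
  return copy;
}
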
static int node_count_links(const bNodeTree *ntree, const bNodeSocket *socket)
{
@@ -2394,12 +2291,12 @@ bNodeLink *nodeAddLink(
{
bNodeLink *link = nullptr;
- /* test valid input */
+ /* Test valid input. */
BLI_assert(fromnode);
BLI_assert(tonode);
if (fromsock->in_out == SOCK_OUT && tosock->in_out == SOCK_IN) {
- link = (bNodeLink *)MEM_callocN(sizeof(bNodeLink), "link");
+ link = MEM_cnew<bNodeLink>("link");
if (ntree) {
BLI_addtail(&ntree->links, link);
}
@@ -2410,7 +2307,7 @@ bNodeLink *nodeAddLink(
}
else if (fromsock->in_out == SOCK_IN && tosock->in_out == SOCK_OUT) {
/* OK but flip */
- link = (bNodeLink *)MEM_callocN(sizeof(bNodeLink), "link");
+ link = MEM_cnew<bNodeLink>("link");
if (ntree) {
BLI_addtail(&ntree->links, link);
}
@@ -2421,7 +2318,7 @@ bNodeLink *nodeAddLink(
}
if (ntree) {
- ntree->update |= NTREE_UPDATE_LINKS;
+ BKE_ntree_update_tag_link_added(ntree, link);
}
if (link != nullptr && link->tosock->flag & SOCK_MULTI_INPUT) {
@@ -2433,7 +2330,7 @@ bNodeLink *nodeAddLink(
void nodeRemLink(bNodeTree *ntree, bNodeLink *link)
{
- /* can be called for links outside a node tree (e.g. clipboard) */
+ /* Can be called for links outside a node tree (e.g. clipboard). */
if (ntree) {
BLI_remlink(&ntree->links, link);
}
@@ -2444,7 +2341,7 @@ void nodeRemLink(bNodeTree *ntree, bNodeLink *link)
MEM_freeN(link);
if (ntree) {
- ntree->update |= NTREE_UPDATE_LINKS;
+ BKE_ntree_update_tag_link_removed(ntree);
}
}
@@ -2544,7 +2441,7 @@ void nodeMuteLinkToggle(bNodeTree *ntree, bNodeLink *link)
}
if (ntree) {
- ntree->update |= NTREE_UPDATE_LINKS;
+ BKE_ntree_update_tag_link_mute(ntree, link);
}
}
@@ -2555,8 +2452,6 @@ void nodeRemSocketLinks(bNodeTree *ntree, bNodeSocket *sock)
nodeRemLink(ntree, link);
}
}
-
- ntree->update |= NTREE_UPDATE_LINKS;
}
bool nodeLinkIsHidden(const bNodeLink *link)
@@ -2621,7 +2516,7 @@ void nodeInternalRelink(bNodeTree *ntree, bNode *node)
link->flag |= NODE_LINK_MUTED;
}
- ntree->update |= NTREE_UPDATE_LINKS;
+ BKE_ntree_update_tag_link_changed(ntree);
}
else {
if (link->tosock->flag & SOCK_MULTI_INPUT) {
@@ -2815,13 +2710,16 @@ bool BKE_node_preview_used(const bNode *node)
return (node->typeinfo->flag & NODE_PREVIEW) != 0;
}
-bNodePreview *BKE_node_preview_verify(
- bNodeInstanceHash *previews, bNodeInstanceKey key, int xsize, int ysize, bool create)
+bNodePreview *BKE_node_preview_verify(bNodeInstanceHash *previews,
+ bNodeInstanceKey key,
+ const int xsize,
+ const int ysize,
+ const bool create)
{
bNodePreview *preview = (bNodePreview *)BKE_node_instance_hash_lookup(previews, key);
if (!preview) {
if (create) {
- preview = (bNodePreview *)MEM_callocN(sizeof(bNodePreview), "node preview");
+ preview = MEM_cnew<bNodePreview>("node preview");
BKE_node_instance_hash_insert(previews, key, preview);
}
else {
@@ -2873,9 +2771,8 @@ void BKE_node_preview_free(bNodePreview *preview)
static void node_preview_init_tree_recursive(bNodeInstanceHash *previews,
bNodeTree *ntree,
bNodeInstanceKey parent_key,
- int xsize,
- int ysize,
- bool create_previews)
+ const int xsize,
+ const int ysize)
{
LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
bNodeInstanceKey key = BKE_node_instance_key(parent_key, ntree, node);
@@ -2884,17 +2781,16 @@ static void node_preview_init_tree_recursive(bNodeInstanceHash *previews,
node->preview_xsize = xsize;
node->preview_ysize = ysize;
- BKE_node_preview_verify(previews, key, xsize, ysize, create_previews);
+ BKE_node_preview_verify(previews, key, xsize, ysize, false);
}
if (node->type == NODE_GROUP && node->id) {
- node_preview_init_tree_recursive(
- previews, (bNodeTree *)node->id, key, xsize, ysize, create_previews);
+ node_preview_init_tree_recursive(previews, (bNodeTree *)node->id, key, xsize, ysize);
}
}
}
-void BKE_node_preview_init_tree(bNodeTree *ntree, int xsize, int ysize, bool create_previews)
+void BKE_node_preview_init_tree(bNodeTree *ntree, int xsize, int ysize)
{
if (!ntree) {
return;
@@ -2904,8 +2800,7 @@ void BKE_node_preview_init_tree(bNodeTree *ntree, int xsize, int ysize, bool cre
ntree->previews = BKE_node_instance_hash_new("node previews");
}
- node_preview_init_tree_recursive(
- ntree->previews, ntree, NODE_INSTANCE_KEY_BASE, xsize, ysize, create_previews);
+ node_preview_init_tree_recursive(ntree->previews, ntree, NODE_INSTANCE_KEY_BASE, xsize, ysize);
}
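
With the create_previews argument gone, initialization only records preview sizes and allocates the hash; previews themselves are presumably created on demand by callers that pass create=true to BKE_node_preview_verify(). A sketch against the signatures above (the sizes are arbitrary):

static bNodePreview *ensure_node_preview(bNodeTree *ntree, bNodeInstanceKey key)
{
  BKE_node_preview_init_tree(ntree, 140, 140);
  return BKE_node_preview_verify(ntree->previews, key, 140, 140, true);
}
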
static void node_preview_tag_used_recursive(bNodeInstanceHash *previews,
@@ -2939,18 +2834,6 @@ void BKE_node_preview_remove_unused(bNodeTree *ntree)
(bNodeInstanceValueFP)BKE_node_preview_free);
}
-void BKE_node_preview_free_tree(bNodeTree *ntree)
-{
- if (!ntree) {
- return;
- }
-
- if (ntree->previews) {
- BKE_node_instance_hash_free(ntree->previews, (bNodeInstanceValueFP)BKE_node_preview_free);
- ntree->previews = nullptr;
- }
-}
-
void BKE_node_preview_clear(bNodePreview *preview)
{
if (preview && preview->rect) {
@@ -2971,40 +2854,6 @@ void BKE_node_preview_clear_tree(bNodeTree *ntree)
}
}
-static void node_preview_sync(bNodePreview *to, bNodePreview *from)
-{
- /* sizes should have been initialized by BKE_node_preview_init_tree */
- BLI_assert(to->xsize == from->xsize && to->ysize == from->ysize);
-
- /* copy over contents of previews */
- if (to->rect && from->rect) {
- int xsize = to->xsize;
- int ysize = to->ysize;
- memcpy(to->rect, from->rect, xsize * ysize * sizeof(char[4]));
- }
-}
-
-void BKE_node_preview_sync_tree(bNodeTree *to_ntree, bNodeTree *from_ntree)
-{
- bNodeInstanceHash *from_previews = from_ntree->previews;
- bNodeInstanceHash *to_previews = to_ntree->previews;
-
- if (!from_previews || !to_previews) {
- return;
- }
-
- bNodeInstanceHashIterator iter;
- NODE_INSTANCE_HASH_ITER (iter, from_previews) {
- bNodeInstanceKey key = BKE_node_instance_hash_iterator_get_key(&iter);
- bNodePreview *from = (bNodePreview *)BKE_node_instance_hash_iterator_get_value(&iter);
- bNodePreview *to = (bNodePreview *)BKE_node_instance_hash_lookup(to_previews, key);
-
- if (from && to) {
- node_preview_sync(to, from);
- }
- }
-}
-
void BKE_node_preview_merge_tree(bNodeTree *to_ntree, bNodeTree *from_ntree, bool remove_old)
{
if (remove_old || !to_ntree->previews) {
@@ -3041,27 +2890,6 @@ void BKE_node_preview_merge_tree(bNodeTree *to_ntree, bNodeTree *from_ntree, boo
}
}
-void BKE_node_preview_set_pixel(
- bNodePreview *preview, const float col[4], int x, int y, bool do_manage)
-{
- if (preview) {
- if (x >= 0 && y >= 0) {
- if (x < preview->xsize && y < preview->ysize) {
- unsigned char *tar = preview->rect + 4 * ((preview->xsize * y) + x);
-
- if (do_manage) {
- linearrgb_to_srgb_uchar4(tar, col);
- }
- else {
- rgba_float_to_uchar(tar, col);
- }
- }
- // else printf("prv out bound x y %d %d\n", x, y);
- }
- // else printf("prv out bound x y %d %d\n", x, y);
- }
-}
-
/* ************** Free stuff ********** */
void nodeUnlinkNode(bNodeTree *ntree, bNode *node)
@@ -3070,9 +2898,6 @@ void nodeUnlinkNode(bNodeTree *ntree, bNode *node)
ListBase *lb;
if (link->fromnode == node) {
lb = &node->outputs;
- if (link->tonode) {
- link->tonode->update |= NODE_UPDATE;
- }
}
else if (link->tonode == node) {
lb = &node->inputs;
@@ -3114,10 +2939,6 @@ static void node_free_node(bNodeTree *ntree, bNode *node)
/* can be called for nodes outside a node tree (e.g. clipboard) */
if (ntree) {
- /* remove all references to this node */
- nodeUnlinkNode(ntree, node);
- node_unlink_attached(ntree, node);
-
BLI_remlink(&ntree->nodes, node);
if (ntree->typeinfo->free_node_cache) {
@@ -3137,12 +2958,12 @@ static void node_free_node(bNodeTree *ntree, bNode *node)
LISTBASE_FOREACH_MUTABLE (bNodeSocket *, sock, &node->inputs) {
/* Remember, no ID user refcount management here! */
- node_socket_free(ntree, sock, node, false);
+ node_socket_free(sock, false);
MEM_freeN(sock);
}
LISTBASE_FOREACH_MUTABLE (bNodeSocket *, sock, &node->outputs) {
/* Remember, no ID user refcount management here! */
- node_socket_free(ntree, sock, node, false);
+ node_socket_free(sock, false);
MEM_freeN(sock);
}
@@ -3161,7 +2982,7 @@ static void node_free_node(bNodeTree *ntree, bNode *node)
MEM_freeN(node);
if (ntree) {
- ntree->update |= NTREE_UPDATE_NODES;
+ BKE_ntree_update_tag_node_removed(ntree);
}
}
@@ -3169,6 +2990,12 @@ void ntreeFreeLocalNode(bNodeTree *ntree, bNode *node)
{
/* For removing nodes while editing localized node trees. */
BLI_assert((ntree->id.tag & LIB_TAG_LOCALIZED) != 0);
+
+ /* These two lines assume the caller might want to free a single node and maintain
+ * a valid state in the node tree. */
+ nodeUnlinkNode(ntree, node);
+ node_unlink_attached(ntree, node);
+
node_free_node(ntree, node);
}
@@ -3213,6 +3040,9 @@ void nodeRemoveNode(Main *bmain, bNodeTree *ntree, bNode *node, bool do_id_user)
}
}
+ nodeUnlinkNode(ntree, node);
+ node_unlink_attached(ntree, node);
+
/* Free node itself. */
node_free_node(ntree, node);
}
@@ -3238,8 +3068,7 @@ static void free_localized_node_groups(bNodeTree *ntree)
/* Only localized node trees store a copy for each node group tree.
* Each node group tree in a localized node tree can be freed,
* since it is a localized copy itself (no risk of accessing free'd
- * data in main, see T37939).
- */
+ * data in main, see T37939). */
if (!(ntree->id.tag & LIB_TAG_LOCALIZED)) {
return;
}
@@ -3384,26 +3213,6 @@ bNodeTree *ntreeFromID(ID *id)
return (nodetree != nullptr) ? *nodetree : nullptr;
}
-bool ntreeNodeExists(const bNodeTree *ntree, const bNode *testnode)
-{
- LISTBASE_FOREACH (const bNode *, node, &ntree->nodes) {
- if (node == testnode) {
- return true;
- }
- }
- return false;
-}
-
-bool ntreeOutputExists(const bNode *node, const bNodeSocket *testsock)
-{
- LISTBASE_FOREACH (const bNodeSocket *, sock, &node->outputs) {
- if (sock == testsock) {
- return true;
- }
- }
- return false;
-}
-
void ntreeNodeFlagSet(const bNodeTree *ntree, const int flag, const bool enable)
{
LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
@@ -3435,7 +3244,7 @@ bNodeTree *ntreeLocalize(bNodeTree *ntree)
}
}
- /* ensures only a single output node is enabled */
+ /* Ensures only a single output node is enabled. */
ntreeSetOutput(ntree);
bNode *node_src = (bNode *)ntree->nodes.first;
@@ -3453,15 +3262,6 @@ bNodeTree *ntreeLocalize(bNodeTree *ntree)
return ltree;
}
-void ntreeLocalSync(bNodeTree *localtree, bNodeTree *ntree)
-{
- if (localtree && ntree) {
- if (ntree->typeinfo->local_sync) {
- ntree->typeinfo->local_sync(localtree, ntree);
- }
- }
-}
-
void ntreeLocalMerge(Main *bmain, bNodeTree *localtree, bNodeTree *ntree)
{
if (ntree && localtree) {
@@ -3486,7 +3286,7 @@ static bNodeSocket *make_socket_interface(bNodeTree *ntree,
return nullptr;
}
- bNodeSocket *sock = (bNodeSocket *)MEM_callocN(sizeof(bNodeSocket), "socket template");
+ bNodeSocket *sock = MEM_cnew<bNodeSocket>("socket template");
BLI_strncpy(sock->idname, stype->idname, sizeof(sock->idname));
node_socket_set_typeinfo(ntree, sock, stype);
sock->in_out = in_out;
@@ -3532,12 +3332,11 @@ bNodeSocket *ntreeAddSocketInterface(bNodeTree *ntree,
bNodeSocket *iosock = make_socket_interface(ntree, in_out, idname, name);
if (in_out == SOCK_IN) {
BLI_addtail(&ntree->inputs, iosock);
- ntree->update |= NTREE_UPDATE_GROUP_IN;
}
else if (in_out == SOCK_OUT) {
BLI_addtail(&ntree->outputs, iosock);
- ntree->update |= NTREE_UPDATE_GROUP_OUT;
}
+ BKE_ntree_update_tag_interface(ntree);
return iosock;
}
@@ -3550,12 +3349,11 @@ bNodeSocket *ntreeInsertSocketInterface(bNodeTree *ntree,
bNodeSocket *iosock = make_socket_interface(ntree, in_out, idname, name);
if (in_out == SOCK_IN) {
BLI_insertlinkbefore(&ntree->inputs, next_sock, iosock);
- ntree->update |= NTREE_UPDATE_GROUP_IN;
}
else if (in_out == SOCK_OUT) {
BLI_insertlinkbefore(&ntree->outputs, next_sock, iosock);
- ntree->update |= NTREE_UPDATE_GROUP_OUT;
}
+ BKE_ntree_update_tag_interface(ntree);
return iosock;
}
@@ -3601,7 +3399,7 @@ void ntreeRemoveSocketInterface(bNodeTree *ntree, bNodeSocket *sock)
node_socket_interface_free(ntree, sock, true);
MEM_freeN(sock);
- ntree->update |= NTREE_UPDATE_GROUP;
+ BKE_ntree_update_tag_interface(ntree);
}
/* generates a valid RNA identifier from the node tree name */
@@ -3739,11 +3537,6 @@ bNode *ntreeFindType(const bNodeTree *ntree, int type)
return nullptr;
}
-bool ntreeHasType(const bNodeTree *ntree, int type)
-{
- return ntreeFindType(ntree, type) != nullptr;
-}
-
bool ntreeHasTree(const bNodeTree *ntree, const bNodeTree *lookup)
{
if (ntree == lookup) {
@@ -3797,93 +3590,6 @@ bNode *nodeGetActive(bNodeTree *ntree)
return nullptr;
}
-static bNode *node_get_active_id_recursive(bNodeInstanceKey active_key,
- bNodeInstanceKey parent_key,
- bNodeTree *ntree,
- short idtype)
-{
- if (parent_key.value == active_key.value || active_key.value == 0) {
- LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
- if (node->id && GS(node->id->name) == idtype) {
- if (node->flag & NODE_ACTIVE_ID) {
- return node;
- }
- }
- }
- }
- else {
- /* no node with active ID in this tree, look inside groups */
- LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
- if (node->type == NODE_GROUP) {
- bNodeTree *group = (bNodeTree *)node->id;
- if (group) {
- bNodeInstanceKey group_key = BKE_node_instance_key(parent_key, ntree, node);
- bNode *tnode = node_get_active_id_recursive(active_key, group_key, group, idtype);
- if (tnode) {
- return tnode;
- }
- }
- }
- }
- }
- return nullptr;
-}
-
-bNode *nodeGetActiveID(bNodeTree *ntree, short idtype)
-{
- if (ntree) {
- return node_get_active_id_recursive(
- ntree->active_viewer_key, NODE_INSTANCE_KEY_BASE, ntree, idtype);
- }
- return nullptr;
-}
-
-bool nodeSetActiveID(bNodeTree *ntree, short idtype, ID *id)
-{
- bool ok = false;
-
- if (ntree == nullptr) {
- return ok;
- }
-
- LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
- if (node->id && GS(node->id->name) == idtype) {
- if (id && ok == false && node->id == id) {
- node->flag |= NODE_ACTIVE_ID;
- ok = true;
- }
- else {
- node->flag &= ~NODE_ACTIVE_ID;
- }
- }
- }
-
- /* update all groups linked from here
- * if active ID node has been found already,
- * just pass null so other matching nodes are deactivated.
- */
- LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
- if (node->type == NODE_GROUP) {
- ok |= nodeSetActiveID((bNodeTree *)node->id, idtype, (ok == false ? id : nullptr));
- }
- }
-
- return ok;
-}
-
-void nodeClearActiveID(bNodeTree *ntree, short idtype)
-{
- if (ntree == nullptr) {
- return;
- }
-
- LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
- if (node->id && GS(node->id->name) == idtype) {
- node->flag &= ~NODE_ACTIVE_ID;
- }
- }
-}
-
void nodeSetSelected(bNode *node, bool select)
{
if (select) {
@@ -3909,7 +3615,7 @@ void nodeClearActive(bNodeTree *ntree)
}
LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
- node->flag &= ~(NODE_ACTIVE | NODE_ACTIVE_ID);
+ node->flag &= ~NODE_ACTIVE;
}
}
@@ -3919,11 +3625,6 @@ void nodeSetActive(bNodeTree *ntree, bNode *node)
LISTBASE_FOREACH (bNode *, tnode, &ntree->nodes) {
tnode->flag &= ~NODE_ACTIVE;
- if (node->id && tnode->id) {
- if (GS(node->id->name) == GS(tnode->id->name)) {
- tnode->flag &= ~NODE_ACTIVE_ID;
- }
- }
if ((node->typeinfo->nclass == NODE_CLASS_TEXTURE) ||
(node->typeinfo->type == GEO_NODE_LEGACY_ATTRIBUTE_SAMPLE_TEXTURE)) {
tnode->flag &= ~NODE_ACTIVE_TEXTURE;
@@ -3931,9 +3632,6 @@ void nodeSetActive(bNodeTree *ntree, bNode *node)
}
node->flag |= NODE_ACTIVE;
- if (node->id) {
- node->flag |= NODE_ACTIVE_ID;
- }
if ((node->typeinfo->nclass == NODE_CLASS_TEXTURE) ||
(node->typeinfo->type == GEO_NODE_LEGACY_ATTRIBUTE_SAMPLE_TEXTURE)) {
node->flag |= NODE_ACTIVE_TEXTURE;
@@ -3945,10 +3643,13 @@ int nodeSocketIsHidden(const bNodeSocket *sock)
return ((sock->flag & (SOCK_HIDDEN | SOCK_UNAVAIL)) != 0);
}
-void nodeSetSocketAvailability(bNodeTree *UNUSED(ntree), bNodeSocket *sock, bool is_available)
+void nodeSetSocketAvailability(bNodeTree *ntree, bNodeSocket *sock, bool is_available)
{
- /* #ntree is not needed right now, but it's generally necessary when changing the tree because we
- * want to tag it as changed in the future. */
+ const bool was_available = (sock->flag & SOCK_UNAVAIL) == 0;
+ if (is_available != was_available) {
+ BKE_ntree_update_tag_socket_availability(ntree, sock);
+ }
+
if (is_available) {
sock->flag &= ~SOCK_UNAVAIL;
}
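
nodeSetSocketAvailability() now takes the tree so it can tag it, and it only tags when the availability actually flips. A sketch of a node update callback driving it (node and socket names are hypothetical):

static void node_update_example(bNodeTree *ntree, bNode *node)
{
  bNodeSocket *factor = nodeFindSocket(node, SOCK_IN, "Factor");
  if (factor != nullptr) {
    nodeSetSocketAvailability(ntree, factor, node->custom1 != 0);
  }
}
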
@@ -4400,7 +4101,7 @@ void ntreeGetDependencyList(struct bNodeTree *ntree, struct bNode ***r_deplist,
}
/* only updates node->level for detecting cycles links */
-static void ntree_update_node_level(bNodeTree *ntree)
+void ntreeUpdateNodeLevels(bNodeTree *ntree)
{
/* first clear tag */
LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
@@ -4415,768 +4116,42 @@ static void ntree_update_node_level(bNodeTree *ntree)
}
}
-void ntreeTagUsedSockets(bNodeTree *ntree)
-{
- /* first clear data */
- LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
- LISTBASE_FOREACH (bNodeSocket *, sock, &node->inputs) {
- sock->flag &= ~SOCK_IN_USE;
- }
- LISTBASE_FOREACH (bNodeSocket *, sock, &node->outputs) {
- sock->flag &= ~SOCK_IN_USE;
- }
- }
-
- LISTBASE_FOREACH (bNodeLink *, link, &ntree->links) {
- link->fromsock->flag |= SOCK_IN_USE;
- if (!(link->flag & NODE_LINK_MUTED)) {
- link->tosock->flag |= SOCK_IN_USE;
- }
- }
-}
-
-static void ntree_update_link_pointers(bNodeTree *ntree)
-{
- /* first clear data */
- LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
- LISTBASE_FOREACH (bNodeSocket *, sock, &node->inputs) {
- sock->link = nullptr;
- }
- }
-
- LISTBASE_FOREACH (bNodeLink *, link, &ntree->links) {
- link->tosock->link = link;
- }
-
- ntreeTagUsedSockets(ntree);
-}
-
-static void ntree_validate_links(bNodeTree *ntree)
-{
- LISTBASE_FOREACH (bNodeLink *, link, &ntree->links) {
- link->flag |= NODE_LINK_VALID;
- if (link->fromnode && link->tonode && link->fromnode->level <= link->tonode->level) {
- link->flag &= ~NODE_LINK_VALID;
- }
- else if (ntree->typeinfo->validate_link) {
- if (!ntree->typeinfo->validate_link(ntree, link)) {
- link->flag &= ~NODE_LINK_VALID;
- }
- }
- }
-}
-
void ntreeUpdateAllNew(Main *main)
{
+ Vector<bNodeTree *> new_ntrees;
+
/* Update all new node trees on file read or append, to add/remove sockets
* in groups nodes if the group changed, and handle any update flags that
* might have been set in file reading or versioning. */
FOREACH_NODETREE_BEGIN (main, ntree, owner_id) {
if (owner_id->tag & LIB_TAG_NEW) {
- LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
- if (node->typeinfo->group_update_func) {
- node->typeinfo->group_update_func(ntree, node);
- }
- }
-
- ntreeUpdateTree(nullptr, ntree);
+ BKE_ntree_update_tag_all(ntree);
}
}
FOREACH_NODETREE_END;
+ BKE_ntree_update_main(main, nullptr);
}
-namespace blender::bke::node_field_inferencing {
-
-static bool is_field_socket_type(eNodeSocketDatatype type)
-{
- return ELEM(type, SOCK_FLOAT, SOCK_INT, SOCK_BOOLEAN, SOCK_VECTOR, SOCK_RGBA);
-}
-
-static bool is_field_socket_type(const SocketRef &socket)
-{
- return is_field_socket_type((eNodeSocketDatatype)socket.typeinfo()->type);
-}
-
-static bool update_field_inferencing(bNodeTree &btree);
-
-static InputSocketFieldType get_interface_input_field_type(const NodeRef &node,
- const InputSocketRef &socket)
-{
- if (!is_field_socket_type(socket)) {
- return InputSocketFieldType::None;
- }
- if (node.is_reroute_node()) {
- return InputSocketFieldType::IsSupported;
- }
- if (node.is_group_output_node()) {
- /* Outputs always support fields when the data type is correct. */
- return InputSocketFieldType::IsSupported;
- }
- if (node.is_undefined()) {
- return InputSocketFieldType::None;
- }
-
- const NodeDeclaration *node_decl = node.declaration();
-
- /* Node declarations should be implemented for nodes involved here. */
- BLI_assert(node_decl != nullptr);
-
- /* Get the field type from the declaration. */
- const SocketDeclaration &socket_decl = *node_decl->inputs()[socket.index()];
- const InputSocketFieldType field_type = socket_decl.input_field_type();
- if (field_type == InputSocketFieldType::Implicit) {
- return field_type;
- }
- if (node_decl->is_function_node()) {
- /* In a function node, every socket supports fields. */
- return InputSocketFieldType::IsSupported;
- }
- return field_type;
-}
-
-static OutputFieldDependency get_interface_output_field_dependency(const NodeRef &node,
- const OutputSocketRef &socket)
-{
- if (!is_field_socket_type(socket)) {
- /* Non-field sockets always output data. */
- return OutputFieldDependency::ForDataSource();
- }
- if (node.is_reroute_node()) {
- /* The reroute just forwards what is passed in. */
- return OutputFieldDependency::ForDependentField();
- }
- if (node.is_group_input_node()) {
- /* Input nodes get special treatment in #determine_group_input_states. */
- return OutputFieldDependency::ForDependentField();
- }
- if (node.is_undefined()) {
- return OutputFieldDependency::ForDataSource();
- }
-
- const NodeDeclaration *node_decl = node.declaration();
-
- /* Node declarations should be implemented for nodes involved here. */
- BLI_assert(node_decl != nullptr);
-
- if (node_decl->is_function_node()) {
- /* In a generic function node, all outputs depend on all inputs. */
- return OutputFieldDependency::ForDependentField();
- }
-
- /* Use the socket declaration. */
- const SocketDeclaration &socket_decl = *node_decl->outputs()[socket.index()];
- return socket_decl.output_field_dependency();
-}
-
-static FieldInferencingInterface get_dummy_field_inferencing_interface(const NodeRef &node)
-{
- FieldInferencingInterface inferencing_interface;
- inferencing_interface.inputs.append_n_times(InputSocketFieldType::None, node.inputs().size());
- inferencing_interface.outputs.append_n_times(OutputFieldDependency::ForDataSource(),
- node.outputs().size());
- return inferencing_interface;
-}
-
-/**
- * Retrieves information about how the node interacts with fields.
- * In the future, this information can be stored in the node declaration. This would allow this
- * function to return a reference, making it more efficient.
- */
-static FieldInferencingInterface get_node_field_inferencing_interface(const NodeRef &node)
-{
- /* Node groups already reference all required information, so just return that. */
- if (node.is_group_node()) {
- bNodeTree *group = (bNodeTree *)node.bnode()->id;
- if (group == nullptr) {
- return FieldInferencingInterface();
- }
- if (!ntreeIsRegistered(group)) {
- /* This can happen when there is a linked node group that was not found (see T92799). */
- return get_dummy_field_inferencing_interface(node);
- }
- if (group->field_inferencing_interface == nullptr) {
- /* Update group recursively. */
- update_field_inferencing(*group);
- }
- return *group->field_inferencing_interface;
- }
-
- FieldInferencingInterface inferencing_interface;
- for (const InputSocketRef *input_socket : node.inputs()) {
- inferencing_interface.inputs.append(get_interface_input_field_type(node, *input_socket));
- }
-
- for (const OutputSocketRef *output_socket : node.outputs()) {
- inferencing_interface.outputs.append(
- get_interface_output_field_dependency(node, *output_socket));
- }
- return inferencing_interface;
-}
-
-/**
- * This struct contains information for every socket. The values are propagated through the
- * network.
- */
-struct SocketFieldState {
- /* This socket starts a new field. */
- bool is_field_source = false;
- /* This socket can never become a field, because the node itself does not support it. */
- bool is_always_single = false;
- /* This socket is currently a single value. It could become a field though. */
- bool is_single = true;
- /* This socket is required to be a single value. This can be because the node itself only
- * supports this socket to be a single value, or because a node afterwards requires this to be a
- * single value. */
- bool requires_single = false;
-};
-
-static Vector<const InputSocketRef *> gather_input_socket_dependencies(
- const OutputFieldDependency &field_dependency, const NodeRef &node)
-{
- const OutputSocketFieldType type = field_dependency.field_type();
- Vector<const InputSocketRef *> input_sockets;
- switch (type) {
- case OutputSocketFieldType::FieldSource:
- case OutputSocketFieldType::None: {
- break;
- }
- case OutputSocketFieldType::DependentField: {
- /* This output depends on all inputs. */
- input_sockets.extend(node.inputs());
- break;
- }
- case OutputSocketFieldType::PartiallyDependent: {
- /* This output depends only on a few inputs. */
- for (const int i : field_dependency.linked_input_indices()) {
- input_sockets.append(&node.input(i));
- }
- break;
- }
- }
- return input_sockets;
-}
-
-/**
- * Check what the group output socket depends on. Potentially traverses the node tree
- * to figure out if it is always a field or if it depends on any group inputs.
- */
-static OutputFieldDependency find_group_output_dependencies(
- const InputSocketRef &group_output_socket,
- const Span<SocketFieldState> field_state_by_socket_id)
-{
- if (!is_field_socket_type(group_output_socket)) {
- return OutputFieldDependency::ForDataSource();
- }
-
- /* Use a Set here instead of an array indexed by socket id, because we may only need to look at
- * very few sockets. */
- Set<const InputSocketRef *> handled_sockets;
- Stack<const InputSocketRef *> sockets_to_check;
-
- handled_sockets.add(&group_output_socket);
- sockets_to_check.push(&group_output_socket);
-
- /* Keeps track of group input indices that are (indirectly) connected to the output. */
- Vector<int> linked_input_indices;
-
- while (!sockets_to_check.is_empty()) {
- const InputSocketRef *input_socket = sockets_to_check.pop();
-
- for (const OutputSocketRef *origin_socket : input_socket->directly_linked_sockets()) {
- const NodeRef &origin_node = origin_socket->node();
- const SocketFieldState &origin_state = field_state_by_socket_id[origin_socket->id()];
-
- if (origin_state.is_field_source) {
- if (origin_node.is_group_input_node()) {
- /* Found a group input that the group output depends on. */
- linked_input_indices.append_non_duplicates(origin_socket->index());
- }
- else {
- /* Found a field source that is not the group input. So the output is always a field. */
- return OutputFieldDependency::ForFieldSource();
- }
- }
- else if (!origin_state.is_single) {
- const FieldInferencingInterface inferencing_interface =
- get_node_field_inferencing_interface(origin_node);
- const OutputFieldDependency &field_dependency =
- inferencing_interface.outputs[origin_socket->index()];
-
- /* Propagate search further to the left. */
- for (const InputSocketRef *origin_input_socket :
- gather_input_socket_dependencies(field_dependency, origin_node)) {
- if (!origin_input_socket->is_available()) {
- continue;
- }
- if (!field_state_by_socket_id[origin_input_socket->id()].is_single) {
- if (handled_sockets.add(origin_input_socket)) {
- sockets_to_check.push(origin_input_socket);
- }
- }
- }
- }
- }
- }
- return OutputFieldDependency::ForPartiallyDependentField(std::move(linked_input_indices));
-}
-
-static void propagate_data_requirements_from_right_to_left(
- const NodeTreeRef &tree, const MutableSpan<SocketFieldState> field_state_by_socket_id)
-{
- const NodeTreeRef::ToposortResult toposort_result = tree.toposort(
- NodeTreeRef::ToposortDirection::RightToLeft);
-
- for (const NodeRef *node : toposort_result.sorted_nodes) {
- const FieldInferencingInterface inferencing_interface = get_node_field_inferencing_interface(
- *node);
-
- for (const OutputSocketRef *output_socket : node->outputs()) {
- SocketFieldState &state = field_state_by_socket_id[output_socket->id()];
-
- const OutputFieldDependency &field_dependency =
- inferencing_interface.outputs[output_socket->index()];
-
- if (field_dependency.field_type() == OutputSocketFieldType::FieldSource) {
- continue;
- }
- if (field_dependency.field_type() == OutputSocketFieldType::None) {
- state.requires_single = true;
- state.is_always_single = true;
- continue;
- }
-
- /* The output is required to be a single value when it is connected to any input that does
- * not support fields. */
- for (const InputSocketRef *target_socket : output_socket->directly_linked_sockets()) {
- if (target_socket->is_available()) {
- state.requires_single |= field_state_by_socket_id[target_socket->id()].requires_single;
- }
- }
-
- if (state.requires_single) {
- bool any_input_is_field_implicitly = false;
- const Vector<const InputSocketRef *> connected_inputs = gather_input_socket_dependencies(
- field_dependency, *node);
- for (const InputSocketRef *input_socket : connected_inputs) {
- if (!input_socket->is_available()) {
- continue;
- }
- if (inferencing_interface.inputs[input_socket->index()] ==
- InputSocketFieldType::Implicit) {
- if (!input_socket->is_logically_linked()) {
- any_input_is_field_implicitly = true;
- break;
- }
- }
- }
- if (any_input_is_field_implicitly) {
- /* This output isn't a single value actually. */
- state.requires_single = false;
- }
- else {
- /* If the output is required to be a single value, the connected inputs in the same node
- * must not be fields as well. */
- for (const InputSocketRef *input_socket : connected_inputs) {
- field_state_by_socket_id[input_socket->id()].requires_single = true;
- }
- }
- }
- }
-
- /* Some inputs do not require fields independent of what the outputs are connected to. */
- for (const InputSocketRef *input_socket : node->inputs()) {
- SocketFieldState &state = field_state_by_socket_id[input_socket->id()];
- if (inferencing_interface.inputs[input_socket->index()] == InputSocketFieldType::None) {
- state.requires_single = true;
- state.is_always_single = true;
- }
- }
- }
-}
-
-static void determine_group_input_states(
- const NodeTreeRef &tree,
- FieldInferencingInterface &new_inferencing_interface,
- const MutableSpan<SocketFieldState> field_state_by_socket_id)
-{
- {
- /* Non-field inputs never support fields. */
- int index;
- LISTBASE_FOREACH_INDEX (bNodeSocket *, group_input, &tree.btree()->inputs, index) {
- if (!is_field_socket_type((eNodeSocketDatatype)group_input->type)) {
- new_inferencing_interface.inputs[index] = InputSocketFieldType::None;
- }
- }
- }
- /* Check if group inputs are required to be single values, because they are (indirectly)
- * connected to some socket that does not support fields. */
- for (const NodeRef *node : tree.nodes_by_type("NodeGroupInput")) {
- for (const OutputSocketRef *output_socket : node->outputs().drop_back(1)) {
- SocketFieldState &state = field_state_by_socket_id[output_socket->id()];
- if (state.requires_single) {
- new_inferencing_interface.inputs[output_socket->index()] = InputSocketFieldType::None;
- }
- }
- }
- /* If an input does not support fields, this should be reflected in all Group Input nodes. */
- for (const NodeRef *node : tree.nodes_by_type("NodeGroupInput")) {
- for (const OutputSocketRef *output_socket : node->outputs().drop_back(1)) {
- SocketFieldState &state = field_state_by_socket_id[output_socket->id()];
- const bool supports_field = new_inferencing_interface.inputs[output_socket->index()] !=
- InputSocketFieldType::None;
- if (supports_field) {
- state.is_single = false;
- state.is_field_source = true;
- }
- else {
- state.requires_single = true;
- }
- }
- SocketFieldState &dummy_socket_state = field_state_by_socket_id[node->outputs().last()->id()];
- dummy_socket_state.requires_single = true;
- }
-}
-
-static void propagate_field_status_from_left_to_right(
- const NodeTreeRef &tree, const MutableSpan<SocketFieldState> field_state_by_socket_id)
-{
- const NodeTreeRef::ToposortResult toposort_result = tree.toposort(
- NodeTreeRef::ToposortDirection::LeftToRight);
-
- for (const NodeRef *node : toposort_result.sorted_nodes) {
- if (node->is_group_input_node()) {
- continue;
- }
-
- const FieldInferencingInterface inferencing_interface = get_node_field_inferencing_interface(
- *node);
-
- /* Update field state of input sockets, also taking into account linked origin sockets. */
- for (const InputSocketRef *input_socket : node->inputs()) {
- SocketFieldState &state = field_state_by_socket_id[input_socket->id()];
- if (state.is_always_single) {
- state.is_single = true;
- continue;
- }
- state.is_single = true;
- if (input_socket->directly_linked_sockets().is_empty()) {
- if (inferencing_interface.inputs[input_socket->index()] ==
- InputSocketFieldType::Implicit) {
- state.is_single = false;
- }
- }
- else {
- for (const OutputSocketRef *origin_socket : input_socket->directly_linked_sockets()) {
- if (!field_state_by_socket_id[origin_socket->id()].is_single) {
- state.is_single = false;
- break;
- }
- }
- }
- }
-
- /* Update field state of output sockets, also taking into account input sockets. */
- for (const OutputSocketRef *output_socket : node->outputs()) {
- SocketFieldState &state = field_state_by_socket_id[output_socket->id()];
- const OutputFieldDependency &field_dependency =
- inferencing_interface.outputs[output_socket->index()];
-
- switch (field_dependency.field_type()) {
- case OutputSocketFieldType::None: {
- state.is_single = true;
- break;
- }
- case OutputSocketFieldType::FieldSource: {
- state.is_single = false;
- state.is_field_source = true;
- break;
- }
- case OutputSocketFieldType::PartiallyDependent:
- case OutputSocketFieldType::DependentField: {
- for (const InputSocketRef *input_socket :
- gather_input_socket_dependencies(field_dependency, *node)) {
- if (!input_socket->is_available()) {
- continue;
- }
- if (!field_state_by_socket_id[input_socket->id()].is_single) {
- state.is_single = false;
- break;
- }
- }
- break;
- }
- }
- }
- }
-}
-
-static void determine_group_output_states(const NodeTreeRef &tree,
- FieldInferencingInterface &new_inferencing_interface,
- const Span<SocketFieldState> field_state_by_socket_id)
-{
- for (const NodeRef *group_output_node : tree.nodes_by_type("NodeGroupOutput")) {
- /* Ignore inactive group output nodes. */
- if (!(group_output_node->bnode()->flag & NODE_DO_OUTPUT)) {
- continue;
- }
- /* Determine dependencies of all group outputs. */
- for (const InputSocketRef *group_output_socket : group_output_node->inputs().drop_back(1)) {
- OutputFieldDependency field_dependency = find_group_output_dependencies(
- *group_output_socket, field_state_by_socket_id);
- new_inferencing_interface.outputs[group_output_socket->index()] = std::move(
- field_dependency);
- }
- break;
- }
-}
-
-static void update_socket_shapes(const NodeTreeRef &tree,
- const Span<SocketFieldState> field_state_by_socket_id)
-{
- const eNodeSocketDisplayShape requires_data_shape = SOCK_DISPLAY_SHAPE_CIRCLE;
- const eNodeSocketDisplayShape data_but_can_be_field_shape = SOCK_DISPLAY_SHAPE_DIAMOND_DOT;
- const eNodeSocketDisplayShape is_field_shape = SOCK_DISPLAY_SHAPE_DIAMOND;
-
- auto get_shape_for_state = [&](const SocketFieldState &state) {
- if (state.is_always_single) {
- return requires_data_shape;
- }
- if (!state.is_single) {
- return is_field_shape;
- }
- if (state.requires_single) {
- return requires_data_shape;
- }
- return data_but_can_be_field_shape;
- };
-
- for (const InputSocketRef *socket : tree.input_sockets()) {
- bNodeSocket *bsocket = socket->bsocket();
- const SocketFieldState &state = field_state_by_socket_id[socket->id()];
- bsocket->display_shape = get_shape_for_state(state);
- }
- for (const OutputSocketRef *socket : tree.output_sockets()) {
- bNodeSocket *bsocket = socket->bsocket();
- const SocketFieldState &state = field_state_by_socket_id[socket->id()];
- bsocket->display_shape = get_shape_for_state(state);
- }
-}
-
-static bool update_field_inferencing(bNodeTree &btree)
-{
- using namespace blender::nodes;
- if (btree.type != NTREE_GEOMETRY) {
- return false;
- }
-
- /* Create new inferencing interface for this node group. */
- FieldInferencingInterface *new_inferencing_interface = new FieldInferencingInterface();
- new_inferencing_interface->inputs.resize(BLI_listbase_count(&btree.inputs),
- InputSocketFieldType::IsSupported);
- new_inferencing_interface->outputs.resize(BLI_listbase_count(&btree.outputs),
- OutputFieldDependency::ForDataSource());
-
- /* Create #NodeTreeRef to accelerate various queries on the node tree (e.g. linked sockets). */
- const NodeTreeRef tree{&btree};
-
- /* Keep track of the state of all sockets. The index into this array is #SocketRef::id(). */
- Array<SocketFieldState> field_state_by_socket_id(tree.sockets().size());
-
- propagate_data_requirements_from_right_to_left(tree, field_state_by_socket_id);
- determine_group_input_states(tree, *new_inferencing_interface, field_state_by_socket_id);
- propagate_field_status_from_left_to_right(tree, field_state_by_socket_id);
- determine_group_output_states(tree, *new_inferencing_interface, field_state_by_socket_id);
- update_socket_shapes(tree, field_state_by_socket_id);
-
- /* Update the previous group interface. */
- const bool group_interface_changed = btree.field_inferencing_interface == nullptr ||
- *btree.field_inferencing_interface !=
- *new_inferencing_interface;
- delete btree.field_inferencing_interface;
- btree.field_inferencing_interface = new_inferencing_interface;
-
- return group_interface_changed;
-}
-
-} // namespace blender::bke::node_field_inferencing
-
-void ntreeUpdateAllUsers(Main *main, ID *id, const int tree_update_flag)
+void ntreeUpdateAllUsers(Main *main, ID *id)
{
if (id == nullptr) {
return;
}
+ bool need_update = false;
+
/* Update all users of ngroup, to add/remove sockets as needed. */
FOREACH_NODETREE_BEGIN (main, ntree, owner_id) {
- bool need_update = false;
-
LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
if (node->id == id) {
- if (node->typeinfo->group_update_func) {
- node->typeinfo->group_update_func(ntree, node);
- }
-
+ BKE_ntree_update_tag_node_property(ntree, node);
need_update = true;
}
}
-
- if (need_update) {
- ntree->update |= tree_update_flag;
- ntreeUpdateTree(tree_update_flag ? main : nullptr, ntree);
- }
}
FOREACH_NODETREE_END;
-
- if (GS(id->name) == ID_NT) {
- bNodeTree *ngroup = (bNodeTree *)id;
- if (ngroup->type == NTREE_GEOMETRY && (ngroup->update & NTREE_UPDATE_GROUP)) {
- LISTBASE_FOREACH (Object *, object, &main->objects) {
- LISTBASE_FOREACH (ModifierData *, md, &object->modifiers) {
- if (md->type == eModifierType_Nodes) {
- NodesModifierData *nmd = (NodesModifierData *)md;
- if (nmd->node_group == ngroup) {
- MOD_nodes_update_interface(object, nmd);
- }
- }
- }
- }
- }
- }
-}
-
-void ntreeUpdateTree(Main *bmain, bNodeTree *ntree)
-{
- if (!ntree) {
- return;
- }
-
- /* Avoid re-entrant updates, can be caused by RNA update callbacks. */
- if (ntree->is_updating) {
- return;
- }
- ntree->is_updating = true;
-
- if (ntree->update & (NTREE_UPDATE_LINKS | NTREE_UPDATE_NODES)) {
- /* set the bNodeSocket->link pointers */
- ntree_update_link_pointers(ntree);
- }
-
- /* update individual nodes */
- LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
- /* node tree update tags override individual node update flags */
- if ((node->update & NODE_UPDATE) || (ntree->update & NTREE_UPDATE)) {
- if (node->typeinfo->updatefunc) {
- node->typeinfo->updatefunc(ntree, node);
- }
-
- nodeUpdateInternalLinks(ntree, node);
- }
- }
-
- /* generic tree update callback */
- if (ntree->typeinfo->update) {
- ntree->typeinfo->update(ntree);
- }
- /* XXX this should be moved into the tree type update callback for tree supporting node groups.
- * Currently the node tree interface is still a generic feature of the base NodeTree type.
- */
- if (ntree->update & NTREE_UPDATE_GROUP) {
- ntreeInterfaceTypeUpdate(ntree);
- }
-
- int tree_user_update_flag = 0;
-
- if (ntree->update & NTREE_UPDATE) {
- /* If the field interface of this node tree has changed, all node trees using
- * this group will need to recalculate their interface as well. */
- if (blender::bke::node_field_inferencing::update_field_inferencing(*ntree)) {
- tree_user_update_flag |= NTREE_UPDATE_FIELD_INFERENCING;
- }
- }
-
- if (bmain) {
- ntreeUpdateAllUsers(bmain, &ntree->id, tree_user_update_flag);
- }
-
- if (ntree->update & (NTREE_UPDATE_LINKS | NTREE_UPDATE_NODES)) {
- /* node updates can change sockets or links, repeat link pointer update afterward */
- ntree_update_link_pointers(ntree);
-
- /* update the node level from link dependencies */
- ntree_update_node_level(ntree);
-
- /* check link validity */
- ntree_validate_links(ntree);
- }
-
- /* clear update flags */
- LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
- node->update = 0;
- }
- ntree->update = 0;
-
- ntree->is_updating = false;
-}
-
-void nodeUpdate(bNodeTree *ntree, bNode *node)
-{
- /* Avoid re-entrant updates, can be caused by RNA update callbacks. */
- if (ntree->is_updating) {
- return;
- }
- ntree->is_updating = true;
-
- if (node->typeinfo->updatefunc) {
- node->typeinfo->updatefunc(ntree, node);
- }
-
- nodeUpdateInternalLinks(ntree, node);
-
- /* clear update flag */
- node->update = 0;
-
- ntree->is_updating = false;
-}
-
-bool nodeUpdateID(bNodeTree *ntree, ID *id)
-{
- bool changed = false;
-
- if (ELEM(nullptr, id, ntree)) {
- return changed;
- }
-
- /* Avoid re-entrant updates, can be caused by RNA update callbacks. */
- if (ntree->is_updating) {
- return changed;
- }
- ntree->is_updating = true;
-
- LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
- if (node->id == id) {
- changed = true;
- node->update |= NODE_UPDATE_ID;
- if (node->typeinfo->updatefunc) {
- node->typeinfo->updatefunc(ntree, node);
- }
- /* clear update flag */
- node->update = 0;
- }
- }
-
- LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
- nodeUpdateInternalLinks(ntree, node);
- }
-
- ntree->is_updating = false;
- return changed;
-}
-
-void nodeUpdateInternalLinks(bNodeTree *ntree, bNode *node)
-{
- BLI_freelistN(&node->internal_links);
- if (!node->typeinfo->no_muting) {
- node_internal_links_create(ntree, node);
+ if (need_update) {
+ BKE_ntree_update_main(main, nullptr);
}
}
@@ -5232,8 +4207,7 @@ static bool node_poll_instance_default(bNode *node, bNodeTree *ntree, const char
return node->typeinfo->poll(node->typeinfo, ntree, disabled_hint);
}
-/* NOLINTNEXTLINE: readability-function-size */
-void node_type_base(bNodeType *ntype, int type, const char *name, short nclass, short flag)
+void node_type_base(bNodeType *ntype, int type, const char *name, short nclass)
{
/* Use static type info header to map static int type to identifier string and RNA struct type.
* Associate the RNA struct type with the bNodeType.
@@ -5260,7 +4234,6 @@ void node_type_base(bNodeType *ntype, int type, const char *name, short nclass,
ntype->type = type;
BLI_strncpy(ntype->ui_name, name, sizeof(ntype->ui_name));
ntype->nclass = nclass;
- ntype->flag = flag;
node_type_base_defaults(ntype);
@@ -5268,14 +4241,12 @@ void node_type_base(bNodeType *ntype, int type, const char *name, short nclass,
ntype->poll_instance = node_poll_instance_default;
}
-void node_type_base_custom(
- bNodeType *ntype, const char *idname, const char *name, short nclass, short flag)
+void node_type_base_custom(bNodeType *ntype, const char *idname, const char *name, short nclass)
{
BLI_strncpy(ntype->idname, idname, sizeof(ntype->idname));
ntype->type = NODE_CUSTOM;
BLI_strncpy(ntype->ui_name, name, sizeof(ntype->ui_name));
ntype->nclass = nclass;
- ntype->flag = flag;
node_type_base_defaults(ntype);
}
@@ -5451,7 +4422,7 @@ static void register_undefined_types()
strcpy(NodeTreeTypeUndefined.ui_name, N_("Undefined"));
strcpy(NodeTreeTypeUndefined.ui_description, N_("Undefined Node Tree Type"));
- node_type_base_custom(&NodeTypeUndefined, "NodeUndefined", "Undefined", 0, 0);
+ node_type_base_custom(&NodeTypeUndefined, "NodeUndefined", "Undefined", 0);
NodeTypeUndefined.poll = node_undefined_poll;
BLI_strncpy(NodeSocketTypeUndefined.idname,
@@ -5476,6 +4447,7 @@ static void registerCompositNodes()
register_node_type_cmp_value();
register_node_type_cmp_rgb();
register_node_type_cmp_curve_time();
+ register_node_type_cmp_scene_time();
register_node_type_cmp_movieclip();
register_node_type_cmp_composite();
@@ -5516,6 +4488,7 @@ static void registerCompositNodes()
register_node_type_cmp_denoise();
register_node_type_cmp_antialiasing();
+ register_node_type_cmp_convert_color_space();
register_node_type_cmp_valtorgb();
register_node_type_cmp_rgbtobw();
register_node_type_cmp_setalpha();
@@ -5695,6 +4668,7 @@ static void registerTextureNodes()
register_node_type_sh_tangent();
register_node_type_sh_normal_map();
register_node_type_sh_hair_info();
+ register_node_type_sh_point_info();
register_node_type_sh_volume_info();
register_node_type_tex_checker();
@@ -5745,6 +4719,7 @@ static void registerGeometryNodes()
register_node_type_geo_legacy_subdivision_surface();
register_node_type_geo_legacy_volume_to_mesh();
+ register_node_type_geo_accumulate_field();
register_node_type_geo_align_rotation_to_vector();
register_node_type_geo_attribute_capture();
register_node_type_geo_attribute_clamp();
@@ -5772,6 +4747,7 @@ static void registerGeometryNodes()
register_node_type_geo_curve_fillet();
register_node_type_geo_curve_handle_type_selection();
register_node_type_geo_curve_length();
+ register_node_type_geo_curve_primitive_arc();
register_node_type_geo_curve_primitive_bezier_segment();
register_node_type_geo_curve_primitive_circle();
register_node_type_geo_curve_primitive_line();
@@ -5793,6 +4769,9 @@ static void registerGeometryNodes()
register_node_type_geo_distribute_points_on_faces();
register_node_type_geo_dual_mesh();
register_node_type_geo_edge_split();
+ register_node_type_geo_extrude_mesh();
+ register_node_type_geo_field_at_index();
+ register_node_type_geo_flip_faces();
register_node_type_geo_geometry_to_instance();
register_node_type_geo_image_texture();
register_node_type_geo_input_curve_handles();
@@ -5801,6 +4780,7 @@ static void registerGeometryNodes()
register_node_type_geo_input_index();
register_node_type_geo_input_material_index();
register_node_type_geo_input_material();
+ register_node_type_geo_input_mesh_edge_angle();
register_node_type_geo_input_mesh_edge_neighbors();
register_node_type_geo_input_mesh_edge_vertices();
register_node_type_geo_input_mesh_face_area();
@@ -5822,6 +4802,7 @@ static void registerGeometryNodes()
register_node_type_geo_join_geometry();
register_node_type_geo_material_replace();
register_node_type_geo_material_selection();
+ register_node_type_geo_merge_by_distance();
register_node_type_geo_mesh_primitive_circle();
register_node_type_geo_mesh_primitive_cone();
register_node_type_geo_mesh_primitive_cube();
@@ -5847,6 +4828,7 @@ static void registerGeometryNodes()
register_node_type_geo_realize_instances();
register_node_type_geo_rotate_instances();
register_node_type_geo_sample_texture();
+ register_node_type_geo_scale_elements();
register_node_type_geo_scale_instances();
register_node_type_geo_separate_components();
register_node_type_geo_separate_geometry();
diff --git a/source/blender/blenkernel/intern/node_tree_update.cc b/source/blender/blenkernel/intern/node_tree_update.cc
new file mode 100644
index 00000000000..0555707b64c
--- /dev/null
+++ b/source/blender/blenkernel/intern/node_tree_update.cc
@@ -0,0 +1,1670 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "BLI_map.hh"
+#include "BLI_multi_value_map.hh"
+#include "BLI_noise.hh"
+#include "BLI_set.hh"
+#include "BLI_stack.hh"
+#include "BLI_vector_set.hh"
+
+#include "DNA_anim_types.h"
+#include "DNA_modifier_types.h"
+#include "DNA_node_types.h"
+
+#include "BKE_anim_data.h"
+#include "BKE_main.h"
+#include "BKE_node.h"
+#include "BKE_node_tree_update.h"
+
+#include "MOD_nodes.h"
+
+#include "NOD_node_declaration.hh"
+#include "NOD_node_tree_ref.hh"
+#include "NOD_texture.h"
+
+#include "DEG_depsgraph_query.h"
+
+using namespace blender::nodes;
+
+/**
+ * These flags are used by the `changed_flag` field in #bNodeTree, #bNode and #bNodeSocket.
+ * This enum is not part of the public API. It should be used through the `BKE_ntree_update_tag_*`
+ * API.
+ */
+enum eNodeTreeChangedFlag {
+ NTREE_CHANGED_NOTHING = 0,
+ NTREE_CHANGED_ANY = (1 << 1),
+ NTREE_CHANGED_NODE_PROPERTY = (1 << 2),
+ NTREE_CHANGED_NODE_OUTPUT = (1 << 3),
+ NTREE_CHANGED_INTERFACE = (1 << 4),
+ NTREE_CHANGED_LINK = (1 << 5),
+ NTREE_CHANGED_REMOVED_NODE = (1 << 6),
+ NTREE_CHANGED_REMOVED_SOCKET = (1 << 7),
+ NTREE_CHANGED_SOCKET_PROPERTY = (1 << 8),
+ NTREE_CHANGED_INTERNAL_LINK = (1 << 9),
+ NTREE_CHANGED_ALL = -1,
+};
+
+static void add_tree_tag(bNodeTree *ntree, const eNodeTreeChangedFlag flag)
+{
+ ntree->changed_flag |= flag;
+}
+
+static void add_node_tag(bNodeTree *ntree, bNode *node, const eNodeTreeChangedFlag flag)
+{
+ add_tree_tag(ntree, flag);
+ node->changed_flag |= flag;
+}
+
+static void add_socket_tag(bNodeTree *ntree, bNodeSocket *socket, const eNodeTreeChangedFlag flag)
+{
+ add_tree_tag(ntree, flag);
+ socket->changed_flag |= flag;
+}
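+
+/* NOTE: The node and socket tag helpers above always propagate the flag to the owning tree as
+ * well, so checking `ntree->changed_flag` is enough to know whether anything in the tree
+ * changed. */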
+
+namespace blender::bke {
+
+namespace node_field_inferencing {
+
+static bool is_field_socket_type(eNodeSocketDatatype type)
+{
+ return ELEM(type, SOCK_FLOAT, SOCK_INT, SOCK_BOOLEAN, SOCK_VECTOR, SOCK_RGBA);
+}
+
+static bool is_field_socket_type(const SocketRef &socket)
+{
+ return is_field_socket_type((eNodeSocketDatatype)socket.typeinfo()->type);
+}
+
+static InputSocketFieldType get_interface_input_field_type(const NodeRef &node,
+ const InputSocketRef &socket)
+{
+ if (!is_field_socket_type(socket)) {
+ return InputSocketFieldType::None;
+ }
+ if (node.is_reroute_node()) {
+ return InputSocketFieldType::IsSupported;
+ }
+ if (node.is_group_output_node()) {
+ /* Outputs always support fields when the data type is correct. */
+ return InputSocketFieldType::IsSupported;
+ }
+ if (node.is_undefined()) {
+ return InputSocketFieldType::None;
+ }
+
+ const NodeDeclaration *node_decl = node.declaration();
+
+ /* Node declarations should be implemented for nodes involved here. */
+ BLI_assert(node_decl != nullptr);
+
+ /* Get the field type from the declaration. */
+ const SocketDeclaration &socket_decl = *node_decl->inputs()[socket.index()];
+ const InputSocketFieldType field_type = socket_decl.input_field_type();
+ if (field_type == InputSocketFieldType::Implicit) {
+ return field_type;
+ }
+ if (node_decl->is_function_node()) {
+ /* In a function node, every socket supports fields. */
+ return InputSocketFieldType::IsSupported;
+ }
+ return field_type;
+}
+
+static OutputFieldDependency get_interface_output_field_dependency(const NodeRef &node,
+ const OutputSocketRef &socket)
+{
+ if (!is_field_socket_type(socket)) {
+ /* Non-field sockets always output data. */
+ return OutputFieldDependency::ForDataSource();
+ }
+ if (node.is_reroute_node()) {
+ /* The reroute just forwards what is passed in. */
+ return OutputFieldDependency::ForDependentField();
+ }
+ if (node.is_group_input_node()) {
+ /* Input nodes get special treatment in #determine_group_input_states. */
+ return OutputFieldDependency::ForDependentField();
+ }
+ if (node.is_undefined()) {
+ return OutputFieldDependency::ForDataSource();
+ }
+
+ const NodeDeclaration *node_decl = node.declaration();
+
+ /* Node declarations should be implemented for nodes involved here. */
+ BLI_assert(node_decl != nullptr);
+
+ if (node_decl->is_function_node()) {
+ /* In a generic function node, all outputs depend on all inputs. */
+ return OutputFieldDependency::ForDependentField();
+ }
+
+ /* Use the socket declaration. */
+ const SocketDeclaration &socket_decl = *node_decl->outputs()[socket.index()];
+ return socket_decl.output_field_dependency();
+}
+
+static FieldInferencingInterface get_dummy_field_inferencing_interface(const NodeRef &node)
+{
+ FieldInferencingInterface inferencing_interface;
+ inferencing_interface.inputs.append_n_times(InputSocketFieldType::None, node.inputs().size());
+ inferencing_interface.outputs.append_n_times(OutputFieldDependency::ForDataSource(),
+ node.outputs().size());
+ return inferencing_interface;
+}
+
+/**
+ * Retrieves information about how the node interacts with fields.
+ * In the future, this information can be stored in the node declaration. This would allow this
+ * function to return a reference, making it more efficient.
+ */
+static FieldInferencingInterface get_node_field_inferencing_interface(const NodeRef &node)
+{
+ /* Node groups already reference all required information, so just return that. */
+ if (node.is_group_node()) {
+ bNodeTree *group = (bNodeTree *)node.bnode()->id;
+ if (group == nullptr) {
+ return FieldInferencingInterface();
+ }
+ if (!ntreeIsRegistered(group)) {
+ /* This can happen when there is a linked node group that was not found (see T92799). */
+ return get_dummy_field_inferencing_interface(node);
+ }
+ if (group->field_inferencing_interface == nullptr) {
+ /* This shouldn't happen because referenced node groups should always be updated first. */
+ BLI_assert_unreachable();
+ }
+ return *group->field_inferencing_interface;
+ }
+
+ FieldInferencingInterface inferencing_interface;
+ for (const InputSocketRef *input_socket : node.inputs()) {
+ inferencing_interface.inputs.append(get_interface_input_field_type(node, *input_socket));
+ }
+
+ for (const OutputSocketRef *output_socket : node.outputs()) {
+ inferencing_interface.outputs.append(
+ get_interface_output_field_dependency(node, *output_socket));
+ }
+ return inferencing_interface;
+}
+
+/**
+ * This struct contains information for every socket. The values are propagated through the
+ * network.
+ */
+struct SocketFieldState {
+ /* This socket starts a new field. */
+ bool is_field_source = false;
+ /* This socket can never become a field, because the node itself does not support it. */
+ bool is_always_single = false;
+ /* This socket is currently a single value. It could become a field though. */
+ bool is_single = true;
+  /* This socket is required to be a single value. This can be because the node itself only
+   * supports a single value for this socket, or because a node further downstream requires it to
+   * be a single value. */
+ bool requires_single = false;
+};
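+
+/* Informal summary of how these flags end up combining after the propagation passes below:
+ * - is_field_source:  the socket starts a field, so `is_single` ends up false.
+ * - is_always_single: the socket can never be a field; `requires_single` is set as well.
+ * - requires_single:  a socket that consumes this value (possibly on the same node) demands a
+ *   single value. */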
+
+static Vector<const InputSocketRef *> gather_input_socket_dependencies(
+ const OutputFieldDependency &field_dependency, const NodeRef &node)
+{
+ const OutputSocketFieldType type = field_dependency.field_type();
+ Vector<const InputSocketRef *> input_sockets;
+ switch (type) {
+ case OutputSocketFieldType::FieldSource:
+ case OutputSocketFieldType::None: {
+ break;
+ }
+ case OutputSocketFieldType::DependentField: {
+ /* This output depends on all inputs. */
+ input_sockets.extend(node.inputs());
+ break;
+ }
+ case OutputSocketFieldType::PartiallyDependent: {
+ /* This output depends only on a few inputs. */
+ for (const int i : field_dependency.linked_input_indices()) {
+ input_sockets.append(&node.input(i));
+ }
+ break;
+ }
+ }
+ return input_sockets;
+}
+
+/**
+ * Check what the group output socket depends on. Potentially traverses the node tree
+ * to figure out if it is always a field or if it depends on any group inputs.
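+ * The search walks upstream over linked sockets: hitting a field source that is not a group
+ * input makes the output an unconditional field, otherwise the reachable group input indices are
+ * collected into a partial dependency.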
+ */
+static OutputFieldDependency find_group_output_dependencies(
+ const InputSocketRef &group_output_socket,
+ const Span<SocketFieldState> field_state_by_socket_id)
+{
+ if (!is_field_socket_type(group_output_socket)) {
+ return OutputFieldDependency::ForDataSource();
+ }
+
+  /* Use a Set here instead of an array indexed by socket id, because we may only need to look at
+ * very few sockets. */
+ Set<const InputSocketRef *> handled_sockets;
+ Stack<const InputSocketRef *> sockets_to_check;
+
+ handled_sockets.add(&group_output_socket);
+ sockets_to_check.push(&group_output_socket);
+
+ /* Keeps track of group input indices that are (indirectly) connected to the output. */
+ Vector<int> linked_input_indices;
+
+ while (!sockets_to_check.is_empty()) {
+ const InputSocketRef *input_socket = sockets_to_check.pop();
+
+ for (const OutputSocketRef *origin_socket : input_socket->directly_linked_sockets()) {
+ const NodeRef &origin_node = origin_socket->node();
+ const SocketFieldState &origin_state = field_state_by_socket_id[origin_socket->id()];
+
+ if (origin_state.is_field_source) {
+ if (origin_node.is_group_input_node()) {
+ /* Found a group input that the group output depends on. */
+ linked_input_indices.append_non_duplicates(origin_socket->index());
+ }
+ else {
+ /* Found a field source that is not the group input. So the output is always a field. */
+ return OutputFieldDependency::ForFieldSource();
+ }
+ }
+ else if (!origin_state.is_single) {
+ const FieldInferencingInterface inferencing_interface =
+ get_node_field_inferencing_interface(origin_node);
+ const OutputFieldDependency &field_dependency =
+ inferencing_interface.outputs[origin_socket->index()];
+
+ /* Propagate search further to the left. */
+ for (const InputSocketRef *origin_input_socket :
+ gather_input_socket_dependencies(field_dependency, origin_node)) {
+ if (!origin_input_socket->is_available()) {
+ continue;
+ }
+ if (!field_state_by_socket_id[origin_input_socket->id()].is_single) {
+ if (handled_sockets.add(origin_input_socket)) {
+ sockets_to_check.push(origin_input_socket);
+ }
+ }
+ }
+ }
+ }
+ }
+ return OutputFieldDependency::ForPartiallyDependentField(std::move(linked_input_indices));
+}
+
+static void propagate_data_requirements_from_right_to_left(
+ const NodeTreeRef &tree, const MutableSpan<SocketFieldState> field_state_by_socket_id)
+{
+ const NodeTreeRef::ToposortResult toposort_result = tree.toposort(
+ NodeTreeRef::ToposortDirection::RightToLeft);
+
+ for (const NodeRef *node : toposort_result.sorted_nodes) {
+ const FieldInferencingInterface inferencing_interface = get_node_field_inferencing_interface(
+ *node);
+
+ for (const OutputSocketRef *output_socket : node->outputs()) {
+ SocketFieldState &state = field_state_by_socket_id[output_socket->id()];
+
+ const OutputFieldDependency &field_dependency =
+ inferencing_interface.outputs[output_socket->index()];
+
+ if (field_dependency.field_type() == OutputSocketFieldType::FieldSource) {
+ continue;
+ }
+ if (field_dependency.field_type() == OutputSocketFieldType::None) {
+ state.requires_single = true;
+ state.is_always_single = true;
+ continue;
+ }
+
+ /* The output is required to be a single value when it is connected to any input that does
+ * not support fields. */
+ for (const InputSocketRef *target_socket : output_socket->directly_linked_sockets()) {
+ if (target_socket->is_available()) {
+ state.requires_single |= field_state_by_socket_id[target_socket->id()].requires_single;
+ }
+ }
+
+ if (state.requires_single) {
+ bool any_input_is_field_implicitly = false;
+ const Vector<const InputSocketRef *> connected_inputs = gather_input_socket_dependencies(
+ field_dependency, *node);
+ for (const InputSocketRef *input_socket : connected_inputs) {
+ if (!input_socket->is_available()) {
+ continue;
+ }
+ if (inferencing_interface.inputs[input_socket->index()] ==
+ InputSocketFieldType::Implicit) {
+ if (!input_socket->is_logically_linked()) {
+ any_input_is_field_implicitly = true;
+ break;
+ }
+ }
+ }
+ if (any_input_is_field_implicitly) {
+          /* This output is not actually a single value. */
+ state.requires_single = false;
+ }
+ else {
+          /* If the output is required to be a single value, the connected inputs in the same node
+           * must not be fields either. */
+ for (const InputSocketRef *input_socket : connected_inputs) {
+ field_state_by_socket_id[input_socket->id()].requires_single = true;
+ }
+ }
+ }
+ }
+
+    /* Some inputs do not support fields, regardless of what the outputs are connected to. */
+ for (const InputSocketRef *input_socket : node->inputs()) {
+ SocketFieldState &state = field_state_by_socket_id[input_socket->id()];
+ if (inferencing_interface.inputs[input_socket->index()] == InputSocketFieldType::None) {
+ state.requires_single = true;
+ state.is_always_single = true;
+ }
+ }
+ }
+}
+
+static void determine_group_input_states(
+ const NodeTreeRef &tree,
+ FieldInferencingInterface &new_inferencing_interface,
+ const MutableSpan<SocketFieldState> field_state_by_socket_id)
+{
+ {
+ /* Non-field inputs never support fields. */
+ int index;
+ LISTBASE_FOREACH_INDEX (bNodeSocket *, group_input, &tree.btree()->inputs, index) {
+ if (!is_field_socket_type((eNodeSocketDatatype)group_input->type)) {
+ new_inferencing_interface.inputs[index] = InputSocketFieldType::None;
+ }
+ }
+ }
+ /* Check if group inputs are required to be single values, because they are (indirectly)
+ * connected to some socket that does not support fields. */
+ for (const NodeRef *node : tree.nodes_by_type("NodeGroupInput")) {
+ for (const OutputSocketRef *output_socket : node->outputs().drop_back(1)) {
+ SocketFieldState &state = field_state_by_socket_id[output_socket->id()];
+ if (state.requires_single) {
+ new_inferencing_interface.inputs[output_socket->index()] = InputSocketFieldType::None;
+ }
+ }
+ }
+ /* If an input does not support fields, this should be reflected in all Group Input nodes. */
+ for (const NodeRef *node : tree.nodes_by_type("NodeGroupInput")) {
+ for (const OutputSocketRef *output_socket : node->outputs().drop_back(1)) {
+ SocketFieldState &state = field_state_by_socket_id[output_socket->id()];
+ const bool supports_field = new_inferencing_interface.inputs[output_socket->index()] !=
+ InputSocketFieldType::None;
+ if (supports_field) {
+ state.is_single = false;
+ state.is_field_source = true;
+ }
+ else {
+ state.requires_single = true;
+ }
+ }
+ SocketFieldState &dummy_socket_state = field_state_by_socket_id[node->outputs().last()->id()];
+ dummy_socket_state.requires_single = true;
+ }
+}
+
+static void propagate_field_status_from_left_to_right(
+ const NodeTreeRef &tree, const MutableSpan<SocketFieldState> field_state_by_socket_id)
+{
+ const NodeTreeRef::ToposortResult toposort_result = tree.toposort(
+ NodeTreeRef::ToposortDirection::LeftToRight);
+
+ for (const NodeRef *node : toposort_result.sorted_nodes) {
+ if (node->is_group_input_node()) {
+ continue;
+ }
+
+ const FieldInferencingInterface inferencing_interface = get_node_field_inferencing_interface(
+ *node);
+
+ /* Update field state of input sockets, also taking into account linked origin sockets. */
+ for (const InputSocketRef *input_socket : node->inputs()) {
+ SocketFieldState &state = field_state_by_socket_id[input_socket->id()];
+ if (state.is_always_single) {
+ state.is_single = true;
+ continue;
+ }
+ state.is_single = true;
+ if (input_socket->directly_linked_sockets().is_empty()) {
+ if (inferencing_interface.inputs[input_socket->index()] ==
+ InputSocketFieldType::Implicit) {
+ state.is_single = false;
+ }
+ }
+ else {
+ for (const OutputSocketRef *origin_socket : input_socket->directly_linked_sockets()) {
+ if (!field_state_by_socket_id[origin_socket->id()].is_single) {
+ state.is_single = false;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Update field state of output sockets, also taking into account input sockets. */
+ for (const OutputSocketRef *output_socket : node->outputs()) {
+ SocketFieldState &state = field_state_by_socket_id[output_socket->id()];
+ const OutputFieldDependency &field_dependency =
+ inferencing_interface.outputs[output_socket->index()];
+
+ switch (field_dependency.field_type()) {
+ case OutputSocketFieldType::None: {
+ state.is_single = true;
+ break;
+ }
+ case OutputSocketFieldType::FieldSource: {
+ state.is_single = false;
+ state.is_field_source = true;
+ break;
+ }
+ case OutputSocketFieldType::PartiallyDependent:
+ case OutputSocketFieldType::DependentField: {
+ for (const InputSocketRef *input_socket :
+ gather_input_socket_dependencies(field_dependency, *node)) {
+ if (!input_socket->is_available()) {
+ continue;
+ }
+ if (!field_state_by_socket_id[input_socket->id()].is_single) {
+ state.is_single = false;
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+}
+
+static void determine_group_output_states(const NodeTreeRef &tree,
+ FieldInferencingInterface &new_inferencing_interface,
+ const Span<SocketFieldState> field_state_by_socket_id)
+{
+ for (const NodeRef *group_output_node : tree.nodes_by_type("NodeGroupOutput")) {
+ /* Ignore inactive group output nodes. */
+ if (!(group_output_node->bnode()->flag & NODE_DO_OUTPUT)) {
+ continue;
+ }
+ /* Determine dependencies of all group outputs. */
+ for (const InputSocketRef *group_output_socket : group_output_node->inputs().drop_back(1)) {
+ OutputFieldDependency field_dependency = find_group_output_dependencies(
+ *group_output_socket, field_state_by_socket_id);
+ new_inferencing_interface.outputs[group_output_socket->index()] = std::move(
+ field_dependency);
+ }
+ break;
+ }
+}
+
+static void update_socket_shapes(const NodeTreeRef &tree,
+ const Span<SocketFieldState> field_state_by_socket_id)
+{
+ const eNodeSocketDisplayShape requires_data_shape = SOCK_DISPLAY_SHAPE_CIRCLE;
+ const eNodeSocketDisplayShape data_but_can_be_field_shape = SOCK_DISPLAY_SHAPE_DIAMOND_DOT;
+ const eNodeSocketDisplayShape is_field_shape = SOCK_DISPLAY_SHAPE_DIAMOND;
+
+ auto get_shape_for_state = [&](const SocketFieldState &state) {
+ if (state.is_always_single) {
+ return requires_data_shape;
+ }
+ if (!state.is_single) {
+ return is_field_shape;
+ }
+ if (state.requires_single) {
+ return requires_data_shape;
+ }
+ return data_but_can_be_field_shape;
+ };
+
+ for (const InputSocketRef *socket : tree.input_sockets()) {
+ bNodeSocket *bsocket = socket->bsocket();
+ const SocketFieldState &state = field_state_by_socket_id[socket->id()];
+ bsocket->display_shape = get_shape_for_state(state);
+ }
+ for (const OutputSocketRef *socket : tree.output_sockets()) {
+ bNodeSocket *bsocket = socket->bsocket();
+ const SocketFieldState &state = field_state_by_socket_id[socket->id()];
+ bsocket->display_shape = get_shape_for_state(state);
+ }
+}
+
+static bool update_field_inferencing(const NodeTreeRef &tree)
+{
+ bNodeTree &btree = *tree.btree();
+
+ /* Create new inferencing interface for this node group. */
+ FieldInferencingInterface *new_inferencing_interface = new FieldInferencingInterface();
+ new_inferencing_interface->inputs.resize(BLI_listbase_count(&btree.inputs),
+ InputSocketFieldType::IsSupported);
+ new_inferencing_interface->outputs.resize(BLI_listbase_count(&btree.outputs),
+ OutputFieldDependency::ForDataSource());
+
+ /* Keep track of the state of all sockets. The index into this array is #SocketRef::id(). */
+ Array<SocketFieldState> field_state_by_socket_id(tree.sockets().size());
+
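+  /* Two sweeps over the topologically sorted tree: data requirements flow from right to left,
+   * then field status flows from left to right. Group input/output states are derived in between
+   * and afterwards, and the socket display shapes are updated from the final result. */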
+ propagate_data_requirements_from_right_to_left(tree, field_state_by_socket_id);
+ determine_group_input_states(tree, *new_inferencing_interface, field_state_by_socket_id);
+ propagate_field_status_from_left_to_right(tree, field_state_by_socket_id);
+ determine_group_output_states(tree, *new_inferencing_interface, field_state_by_socket_id);
+ update_socket_shapes(tree, field_state_by_socket_id);
+
+ /* Update the previous group interface. */
+ const bool group_interface_changed = btree.field_inferencing_interface == nullptr ||
+ *btree.field_inferencing_interface !=
+ *new_inferencing_interface;
+ delete btree.field_inferencing_interface;
+ btree.field_inferencing_interface = new_inferencing_interface;
+
+ return group_interface_changed;
+}
+
+} // namespace node_field_inferencing
+
+/**
+ * Common data-type priorities; works for compositor, shader and texture nodes alike.
+ * Defines the priority of a data-type connection based on the output type (`to`):
+ * `< 0`: never connect these types.
+ * `>= 0`: priority of connection (higher values chosen first).
+ */
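+/* For instance, with `to->type == SOCK_FLOAT` the table below prefers a SOCK_FLOAT origin
+ * (priority 5) over a SOCK_VECTOR origin (priority 1) when picking an internal link. */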
+static int get_internal_link_type_priority(const bNodeSocketType *from, const bNodeSocketType *to)
+{
+ switch (to->type) {
+ case SOCK_RGBA:
+ switch (from->type) {
+ case SOCK_RGBA:
+ return 4;
+ case SOCK_FLOAT:
+ return 3;
+ case SOCK_INT:
+ return 2;
+ case SOCK_BOOLEAN:
+ return 1;
+ }
+ return -1;
+ case SOCK_VECTOR:
+ switch (from->type) {
+ case SOCK_VECTOR:
+ return 4;
+ case SOCK_FLOAT:
+ return 3;
+ case SOCK_INT:
+ return 2;
+ case SOCK_BOOLEAN:
+ return 1;
+ }
+ return -1;
+ case SOCK_FLOAT:
+ switch (from->type) {
+ case SOCK_FLOAT:
+ return 5;
+ case SOCK_INT:
+ return 4;
+ case SOCK_BOOLEAN:
+ return 3;
+ case SOCK_RGBA:
+ return 2;
+ case SOCK_VECTOR:
+ return 1;
+ }
+ return -1;
+ case SOCK_INT:
+ switch (from->type) {
+ case SOCK_INT:
+ return 5;
+ case SOCK_FLOAT:
+ return 4;
+ case SOCK_BOOLEAN:
+ return 3;
+ case SOCK_RGBA:
+ return 2;
+ case SOCK_VECTOR:
+ return 1;
+ }
+ return -1;
+ case SOCK_BOOLEAN:
+ switch (from->type) {
+ case SOCK_BOOLEAN:
+ return 5;
+ case SOCK_INT:
+ return 4;
+ case SOCK_FLOAT:
+ return 3;
+ case SOCK_RGBA:
+ return 2;
+ case SOCK_VECTOR:
+ return 1;
+ }
+ return -1;
+ }
+
+ /* The rest of the socket types only allow an internal link if both the input and output socket
+ * have the same type. If the sockets are custom, we check the idname instead. */
+ if (to->type == from->type && (to->type != SOCK_CUSTOM || STREQ(to->idname, from->idname))) {
+ return 1;
+ }
+
+ return -1;
+}
+
+using TreeNodePair = std::pair<bNodeTree *, bNode *>;
+using ObjectModifierPair = std::pair<Object *, ModifierData *>;
+using NodeSocketPair = std::pair<bNode *, bNodeSocket *>;
+
+/**
+ * Cache common data about node trees from the #Main database that is expensive to retrieve on
+ * demand every time.
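+ * All lookups are built lazily through the `ensure_*` methods and stay empty when no #Main is
+ * available.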
+ */
+struct NodeTreeRelations {
+ private:
+ Main *bmain_;
+ std::optional<Vector<bNodeTree *>> all_trees_;
+ std::optional<Map<bNodeTree *, ID *>> owner_ids_;
+ std::optional<MultiValueMap<bNodeTree *, TreeNodePair>> group_node_users_;
+ std::optional<MultiValueMap<bNodeTree *, ObjectModifierPair>> modifiers_users_;
+
+ public:
+ NodeTreeRelations(Main *bmain) : bmain_(bmain)
+ {
+ }
+
+ void ensure_all_trees()
+ {
+ if (all_trees_.has_value()) {
+ return;
+ }
+ all_trees_.emplace();
+ owner_ids_.emplace();
+ if (bmain_ == nullptr) {
+ return;
+ }
+
+ FOREACH_NODETREE_BEGIN (bmain_, ntree, id) {
+ all_trees_->append(ntree);
+ if (&ntree->id != id) {
+ owner_ids_->add_new(ntree, id);
+ }
+ }
+ FOREACH_NODETREE_END;
+ }
+
+ void ensure_owner_ids()
+ {
+ this->ensure_all_trees();
+ }
+
+ void ensure_group_node_users()
+ {
+ if (group_node_users_.has_value()) {
+ return;
+ }
+ group_node_users_.emplace();
+ if (bmain_ == nullptr) {
+ return;
+ }
+
+ this->ensure_all_trees();
+
+ for (bNodeTree *ntree : *all_trees_) {
+ LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
+ if (node->id == nullptr) {
+ continue;
+ }
+ ID *id = node->id;
+ if (GS(id->name) == ID_NT) {
+ bNodeTree *group = (bNodeTree *)id;
+ group_node_users_->add(group, {ntree, node});
+ }
+ }
+ }
+ }
+
+ void ensure_modifier_users()
+ {
+ if (modifiers_users_.has_value()) {
+ return;
+ }
+ modifiers_users_.emplace();
+ if (bmain_ == nullptr) {
+ return;
+ }
+
+ LISTBASE_FOREACH (Object *, object, &bmain_->objects) {
+ LISTBASE_FOREACH (ModifierData *, md, &object->modifiers) {
+ if (md->type == eModifierType_Nodes) {
+ NodesModifierData *nmd = (NodesModifierData *)md;
+ if (nmd->node_group != nullptr) {
+ modifiers_users_->add(nmd->node_group, {object, md});
+ }
+ }
+ }
+ }
+ }
+
+ Span<ObjectModifierPair> get_modifier_users(bNodeTree *ntree)
+ {
+ BLI_assert(modifiers_users_.has_value());
+ return modifiers_users_->lookup(ntree);
+ }
+
+ Span<TreeNodePair> get_group_node_users(bNodeTree *ntree)
+ {
+ BLI_assert(group_node_users_.has_value());
+ return group_node_users_->lookup(ntree);
+ }
+
+ ID *get_owner_id(bNodeTree *ntree)
+ {
+ BLI_assert(owner_ids_.has_value());
+ return owner_ids_->lookup_default(ntree, &ntree->id);
+ }
+};
+
+struct TreeUpdateResult {
+ bool interface_changed = false;
+ bool output_changed = false;
+};
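+
+/* `interface_changed` makes dependent trees re-sync their group nodes (and geometry nodes
+ * modifiers re-sync their inputs); `output_changed` tags the group nodes in user trees so the
+ * change propagates upward. See #NodeTreeMainUpdater::update_rooted. */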
+
+class NodeTreeMainUpdater {
+ private:
+ Main *bmain_;
+ NodeTreeUpdateExtraParams *params_;
+ Map<bNodeTree *, TreeUpdateResult> update_result_by_tree_;
+ NodeTreeRelations relations_;
+
+ public:
+ NodeTreeMainUpdater(Main *bmain, NodeTreeUpdateExtraParams *params)
+ : bmain_(bmain), params_(params), relations_(bmain)
+ {
+ }
+
+ void update()
+ {
+ Vector<bNodeTree *> changed_ntrees;
+ FOREACH_NODETREE_BEGIN (bmain_, ntree, id) {
+ if (ntree->changed_flag != NTREE_CHANGED_NOTHING) {
+ changed_ntrees.append(ntree);
+ }
+ }
+ FOREACH_NODETREE_END;
+ this->update_rooted(changed_ntrees);
+ }
+
+ void update_rooted(Span<bNodeTree *> root_ntrees)
+ {
+ if (root_ntrees.is_empty()) {
+ return;
+ }
+
+ bool is_single_tree_update = false;
+
+ if (root_ntrees.size() == 1) {
+ bNodeTree *ntree = root_ntrees[0];
+ if (ntree->changed_flag == NTREE_CHANGED_NOTHING) {
+ return;
+ }
+ const TreeUpdateResult result = this->update_tree(*ntree);
+ update_result_by_tree_.add_new(ntree, result);
+ if (!result.interface_changed && !result.output_changed) {
+ is_single_tree_update = true;
+ }
+ }
+
+ if (!is_single_tree_update) {
+ Vector<bNodeTree *> ntrees_in_order = this->get_tree_update_order(root_ntrees);
+ for (bNodeTree *ntree : ntrees_in_order) {
+ if (ntree->changed_flag == NTREE_CHANGED_NOTHING) {
+ continue;
+ }
+ if (!update_result_by_tree_.contains(ntree)) {
+ const TreeUpdateResult result = this->update_tree(*ntree);
+ update_result_by_tree_.add_new(ntree, result);
+ }
+ const TreeUpdateResult result = update_result_by_tree_.lookup(ntree);
+ Span<TreeNodePair> dependent_trees = relations_.get_group_node_users(ntree);
+ if (result.output_changed) {
+ for (const TreeNodePair &pair : dependent_trees) {
+ add_node_tag(pair.first, pair.second, NTREE_CHANGED_NODE_OUTPUT);
+ }
+ }
+ if (result.interface_changed) {
+ for (const TreeNodePair &pair : dependent_trees) {
+ add_node_tag(pair.first, pair.second, NTREE_CHANGED_NODE_PROPERTY);
+ }
+ }
+ }
+ }
+
+ for (const auto item : update_result_by_tree_.items()) {
+ bNodeTree *ntree = item.key;
+ const TreeUpdateResult &result = item.value;
+
+ this->reset_changed_flags(*ntree);
+
+ if (result.interface_changed) {
+ if (ntree->type == NTREE_GEOMETRY) {
+ relations_.ensure_modifier_users();
+ for (const ObjectModifierPair &pair : relations_.get_modifier_users(ntree)) {
+ Object *object = pair.first;
+ ModifierData *md = pair.second;
+
+ if (md->type == eModifierType_Nodes) {
+ MOD_nodes_update_interface(object, (NodesModifierData *)md);
+ }
+ }
+ }
+ }
+
+ if (params_) {
+ relations_.ensure_owner_ids();
+ ID *id = relations_.get_owner_id(ntree);
+ if (params_->tree_changed_fn) {
+ params_->tree_changed_fn(id, ntree, params_->user_data);
+ }
+ if (params_->tree_output_changed_fn && result.output_changed) {
+ params_->tree_output_changed_fn(id, ntree, params_->user_data);
+ }
+ }
+ }
+ }
+
+ private:
+ enum class ToposortMark {
+ None,
+ Temporary,
+ Permanent,
+ };
+
+ using ToposortMarkMap = Map<bNodeTree *, ToposortMark>;
+
+ /**
+ * Finds all trees that depend on the given trees (through node groups). Then those trees are
+ * ordered such that all trees used by one tree come before it.
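+   * Implemented as a depth-first topological sort; the Temporary mark in
+   * #get_tree_update_order__visit_recursive doubles as cycle detection.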
+ */
+ Vector<bNodeTree *> get_tree_update_order(Span<bNodeTree *> root_ntrees)
+ {
+ relations_.ensure_group_node_users();
+
+ Set<bNodeTree *> trees_to_update = get_trees_to_update(root_ntrees);
+
+ Vector<bNodeTree *> sorted_ntrees;
+
+ ToposortMarkMap marks;
+ for (bNodeTree *ntree : trees_to_update) {
+ marks.add_new(ntree, ToposortMark::None);
+ }
+ for (bNodeTree *ntree : trees_to_update) {
+ if (marks.lookup(ntree) == ToposortMark::None) {
+ const bool cycle_detected = !this->get_tree_update_order__visit_recursive(
+ ntree, marks, sorted_ntrees);
+ /* This should be prevented by higher level operators. */
+ BLI_assert(!cycle_detected);
+ UNUSED_VARS_NDEBUG(cycle_detected);
+ }
+ }
+
+ std::reverse(sorted_ntrees.begin(), sorted_ntrees.end());
+
+ return sorted_ntrees;
+ }
+
+ bool get_tree_update_order__visit_recursive(bNodeTree *ntree,
+ ToposortMarkMap &marks,
+ Vector<bNodeTree *> &sorted_ntrees)
+ {
+ ToposortMark &mark = marks.lookup(ntree);
+ if (mark == ToposortMark::Permanent) {
+ return true;
+ }
+ if (mark == ToposortMark::Temporary) {
+ /* There is a dependency cycle. */
+ return false;
+ }
+
+ mark = ToposortMark::Temporary;
+
+ for (const TreeNodePair &pair : relations_.get_group_node_users(ntree)) {
+ this->get_tree_update_order__visit_recursive(pair.first, marks, sorted_ntrees);
+ }
+ sorted_ntrees.append(ntree);
+
+ mark = ToposortMark::Permanent;
+ return true;
+ }
+
+ Set<bNodeTree *> get_trees_to_update(Span<bNodeTree *> root_ntrees)
+ {
+ relations_.ensure_group_node_users();
+
+ Set<bNodeTree *> reachable_trees;
+ VectorSet<bNodeTree *> trees_to_check = root_ntrees;
+
+ while (!trees_to_check.is_empty()) {
+ bNodeTree *ntree = trees_to_check.pop();
+ if (reachable_trees.add(ntree)) {
+ for (const TreeNodePair &pair : relations_.get_group_node_users(ntree)) {
+ trees_to_check.add(pair.first);
+ }
+ }
+ }
+
+ return reachable_trees;
+ }
+
+ TreeUpdateResult update_tree(bNodeTree &ntree)
+ {
+ TreeUpdateResult result;
+
+    /* Use a #NodeTreeRef to speed up certain queries. It is rebuilt whenever the node tree
+     * topology changes, which typically happens at most once during the entire update of the
+     * node tree. */
+ std::unique_ptr<NodeTreeRef> tree_ref;
+ this->ensure_tree_ref(ntree, tree_ref);
+
+ this->update_socket_link_and_use(*tree_ref);
+ this->update_individual_nodes(ntree, tree_ref);
+ this->update_internal_links(ntree, tree_ref);
+ this->update_generic_callback(ntree, tree_ref);
+ this->remove_unused_previews_when_necessary(ntree);
+
+ this->ensure_tree_ref(ntree, tree_ref);
+ if (ntree.type == NTREE_GEOMETRY) {
+ if (node_field_inferencing::update_field_inferencing(*tree_ref)) {
+ result.interface_changed = true;
+ }
+ }
+
+ result.output_changed = this->check_if_output_changed(*tree_ref);
+
+ this->update_socket_link_and_use(*tree_ref);
+ this->update_node_levels(ntree);
+ this->update_link_validation(ntree);
+
+ if (ntree.type == NTREE_TEXTURE) {
+ ntreeTexCheckCyclics(&ntree);
+ }
+
+ if (ntree.changed_flag & NTREE_CHANGED_INTERFACE || ntree.changed_flag & NTREE_CHANGED_ANY) {
+ result.interface_changed = true;
+ }
+
+ if (result.interface_changed) {
+ ntreeInterfaceTypeUpdate(&ntree);
+ }
+
+ return result;
+ }
+
+ void ensure_tree_ref(bNodeTree &ntree, std::unique_ptr<NodeTreeRef> &tree_ref)
+ {
+ if (!tree_ref) {
+ tree_ref = std::make_unique<NodeTreeRef>(&ntree);
+ }
+ }
+
+ void update_socket_link_and_use(const NodeTreeRef &tree)
+ {
+ for (const InputSocketRef *socket : tree.input_sockets()) {
+ bNodeSocket *bsocket = socket->bsocket();
+ if (socket->directly_linked_links().is_empty()) {
+ bsocket->link = nullptr;
+ }
+ else {
+ bsocket->link = socket->directly_linked_links()[0]->blink();
+ }
+ }
+
+ this->update_socket_used_tags(tree);
+ }
+
+ void update_socket_used_tags(const NodeTreeRef &tree)
+ {
+ for (const SocketRef *socket : tree.sockets()) {
+ bNodeSocket *bsocket = socket->bsocket();
+ bsocket->flag &= ~SOCK_IN_USE;
+ for (const LinkRef *link : socket->directly_linked_links()) {
+ if (!link->is_muted()) {
+ bsocket->flag |= SOCK_IN_USE;
+ break;
+ }
+ }
+ }
+ }
+
+ void update_individual_nodes(bNodeTree &ntree, std::unique_ptr<NodeTreeRef> &tree_ref)
+ {
+ /* Iterate over nodes instead of #NodeTreeRef, because the #tree_ref might be outdated after
+ * some update functions. */
+ LISTBASE_FOREACH (bNode *, bnode, &ntree.nodes) {
+ this->ensure_tree_ref(ntree, tree_ref);
+ const NodeRef &node = *tree_ref->find_node(*bnode);
+ if (this->should_update_individual_node(node)) {
+ const uint32_t old_changed_flag = ntree.changed_flag;
+ ntree.changed_flag = NTREE_CHANGED_NOTHING;
+
+ /* This may set #ntree.changed_flag which is detected below. */
+ this->update_individual_node(node);
+
+ if (ntree.changed_flag != NTREE_CHANGED_NOTHING) {
+ /* The tree ref is outdated and needs to be rebuilt. Generally, only very few update
+ * functions change the node. Typically zero or one nodes change after an update. */
+ tree_ref.reset();
+ }
+ ntree.changed_flag |= old_changed_flag;
+ }
+ }
+ }
+
+ bool should_update_individual_node(const NodeRef &node)
+ {
+ bNodeTree &ntree = *node.btree();
+ bNode &bnode = *node.bnode();
+ if (ntree.changed_flag & NTREE_CHANGED_ANY) {
+ return true;
+ }
+ if (bnode.changed_flag & NTREE_CHANGED_NODE_PROPERTY) {
+ return true;
+ }
+ if (ntree.changed_flag & NTREE_CHANGED_LINK) {
+      /* Node groups currently always rebuild their sockets when they are updated.
+ * So avoid calling the update method when no new link was added to it. */
+ if (node.is_group_input_node()) {
+ if (node.outputs().last()->is_directly_linked()) {
+ return true;
+ }
+ }
+ else if (node.is_group_output_node()) {
+ if (node.inputs().last()->is_directly_linked()) {
+ return true;
+ }
+ }
+ else {
+ /* Currently we have no way to tell if a node needs to be updated when a link changed. */
+ return true;
+ }
+ }
+ if (ntree.changed_flag & NTREE_CHANGED_INTERFACE) {
+ if (node.is_group_input_node() || node.is_group_output_node()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void update_individual_node(const NodeRef &node)
+ {
+ bNodeTree &ntree = *node.btree();
+ bNode &bnode = *node.bnode();
+ bNodeType &ntype = *bnode.typeinfo;
+ if (ntype.group_update_func) {
+ ntype.group_update_func(&ntree, &bnode);
+ }
+ if (ntype.updatefunc) {
+ ntype.updatefunc(&ntree, &bnode);
+ }
+ }
+
+ void update_internal_links(bNodeTree &ntree, std::unique_ptr<NodeTreeRef> &tree_ref)
+ {
+ bool any_internal_links_updated = false;
+ this->ensure_tree_ref(ntree, tree_ref);
+ for (const NodeRef *node : tree_ref->nodes()) {
+ if (!this->should_update_individual_node(*node)) {
+ continue;
+ }
+ /* Find all expected internal links. */
+ Vector<std::pair<bNodeSocket *, bNodeSocket *>> expected_internal_links;
+ for (const OutputSocketRef *output_socket : node->outputs()) {
+ if (!output_socket->is_available()) {
+ continue;
+ }
+ if (!output_socket->is_directly_linked()) {
+ continue;
+ }
+ if (output_socket->bsocket()->flag & SOCK_NO_INTERNAL_LINK) {
+ continue;
+ }
+ const InputSocketRef *input_socket = this->find_internally_linked_input(output_socket);
+ if (input_socket != nullptr) {
+ expected_internal_links.append({input_socket->bsocket(), output_socket->bsocket()});
+ }
+ }
+      /* Rebuild internal links if they have changed. */
+ if (node->internal_links().size() != expected_internal_links.size()) {
+ this->update_internal_links_in_node(ntree, *node->bnode(), expected_internal_links);
+ any_internal_links_updated = true;
+ }
+ else {
+ for (auto &item : expected_internal_links) {
+ const bNodeSocket *from_socket = item.first;
+ const bNodeSocket *to_socket = item.second;
+ bool found = false;
+ for (const InternalLinkRef *internal_link : node->internal_links()) {
+ if (from_socket == internal_link->from().bsocket() &&
+ to_socket == internal_link->to().bsocket()) {
+ found = true;
+ }
+ }
+ if (!found) {
+ this->update_internal_links_in_node(ntree, *node->bnode(), expected_internal_links);
+ any_internal_links_updated = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (any_internal_links_updated) {
+ tree_ref.reset();
+ }
+ }
+
+ const InputSocketRef *find_internally_linked_input(const OutputSocketRef *output_socket)
+ {
+ const InputSocketRef *selected_socket = nullptr;
+ int selected_priority = -1;
+ bool selected_is_linked = false;
+ for (const InputSocketRef *input_socket : output_socket->node().inputs()) {
+ if (!input_socket->is_available()) {
+ continue;
+ }
+ if (input_socket->bsocket()->flag & SOCK_NO_INTERNAL_LINK) {
+ continue;
+ }
+ const int priority = get_internal_link_type_priority(input_socket->bsocket()->typeinfo,
+ output_socket->bsocket()->typeinfo);
+ if (priority < 0) {
+ continue;
+ }
+ const bool is_linked = input_socket->is_directly_linked();
+ const bool is_preferred = priority > selected_priority || (is_linked && !selected_is_linked);
+ if (!is_preferred) {
+ continue;
+ }
+ selected_socket = input_socket;
+ selected_priority = priority;
+ selected_is_linked = is_linked;
+ }
+ return selected_socket;
+ }
+
+ void update_internal_links_in_node(bNodeTree &ntree,
+ bNode &node,
+ Span<std::pair<bNodeSocket *, bNodeSocket *>> links)
+ {
+ BLI_freelistN(&node.internal_links);
+ for (const auto &item : links) {
+ bNodeSocket *from_socket = item.first;
+ bNodeSocket *to_socket = item.second;
+ bNodeLink *link = MEM_cnew<bNodeLink>(__func__);
+ link->fromnode = &node;
+ link->fromsock = from_socket;
+ link->tonode = &node;
+ link->tosock = to_socket;
+ link->flag |= NODE_LINK_VALID;
+ BLI_addtail(&node.internal_links, link);
+ }
+ BKE_ntree_update_tag_node_internal_link(&ntree, &node);
+ }
+
+ void update_generic_callback(bNodeTree &ntree, std::unique_ptr<NodeTreeRef> &tree_ref)
+ {
+ if (ntree.typeinfo->update == nullptr) {
+ return;
+ }
+
+ /* Reset the changed_flag to allow detecting when the update callback changed the node tree. */
+ const uint32_t old_changed_flag = ntree.changed_flag;
+ ntree.changed_flag = NTREE_CHANGED_NOTHING;
+
+ ntree.typeinfo->update(&ntree);
+
+ if (ntree.changed_flag != NTREE_CHANGED_NOTHING) {
+ /* The tree ref is outdated and needs to be rebuilt. */
+ tree_ref.reset();
+ }
+ ntree.changed_flag |= old_changed_flag;
+ }
+
+ void remove_unused_previews_when_necessary(bNodeTree &ntree)
+ {
+    /* Don't trigger preview removal when only the flags below are set. */
+ const uint32_t allowed_flags = NTREE_CHANGED_LINK | NTREE_CHANGED_SOCKET_PROPERTY |
+ NTREE_CHANGED_NODE_PROPERTY | NTREE_CHANGED_NODE_OUTPUT |
+ NTREE_CHANGED_INTERFACE;
+ if ((ntree.changed_flag & allowed_flags) == ntree.changed_flag) {
+ return;
+ }
+ BKE_node_preview_remove_unused(&ntree);
+ }
+
+ void update_node_levels(bNodeTree &ntree)
+ {
+ ntreeUpdateNodeLevels(&ntree);
+ }
+
+ void update_link_validation(bNodeTree &ntree)
+ {
+ LISTBASE_FOREACH (bNodeLink *, link, &ntree.links) {
+ link->flag |= NODE_LINK_VALID;
+ if (link->fromnode && link->tonode && link->fromnode->level <= link->tonode->level) {
+ link->flag &= ~NODE_LINK_VALID;
+ }
+ else if (ntree.typeinfo->validate_link) {
+ const eNodeSocketDatatype from_type = static_cast<eNodeSocketDatatype>(
+ link->fromsock->type);
+ const eNodeSocketDatatype to_type = static_cast<eNodeSocketDatatype>(link->tosock->type);
+ if (!ntree.typeinfo->validate_link(from_type, to_type)) {
+ link->flag &= ~NODE_LINK_VALID;
+ }
+ }
+ }
+ }
+
+ bool check_if_output_changed(const NodeTreeRef &tree)
+ {
+ bNodeTree &btree = *tree.btree();
+
+ /* Compute a hash that represents the node topology connected to the output. This always has to
+ * be updated even if it is not used to detect changes right now. Otherwise
+ * #btree.output_topology_hash will go out of date. */
+ const Vector<const SocketRef *> tree_output_sockets = this->find_output_sockets(tree);
+ const uint32_t old_topology_hash = btree.output_topology_hash;
+ const uint32_t new_topology_hash = this->get_combined_socket_topology_hash(
+ tree, tree_output_sockets);
+ btree.output_topology_hash = new_topology_hash;
+
+ if (const AnimData *adt = BKE_animdata_from_id(&btree.id)) {
+ /* Drivers may copy values in the node tree around arbitrarily and may cause the output to
+       * change even if it wouldn't without drivers. Currently, only some special drivers like
+       * `frame/5` can be used without causing updates all the time. In the future we could try to
+ * handle other drivers better as well.
+ * Note that this optimization only works in practice when the depsgraph didn't also get a
+ * copy-on-write tag for the node tree (which happens when changing node properties). It does
+ * work in a few situations like adding reroutes and duplicating nodes though. */
+ LISTBASE_FOREACH (const FCurve *, fcurve, &adt->drivers) {
+ const ChannelDriver *driver = fcurve->driver;
+ const StringRef expression = driver->expression;
+ if (expression.startswith("frame")) {
+ const StringRef remaining_expression = expression.drop_known_prefix("frame");
+ if (remaining_expression.find_first_not_of(" */+-0123456789.") == StringRef::not_found) {
+ continue;
+ }
+ }
+ /* Unrecognized driver, assume that the output always changes. */
+ return true;
+ }
+ }
+
+ if (btree.changed_flag & NTREE_CHANGED_ANY) {
+ return true;
+ }
+
+ if (old_topology_hash != new_topology_hash) {
+ return true;
+ }
+
+ /* The topology hash can only be used when only topology-changing operations have been done. */
+ if (btree.changed_flag ==
+ (btree.changed_flag & (NTREE_CHANGED_LINK | NTREE_CHANGED_REMOVED_NODE))) {
+ if (old_topology_hash == new_topology_hash) {
+ return false;
+ }
+ }
+
+ if (!this->check_if_socket_outputs_changed_based_on_flags(tree, tree_output_sockets)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ Vector<const SocketRef *> find_output_sockets(const NodeTreeRef &tree)
+ {
+ Vector<const SocketRef *> sockets;
+ for (const NodeRef *node : tree.nodes()) {
+ const bNode *bnode = node->bnode();
+ if (bnode->typeinfo->nclass != NODE_CLASS_OUTPUT && bnode->type != NODE_GROUP_OUTPUT) {
+ continue;
+ }
+ for (const InputSocketRef *socket : node->inputs()) {
+ if (socket->idname() != "NodeSocketVirtual") {
+ sockets.append(socket);
+ }
+ }
+ }
+ return sockets;
+ }
+
+ /**
+ * Computes a hash that changes when the node tree topology connected to an output node changes.
+ * Adding reroutes does not have an effect on the hash.
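+   * Every socket hash is seeded from the socket pointer and mixed with the hashes of its
+   * upstream sockets, so the result depends on which sockets are connected, not on their values.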
+ */
+ uint32_t get_combined_socket_topology_hash(const NodeTreeRef &tree,
+ Span<const SocketRef *> sockets)
+ {
+ if (tree.has_link_cycles()) {
+      /* Return a dummy value when the tree has link cycles. The algorithm below could be improved to
+ * handle cycles more gracefully. */
+ return 0;
+ }
+ Array<uint32_t> hashes = this->get_socket_topology_hashes(tree, sockets);
+ uint32_t combined_hash = 0;
+ for (uint32_t hash : hashes) {
+ combined_hash = noise::hash(combined_hash, hash);
+ }
+ return combined_hash;
+ }
+
+ Array<uint32_t> get_socket_topology_hashes(const NodeTreeRef &tree,
+ Span<const SocketRef *> sockets)
+ {
+ BLI_assert(!tree.has_link_cycles());
+ Array<std::optional<uint32_t>> hash_by_socket_id(tree.sockets().size());
+ Stack<const SocketRef *> sockets_to_check = sockets;
+
+ while (!sockets_to_check.is_empty()) {
+ const SocketRef &in_out_socket = *sockets_to_check.peek();
+ const NodeRef &node = in_out_socket.node();
+
+ if (hash_by_socket_id[in_out_socket.id()].has_value()) {
+ sockets_to_check.pop();
+ /* Socket is handled already. */
+ continue;
+ }
+
+ if (in_out_socket.is_input()) {
+ /* For input sockets, first compute the hashes of all linked sockets. */
+ const InputSocketRef &socket = in_out_socket.as_input();
+ bool all_origins_computed = true;
+ for (const OutputSocketRef *origin_socket : socket.logically_linked_sockets()) {
+ if (!hash_by_socket_id[origin_socket->id()].has_value()) {
+ sockets_to_check.push(origin_socket);
+ all_origins_computed = false;
+ }
+ }
+ if (!all_origins_computed) {
+ continue;
+ }
+ /* When the hashes for the linked sockets are ready, combine them into a hash for the input
+ * socket. */
+ const uint64_t socket_ptr = (uintptr_t)socket.bsocket();
+ uint32_t socket_hash = noise::hash(socket_ptr, socket_ptr >> 32);
+ for (const OutputSocketRef *origin_socket : socket.logically_linked_sockets()) {
+ const uint32_t origin_socket_hash = *hash_by_socket_id[origin_socket->id()];
+ socket_hash = noise::hash(socket_hash, origin_socket_hash);
+ }
+ hash_by_socket_id[socket.id()] = socket_hash;
+ sockets_to_check.pop();
+ }
+ else {
+ /* For output sockets, first compute the hashes of all available input sockets. */
+ const OutputSocketRef &socket = in_out_socket.as_output();
+ bool all_available_inputs_computed = true;
+ for (const InputSocketRef *input_socket : node.inputs()) {
+ if (input_socket->is_available()) {
+ if (!hash_by_socket_id[input_socket->id()].has_value()) {
+ sockets_to_check.push(input_socket);
+ all_available_inputs_computed = false;
+ }
+ }
+ }
+ if (!all_available_inputs_computed) {
+ continue;
+ }
+ /* When all input socket hashes have been computed, combine them into a hash for the output
+ * socket. */
+ const uint64_t socket_ptr = (uintptr_t)socket.bsocket();
+ uint32_t socket_hash = noise::hash(socket_ptr, socket_ptr >> 32);
+ for (const InputSocketRef *input_socket : node.inputs()) {
+ if (input_socket->is_available()) {
+ const uint32_t input_socket_hash = *hash_by_socket_id[input_socket->id()];
+ socket_hash = noise::hash(socket_hash, input_socket_hash);
+ }
+ }
+ hash_by_socket_id[socket.id()] = socket_hash;
+ sockets_to_check.pop();
+ }
+ }
+
+ /* Create output array. */
+ Array<uint32_t> hashes(sockets.size());
+ for (const int i : sockets.index_range()) {
+ hashes[i] = *hash_by_socket_id[sockets[i]->id()];
+ }
+ return hashes;
+ }
+
+ /**
+ * Returns true when any of the provided sockets changed its values. A change is detected by
+ * checking the #changed_flag on connected sockets and nodes.
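+   * The walk goes upstream from the given sockets, visits every socket at most once and stops
+   * early at the first changed flag it encounters.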
+ */
+ bool check_if_socket_outputs_changed_based_on_flags(const NodeTreeRef &tree,
+ Span<const SocketRef *> sockets)
+ {
+ /* Avoid visiting the same socket twice when multiple links point to the same socket. */
+ Array<bool> pushed_by_socket_id(tree.sockets().size(), false);
+ Stack<const SocketRef *> sockets_to_check = sockets;
+
+ for (const SocketRef *socket : sockets) {
+ pushed_by_socket_id[socket->id()] = true;
+ }
+
+ while (!sockets_to_check.is_empty()) {
+ const SocketRef &in_out_socket = *sockets_to_check.pop();
+ const bNode &bnode = *in_out_socket.node().bnode();
+ const bNodeSocket &bsocket = *in_out_socket.bsocket();
+ if (bsocket.changed_flag != NTREE_CHANGED_NOTHING) {
+ return true;
+ }
+ if (bnode.changed_flag != NTREE_CHANGED_NOTHING) {
+ const bool only_unused_internal_link_changed = (bnode.flag & NODE_MUTED) == 0 &&
+ bnode.changed_flag ==
+ NTREE_CHANGED_INTERNAL_LINK;
+ if (!only_unused_internal_link_changed) {
+ return true;
+ }
+ }
+ if (in_out_socket.is_input()) {
+ const InputSocketRef &socket = in_out_socket.as_input();
+ for (const OutputSocketRef *origin_socket : socket.logically_linked_sockets()) {
+ bool &pushed = pushed_by_socket_id[origin_socket->id()];
+ if (!pushed) {
+ sockets_to_check.push(origin_socket);
+ pushed = true;
+ }
+ }
+ }
+ else {
+ const OutputSocketRef &socket = in_out_socket.as_output();
+ for (const InputSocketRef *input_socket : socket.node().inputs()) {
+ if (input_socket->is_available()) {
+ bool &pushed = pushed_by_socket_id[input_socket->id()];
+ if (!pushed) {
+ sockets_to_check.push(input_socket);
+ pushed = true;
+ }
+ }
+ }
+ }
+ }
+ return false;
+ }
+
+ void reset_changed_flags(bNodeTree &ntree)
+ {
+ ntree.changed_flag = NTREE_CHANGED_NOTHING;
+ LISTBASE_FOREACH (bNode *, node, &ntree.nodes) {
+ node->changed_flag = NTREE_CHANGED_NOTHING;
+ node->update = 0;
+ LISTBASE_FOREACH (bNodeSocket *, socket, &node->inputs) {
+ socket->changed_flag = NTREE_CHANGED_NOTHING;
+ }
+ LISTBASE_FOREACH (bNodeSocket *, socket, &node->outputs) {
+ socket->changed_flag = NTREE_CHANGED_NOTHING;
+ }
+ }
+ }
+};
+
+} // namespace blender::bke
+
+void BKE_ntree_update_tag_all(bNodeTree *ntree)
+{
+ add_tree_tag(ntree, NTREE_CHANGED_ANY);
+}
+
+void BKE_ntree_update_tag_node_property(bNodeTree *ntree, bNode *node)
+{
+ add_node_tag(ntree, node, NTREE_CHANGED_NODE_PROPERTY);
+}
+
+void BKE_ntree_update_tag_node_new(bNodeTree *ntree, bNode *node)
+{
+ add_node_tag(ntree, node, NTREE_CHANGED_NODE_PROPERTY);
+}
+
+void BKE_ntree_update_tag_socket_property(bNodeTree *ntree, bNodeSocket *socket)
+{
+ add_socket_tag(ntree, socket, NTREE_CHANGED_SOCKET_PROPERTY);
+}
+
+void BKE_ntree_update_tag_socket_new(bNodeTree *ntree, bNodeSocket *socket)
+{
+ add_socket_tag(ntree, socket, NTREE_CHANGED_SOCKET_PROPERTY);
+}
+
+void BKE_ntree_update_tag_socket_removed(bNodeTree *ntree)
+{
+ add_tree_tag(ntree, NTREE_CHANGED_REMOVED_SOCKET);
+}
+
+void BKE_ntree_update_tag_socket_type(bNodeTree *ntree, bNodeSocket *socket)
+{
+ add_socket_tag(ntree, socket, NTREE_CHANGED_SOCKET_PROPERTY);
+}
+
+void BKE_ntree_update_tag_socket_availability(bNodeTree *ntree, bNodeSocket *socket)
+{
+ add_socket_tag(ntree, socket, NTREE_CHANGED_SOCKET_PROPERTY);
+}
+
+void BKE_ntree_update_tag_node_removed(bNodeTree *ntree)
+{
+ add_tree_tag(ntree, NTREE_CHANGED_REMOVED_NODE);
+}
+
+void BKE_ntree_update_tag_node_mute(bNodeTree *ntree, bNode *node)
+{
+ add_node_tag(ntree, node, NTREE_CHANGED_NODE_PROPERTY);
+}
+
+void BKE_ntree_update_tag_node_internal_link(bNodeTree *ntree, bNode *node)
+{
+ add_node_tag(ntree, node, NTREE_CHANGED_INTERNAL_LINK);
+}
+
+void BKE_ntree_update_tag_link_changed(bNodeTree *ntree)
+{
+ add_tree_tag(ntree, NTREE_CHANGED_LINK);
+}
+
+void BKE_ntree_update_tag_link_removed(bNodeTree *ntree)
+{
+ add_tree_tag(ntree, NTREE_CHANGED_LINK);
+}
+
+void BKE_ntree_update_tag_link_added(bNodeTree *ntree, bNodeLink *UNUSED(link))
+{
+ add_tree_tag(ntree, NTREE_CHANGED_LINK);
+}
+
+void BKE_ntree_update_tag_link_mute(bNodeTree *ntree, bNodeLink *UNUSED(link))
+{
+ add_tree_tag(ntree, NTREE_CHANGED_LINK);
+}
+
+void BKE_ntree_update_tag_missing_runtime_data(bNodeTree *ntree)
+{
+ add_tree_tag(ntree, NTREE_CHANGED_ALL);
+}
+
+void BKE_ntree_update_tag_interface(bNodeTree *ntree)
+{
+ add_tree_tag(ntree, NTREE_CHANGED_INTERFACE);
+}
+
+void BKE_ntree_update_tag_id_changed(Main *bmain, ID *id)
+{
+ FOREACH_NODETREE_BEGIN (bmain, ntree, ntree_id) {
+ LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
+ if (node->id == id) {
+ node->update |= NODE_UPDATE_ID;
+ add_node_tag(ntree, node, NTREE_CHANGED_NODE_PROPERTY);
+ }
+ }
+ }
+ FOREACH_NODETREE_END;
+}
+
+/**
+ * Protect from recursive calls into the updating function. Some node update functions might
+ * trigger this from Python or in other cases.
+ *
+ * This could be added to #Main, but given that there is generally only one #Main, that's not
+ * really worth it now.
+ */
+static bool is_updating = false;
+
+void BKE_ntree_update_main(Main *bmain, NodeTreeUpdateExtraParams *params)
+{
+ if (is_updating) {
+ return;
+ }
+
+ is_updating = true;
+ blender::bke::NodeTreeMainUpdater updater{bmain, params};
+ updater.update();
+ is_updating = false;
+}
+
+void BKE_ntree_update_main_tree(Main *bmain, bNodeTree *ntree, NodeTreeUpdateExtraParams *params)
+{
+ if (ntree == nullptr) {
+ BKE_ntree_update_main(bmain, params);
+ return;
+ }
+
+ if (is_updating) {
+ return;
+ }
+
+ is_updating = true;
+ blender::bke::NodeTreeMainUpdater updater{bmain, params};
+ updater.update_rooted({ntree});
+ is_updating = false;
+}
diff --git a/source/blender/blenkernel/intern/object.cc b/source/blender/blenkernel/intern/object.cc
index 7fec91ed65a..e177b1ce29e 100644
--- a/source/blender/blenkernel/intern/object.cc
+++ b/source/blender/blenkernel/intern/object.cc
@@ -87,6 +87,7 @@
#include "BKE_camera.h"
#include "BKE_collection.h"
#include "BKE_constraint.h"
+#include "BKE_crazyspace.h"
#include "BKE_curve.h"
#include "BKE_deform.h"
#include "BKE_displist.h"
@@ -333,30 +334,9 @@ static void object_make_local(Main *bmain, ID *id, const int flags)
Object *ob = (Object *)id;
const bool lib_local = (flags & LIB_ID_MAKELOCAL_FULL_LIBRARY) != 0;
const bool clear_proxy = (flags & LIB_ID_MAKELOCAL_OBJECT_NO_PROXY_CLEARING) == 0;
- bool force_local = (flags & LIB_ID_MAKELOCAL_FORCE_LOCAL) != 0;
- bool force_copy = (flags & LIB_ID_MAKELOCAL_FORCE_COPY) != 0;
- BLI_assert(force_copy == false || force_copy != force_local);
- bool is_local = false, is_lib = false;
-
- /* - only lib users: do nothing (unless force_local is set)
- * - only local users: set flag
- * - mixed: make copy
- * In case we make a whole lib's content local,
- * we always want to localize, and we skip remapping (done later).
- */
-
- if (!force_local && !force_copy) {
- BKE_library_ID_test_usages(bmain, ob, &is_local, &is_lib);
- if (lib_local || is_local) {
- if (!is_lib) {
- force_local = true;
- }
- else {
- force_copy = true;
- }
- }
- }
+ bool force_local, force_copy;
+ BKE_lib_id_make_local_generic_action_define(bmain, id, flags, &force_local, &force_copy);
if (force_local) {
BKE_lib_id_clear_library_data(bmain, &ob->id, flags);
@@ -1773,8 +1753,9 @@ static void object_update_from_subsurf_ccg(Object *object)
if (!object->runtime.is_data_eval_owned) {
return;
}
- /* Object was never evaluated, so can not have CCG subdivision surface. */
- Mesh *mesh_eval = BKE_object_get_evaluated_mesh(object);
+ /* Object was never evaluated, so can not have CCG subdivision surface. If it were evaluated, do
+ * not try to compute OpenSubDiv on the CPU as it is not needed here. */
+ Mesh *mesh_eval = BKE_object_get_evaluated_mesh_no_subsurf(object);
if (mesh_eval == nullptr) {
return;
}
@@ -1866,6 +1847,12 @@ void BKE_object_free_derived_caches(Object *ob)
object_update_from_subsurf_ccg(ob);
+ if (ob->runtime.editmesh_eval_cage &&
+ ob->runtime.editmesh_eval_cage != reinterpret_cast<Mesh *>(ob->runtime.data_eval)) {
+ BKE_mesh_eval_delete(ob->runtime.editmesh_eval_cage);
+ }
+ ob->runtime.editmesh_eval_cage = nullptr;
+
if (ob->runtime.data_eval != nullptr) {
if (ob->runtime.is_data_eval_owned) {
ID *data_eval = ob->runtime.data_eval;
@@ -1895,6 +1882,8 @@ void BKE_object_free_derived_caches(Object *ob)
BKE_object_to_curve_clear(ob);
BKE_object_free_curve_cache(ob);
+ BKE_crazyspace_api_eval_clear(ob);
+
/* Clear grease pencil data. */
if (ob->runtime.gpd_eval != nullptr) {
BKE_gpencil_eval_delete(ob->runtime.gpd_eval);
@@ -1905,6 +1894,8 @@ void BKE_object_free_derived_caches(Object *ob)
BKE_geometry_set_free(ob->runtime.geometry_set_eval);
ob->runtime.geometry_set_eval = nullptr;
}
+
+ MEM_SAFE_FREE(ob->runtime.editmesh_bb_cage);
}
void BKE_object_free_caches(Object *object)
@@ -3410,7 +3401,8 @@ static void give_parvert(Object *par, int nr, float vec[3])
if (par->type == OB_MESH) {
Mesh *me = (Mesh *)par->data;
BMEditMesh *em = me->edit_mesh;
- Mesh *me_eval = (em) ? em->mesh_eval_final : BKE_object_get_evaluated_mesh(par);
+ Mesh *me_eval = (em) ? BKE_object_get_editmesh_eval_final(par) :
+ BKE_object_get_evaluated_mesh(par);
if (me_eval) {
int count = 0;
@@ -3793,7 +3785,7 @@ BoundBox *BKE_boundbox_alloc_unit()
{
const float min[3] = {-1.0f, -1.0f, -1.0f}, max[3] = {1.0f, 1.0f, 1.0f};
- BoundBox *bb = (BoundBox *)MEM_callocN(sizeof(BoundBox), "OB-BoundBox");
+ BoundBox *bb = MEM_cnew<BoundBox>("OB-BoundBox");
BKE_boundbox_init_from_minmax(bb, min, max);
return bb;
@@ -3903,7 +3895,7 @@ void BKE_object_boundbox_calc_from_mesh(Object *ob, const Mesh *me_eval)
}
if (ob->runtime.bb == nullptr) {
- ob->runtime.bb = (BoundBox *)MEM_callocN(sizeof(BoundBox), "DM-BoundBox");
+ ob->runtime.bb = MEM_cnew<BoundBox>("DM-BoundBox");
}
BKE_boundbox_init_from_minmax(ob->runtime.bb, min, max);
@@ -3917,11 +3909,15 @@ bool BKE_object_boundbox_calc_from_evaluated_geometry(Object *ob)
INIT_MINMAX(min, max);
if (ob->runtime.geometry_set_eval) {
- ob->runtime.geometry_set_eval->compute_boundbox_without_instances(&min, &max);
+ if (!ob->runtime.geometry_set_eval->compute_boundbox_without_instances(&min, &max)) {
+ zero_v3(min);
+ zero_v3(max);
+ }
}
else if (const Mesh *mesh_eval = BKE_object_get_evaluated_mesh(ob)) {
if (!BKE_mesh_wrapper_minmax(mesh_eval, min, max)) {
- return false;
+ zero_v3(min);
+ zero_v3(max);
}
}
else if (ob->runtime.curve_cache) {
@@ -3932,7 +3928,7 @@ bool BKE_object_boundbox_calc_from_evaluated_geometry(Object *ob)
}
if (ob->runtime.bb == nullptr) {
- ob->runtime.bb = (BoundBox *)MEM_callocN(sizeof(BoundBox), __func__);
+ ob->runtime.bb = MEM_cnew<BoundBox>(__func__);
}
BKE_boundbox_init_from_minmax(ob->runtime.bb, min, max);
@@ -4108,7 +4104,7 @@ void BKE_object_empty_draw_type_set(Object *ob, const int value)
if (ob->type == OB_EMPTY && ob->empty_drawtype == OB_EMPTY_IMAGE) {
if (!ob->iuser) {
- ob->iuser = (ImageUser *)MEM_callocN(sizeof(ImageUser), "image user");
+ ob->iuser = MEM_cnew<ImageUser>("image user");
ob->iuser->flag |= IMA_ANIM_ALWAYS;
ob->iuser->frames = 100;
ob->iuser->sfra = 1;
@@ -4447,7 +4443,7 @@ void BKE_object_handle_update(Depsgraph *depsgraph, Scene *scene, Object *ob)
void BKE_object_sculpt_data_create(Object *ob)
{
BLI_assert((ob->sculpt == nullptr) && (ob->mode & OB_MODE_ALL_SCULPT));
- ob->sculpt = (SculptSession *)MEM_callocN(sizeof(SculptSession), __func__);
+ ob->sculpt = MEM_cnew<SculptSession>(__func__);
ob->sculpt->mode_type = (eObjectMode)ob->mode;
}
@@ -4496,7 +4492,7 @@ bool BKE_object_obdata_texspace_get(Object *ob, char **r_texflag, float **r_loc,
return true;
}
-Mesh *BKE_object_get_evaluated_mesh(const Object *object)
+Mesh *BKE_object_get_evaluated_mesh_no_subsurf(const Object *object)
{
/* First attempt to retrieve the evaluated mesh from the evaluated geometry set. Most
* object types either store it there or add a reference to it if it's owned elsewhere. */
@@ -4523,6 +4519,20 @@ Mesh *BKE_object_get_evaluated_mesh(const Object *object)
return nullptr;
}
+Mesh *BKE_object_get_evaluated_mesh(const Object *object)
+{
+ Mesh *mesh = BKE_object_get_evaluated_mesh_no_subsurf(object);
+ if (!mesh) {
+ return nullptr;
+ }
+
+ if (object->data && GS(((const ID *)object->data)->name) == ID_ME) {
+ mesh = BKE_mesh_wrapper_ensure_subdivision(object, mesh);
+ }
+
+ return mesh;
+}
+
Mesh *BKE_object_get_pre_modified_mesh(const Object *object)
{
if (object->type == OB_MESH && object->runtime.data_orig != nullptr) {
@@ -4555,6 +4565,33 @@ Mesh *BKE_object_get_original_mesh(const Object *object)
return result;
}
+Mesh *BKE_object_get_editmesh_eval_final(const Object *object)
+{
+ BLI_assert(!DEG_is_original_id(&object->id));
+ BLI_assert(object->type == OB_MESH);
+
+ const Mesh *mesh = static_cast<const Mesh *>(object->data);
+ if (mesh->edit_mesh == nullptr) {
+ /* Happens when requesting the material of an evaluated 3D font object: the evaluated object
+ * gets converted to a mesh, and it does not have an edit mesh. */
+ return nullptr;
+ }
+
+ return reinterpret_cast<Mesh *>(object->runtime.data_eval);
+}
+
+Mesh *BKE_object_get_editmesh_eval_cage(const Object *object)
+{
+ BLI_assert(!DEG_is_original_id(&object->id));
+ BLI_assert(object->type == OB_MESH);
+
+ const Mesh *mesh = static_cast<const Mesh *>(object->data);
+ BLI_assert(mesh->edit_mesh != nullptr);
+ UNUSED_VARS_NDEBUG(mesh);
+
+ return object->runtime.editmesh_eval_cage;
+}
+
Lattice *BKE_object_get_lattice(const Object *object)
{
ID *data = (ID *)object->data;
@@ -4621,7 +4658,7 @@ int BKE_object_insert_ptcache(Object *ob)
}
}
- link = (LinkData *)MEM_callocN(sizeof(LinkData), "PCLink");
+ link = MEM_cnew<LinkData>("PCLink");
link->data = POINTER_FROM_INT(i);
BLI_addtail(&ob->pc_ids, link);
@@ -5185,6 +5222,9 @@ void BKE_object_runtime_reset_on_copy(Object *object, const int UNUSED(flag))
runtime->object_as_temp_mesh = nullptr;
runtime->object_as_temp_curve = nullptr;
runtime->geometry_set_eval = nullptr;
+
+ runtime->crazyspace_deform_imats = nullptr;
+ runtime->crazyspace_deform_cos = nullptr;
}
void BKE_object_runtime_free_data(Object *object)
@@ -5779,6 +5819,21 @@ void BKE_object_modifiers_lib_link_common(void *userData,
}
}
+SubsurfModifierData *BKE_object_get_last_subsurf_modifier(const Object *ob)
+{
+ ModifierData *md = (ModifierData *)(ob->modifiers.last);
+
+ while (md) {
+ if (md->type == eModifierType_Subsurf) {
+ break;
+ }
+
+ md = md->prev;
+ }
+
+ return (SubsurfModifierData *)(md);
+}
+
void BKE_object_replace_data_on_shallow_copy(Object *ob, ID *new_data)
{
ob->type = BKE_object_obdata_to_type(new_data);
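
The object.cc changes split the evaluated-mesh accessors: BKE_object_get_evaluated_mesh() may now ensure the subdivision wrapper for mesh objects, while the new _no_subsurf() variant never triggers CPU OpenSubdiv evaluation. A minimal sketch of when to prefer one over the other, assuming an evaluated object from the depsgraph (the helper name and flag are illustrative):

/* Illustrative helper: pick the cheapest evaluated mesh that suffices. */
static const Mesh *mesh_for_query(const Object *ob_eval, const bool need_final_subdivision)
{
  if (need_final_subdivision) {
    /* May ensure the subdivision wrapper on mesh objects. */
    return BKE_object_get_evaluated_mesh(ob_eval);
  }
  /* Never computes OpenSubdiv on the CPU. */
  return BKE_object_get_evaluated_mesh_no_subsurf(ob_eval);
}
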
diff --git a/source/blender/blenkernel/intern/object_dupli.cc b/source/blender/blenkernel/intern/object_dupli.cc
index 18bcf2041c3..3082d6f25f3 100644
--- a/source/blender/blenkernel/intern/object_dupli.cc
+++ b/source/blender/blenkernel/intern/object_dupli.cc
@@ -31,9 +31,9 @@
#include "BLI_string_utf8.h"
#include "BLI_array.hh"
-#include "BLI_float3.hh"
#include "BLI_float4x4.hh"
#include "BLI_math.h"
+#include "BLI_math_vec_types.hh"
#include "BLI_rand.h"
#include "BLI_span.hh"
#include "BLI_vector.hh"
@@ -147,7 +147,7 @@ static void init_context(DupliContext *r_ctx,
/**
* Create sub-context for recursive duplis.
*/
-static void copy_dupli_context(
+static bool copy_dupli_context(
DupliContext *r_ctx, const DupliContext *ctx, Object *ob, const float mat[4][4], int index)
{
*r_ctx = *ctx;
@@ -168,9 +168,11 @@ static void copy_dupli_context(
if (r_ctx->level == MAX_DUPLI_RECUR - 1) {
std::cerr << "Warning: Maximum instance recursion level reached.\n";
+ return false;
}
r_ctx->gen = get_dupli_generator(r_ctx);
+ return true;
}
/**
@@ -188,7 +190,7 @@ static DupliObject *make_dupli(const DupliContext *ctx,
/* Add a #DupliObject instance to the result container. */
if (ctx->duplilist) {
- dob = (DupliObject *)MEM_callocN(sizeof(DupliObject), "dupli object");
+ dob = MEM_cnew<DupliObject>("dupli object");
BLI_addtail(ctx->duplilist, dob);
}
else {
@@ -258,7 +260,9 @@ static void make_recursive_duplis(const DupliContext *ctx,
/* Simple preventing of too deep nested collections with #MAX_DUPLI_RECUR. */
if (ctx->level < MAX_DUPLI_RECUR) {
DupliContext rctx;
- copy_dupli_context(&rctx, ctx, ob, space_mat, index);
+ if (!copy_dupli_context(&rctx, ctx, ob, space_mat, index)) {
+ return;
+ }
if (rctx.gen) {
ctx->instance_stack->append(ob);
rctx.gen->make_duplis(&rctx);
@@ -301,13 +305,13 @@ static void make_child_duplis(const DupliContext *ctx,
FOREACH_COLLECTION_VISIBLE_OBJECT_RECURSIVE_BEGIN (ctx->collection, ob, mode) {
if ((ob != ctx->obedit) && is_child(ob, parent)) {
DupliContext pctx;
- copy_dupli_context(&pctx, ctx, ctx->object, nullptr, _base_id);
-
- /* Meta-balls have a different dupli handling. */
- if (ob->type != OB_MBALL) {
- ob->flag |= OB_DONE; /* Doesn't render. */
+ if (copy_dupli_context(&pctx, ctx, ctx->object, nullptr, _base_id)) {
+ /* Meta-balls have a different dupli handling. */
+ if (ob->type != OB_MBALL) {
+ ob->flag |= OB_DONE; /* Doesn't render. */
+ }
+ make_child_duplis_cb(&pctx, userdata, ob);
}
- make_child_duplis_cb(&pctx, userdata, ob);
}
}
FOREACH_COLLECTION_VISIBLE_OBJECT_RECURSIVE_END;
@@ -324,14 +328,14 @@ static void make_child_duplis(const DupliContext *ctx,
DEG_OBJECT_ITER_BEGIN (ctx->depsgraph, ob, deg_objects_visibility_flags) {
if ((ob != ctx->obedit) && is_child(ob, parent)) {
DupliContext pctx;
- copy_dupli_context(&pctx, ctx, ctx->object, nullptr, persistent_dupli_id);
+ if (copy_dupli_context(&pctx, ctx, ctx->object, nullptr, persistent_dupli_id)) {
+ /* Meta-balls have a different dupli-handling. */
+ if (ob->type != OB_MBALL) {
+ ob->flag |= OB_DONE; /* Doesn't render. */
+ }
- /* Meta-balls have a different dupli-handling. */
- if (ob->type != OB_MBALL) {
- ob->flag |= OB_DONE; /* Doesn't render. */
+ make_child_duplis_cb(&pctx, userdata, ob);
}
-
- make_child_duplis_cb(&pctx, userdata, ob);
}
persistent_dupli_id++;
}
@@ -367,7 +371,7 @@ static const Mesh *mesh_data_from_duplicator_object(Object *ob,
if (em != nullptr) {
/* Note that this will only show deformation if #eModifierMode_OnCage is enabled.
* We could change this but it matches 2.7x behavior. */
- me_eval = em->mesh_eval_cage;
+ me_eval = BKE_object_get_editmesh_eval_cage(ob);
if ((me_eval == nullptr) || (me_eval->runtime.wrapper_type == ME_WRAPPER_TYPE_BMESH)) {
EditMeshData *emd = me_eval ? me_eval->runtime.edit_data : nullptr;
@@ -457,6 +461,7 @@ struct VertexDupliData_Mesh {
int totvert;
const MVert *mvert;
+ const float (*vert_normals)[3];
const float (*orco)[3];
};
@@ -554,12 +559,9 @@ static void make_child_duplis_verts_from_mesh(const DupliContext *ctx,
float child_imat[4][4];
mul_m4_m4m4(child_imat, inst_ob->imat, ctx->object->obmat);
- const MVert *mv = mvert;
- for (int i = 0; i < totvert; i++, mv++) {
- const float *co = mv->co;
- float no[3];
- normal_short_to_float_v3(no, mv->no);
- DupliObject *dob = vertex_dupli(vdd->params.ctx, inst_ob, child_imat, i, co, no, use_rotation);
+ for (int i = 0; i < totvert; i++) {
+ DupliObject *dob = vertex_dupli(
+ vdd->params.ctx, inst_ob, child_imat, i, mvert[i].co, vdd->vert_normals[i], use_rotation);
if (vdd->orco) {
copy_v3_v3(dob->orco, vdd->orco[i]);
}
@@ -636,6 +638,7 @@ static void make_duplis_verts(const DupliContext *ctx)
vdd.params = vdd_params;
vdd.totvert = me_eval->totvert;
vdd.mvert = me_eval->mvert;
+ vdd.vert_normals = BKE_mesh_vertex_normals_ensure(me_eval);
vdd.orco = (const float(*)[3])CustomData_get_layer(&me_eval->vdata, CD_ORCO);
make_child_duplis(ctx, &vdd, make_child_duplis_verts_from_mesh);
@@ -893,7 +896,9 @@ static void make_duplis_geometry_set_impl(const DupliContext *ctx,
* between the instances component below and the other components above. */
DupliContext new_instances_ctx;
if (creates_duplis_for_components) {
- copy_dupli_context(&new_instances_ctx, ctx, ctx->object, nullptr, component_index);
+ if (!copy_dupli_context(&new_instances_ctx, ctx, ctx->object, nullptr, component_index)) {
+ return;
+ }
instances_ctx = &new_instances_ctx;
}
@@ -928,7 +933,9 @@ static void make_duplis_geometry_set_impl(const DupliContext *ctx,
mul_m4_m4_pre(collection_matrix, parent_transform);
DupliContext sub_ctx;
- copy_dupli_context(&sub_ctx, instances_ctx, instances_ctx->object, nullptr, id);
+ if (!copy_dupli_context(&sub_ctx, instances_ctx, instances_ctx->object, nullptr, id)) {
+ break;
+ }
eEvaluationMode mode = DEG_get_mode(instances_ctx->depsgraph);
int object_id = 0;
@@ -951,8 +958,9 @@ static void make_duplis_geometry_set_impl(const DupliContext *ctx,
mul_m4_m4m4(new_transform, parent_transform, instance_offset_matrices[i].values);
DupliContext sub_ctx;
- copy_dupli_context(&sub_ctx, instances_ctx, instances_ctx->object, nullptr, id);
- make_duplis_geometry_set_impl(&sub_ctx, reference.geometry_set(), new_transform, true);
+ if (copy_dupli_context(&sub_ctx, instances_ctx, instances_ctx->object, nullptr, id)) {
+ make_duplis_geometry_set_impl(&sub_ctx, reference.geometry_set(), new_transform, true);
+ }
break;
}
case InstanceReference::Type::None: {
@@ -1017,6 +1025,8 @@ static void get_dupliface_transform_from_coords(Span<float3> coords,
const float scale_fac,
float r_mat[4][4])
{
+ using namespace blender::math;
+
/* Location. */
float3 location(0);
for (const float3 &coord : coords) {
@@ -1027,9 +1037,7 @@ static void get_dupliface_transform_from_coords(Span<float3> coords,
/* Rotation. */
float quat[4];
- float3 f_no;
- cross_poly_v3(f_no, (const float(*)[3])coords.data(), (uint)coords.size());
- f_no.normalize();
+ float3 f_no = normalize(cross_poly(coords));
tri_to_quat_ex(quat, coords[0], coords[1], coords[2], f_no);
/* Scale. */
@@ -1609,8 +1617,9 @@ static void make_duplis_particles(const DupliContext *ctx)
LISTBASE_FOREACH_INDEX (ParticleSystem *, psys, &ctx->object->particlesystem, psysid) {
/* Particles create one more level for persistent `psys` index. */
DupliContext pctx;
- copy_dupli_context(&pctx, ctx, ctx->object, nullptr, psysid);
- make_duplis_particle_system(&pctx, psys);
+ if (copy_dupli_context(&pctx, ctx, ctx->object, nullptr, psysid)) {
+ make_duplis_particle_system(&pctx, psys);
+ }
}
}
@@ -1640,6 +1649,14 @@ static const DupliGenerator *get_dupli_generator(const DupliContext *ctx)
return nullptr;
}
+ /* Give "Object as Font" instances higher priority than geometry set instances, to retain
+ * the behavior from before curve object meshes were processed as instances internally. */
+ if (transflag & OB_DUPLIVERTS) {
+ if (ctx->object->type == OB_FONT) {
+ return &gen_dupli_verts_font;
+ }
+ }
+
if (ctx->object->runtime.geometry_set_eval != nullptr) {
if (BKE_object_has_geometry_set_instances(ctx->object)) {
return &gen_dupli_geometry_set;
@@ -1653,9 +1670,6 @@ static const DupliGenerator *get_dupli_generator(const DupliContext *ctx)
if (ctx->object->type == OB_MESH) {
return &gen_dupli_verts;
}
- if (ctx->object->type == OB_FONT) {
- return &gen_dupli_verts_font;
- }
if (ctx->object->type == OB_POINTCLOUD) {
return &gen_dupli_verts_pointcloud;
}
@@ -1680,7 +1694,7 @@ static const DupliGenerator *get_dupli_generator(const DupliContext *ctx)
ListBase *object_duplilist(Depsgraph *depsgraph, Scene *sce, Object *ob)
{
- ListBase *duplilist = (ListBase *)MEM_callocN(sizeof(ListBase), "duplilist");
+ ListBase *duplilist = MEM_cnew<ListBase>("duplilist");
DupliContext ctx;
Vector<Object *> instance_stack;
instance_stack.append(ob);
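
Since copy_dupli_context() now reports failure once MAX_DUPLI_RECUR is reached, callers are expected to skip the whole branch rather than generate duplis from a partially initialized context. The pattern used by the call sites above, sketched with illustrative local names:

DupliContext sub_ctx;
if (!copy_dupli_context(&sub_ctx, ctx, ctx->object, nullptr, index)) {
  return; /* Recursion limit hit: skip this subtree entirely. */
}
/* sub_ctx (including sub_ctx.gen) is only valid past this point. */
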
diff --git a/source/blender/blenkernel/intern/object_update.c b/source/blender/blenkernel/intern/object_update.c
index 4c0d0303c1f..1a208355870 100644
--- a/source/blender/blenkernel/intern/object_update.c
+++ b/source/blender/blenkernel/intern/object_update.c
@@ -160,12 +160,6 @@ void BKE_object_handle_data_update(Depsgraph *depsgraph, Scene *scene, Object *o
/* includes all keys and modifiers */
switch (ob->type) {
case OB_MESH: {
-#if 0
- BMEditMesh *em = (ob->mode & OB_MODE_EDIT) ? BKE_editmesh_from_object(ob) : NULL;
-#else
- BMEditMesh *em = (ob->mode & OB_MODE_EDIT) ? ((Mesh *)ob->data)->edit_mesh : NULL;
-#endif
-
CustomData_MeshMasks cddata_masks = scene->customdata_mask;
CustomData_MeshMasks_update(&cddata_masks, &CD_MASK_BAREMESH);
/* Custom attributes should not be removed automatically. They might be used by the render
@@ -175,6 +169,11 @@ void BKE_object_handle_data_update(Depsgraph *depsgraph, Scene *scene, Object *o
cddata_masks.fmask |= CD_MASK_PROP_ALL;
cddata_masks.pmask |= CD_MASK_PROP_ALL;
cddata_masks.lmask |= CD_MASK_PROP_ALL;
+
+ /* Also copy over normal layers to avoid recomputation. */
+ cddata_masks.pmask |= CD_MASK_NORMAL;
+ cddata_masks.vmask |= CD_MASK_NORMAL;
+
/* Make sure Freestyle edge/face marks appear in DM for render (see T40315).
* Due to Line Art implementation, edge marks should also be shown in viewport. */
#ifdef WITH_FREESTYLE
@@ -187,12 +186,7 @@ void BKE_object_handle_data_update(Depsgraph *depsgraph, Scene *scene, Object *o
cddata_masks.lmask |= CD_MASK_MLOOPUV | CD_MASK_MLOOPCOL;
cddata_masks.vmask |= CD_MASK_ORCO | CD_MASK_PROP_COLOR;
}
- if (em) {
- makeDerivedMesh(depsgraph, scene, ob, em, &cddata_masks); /* was CD_MASK_BAREMESH */
- }
- else {
- makeDerivedMesh(depsgraph, scene, ob, NULL, &cddata_masks);
- }
+ makeDerivedMesh(depsgraph, scene, ob, &cddata_masks); /* was CD_MASK_BAREMESH */
break;
}
case OB_ARMATURE:
diff --git a/source/blender/blenkernel/intern/packedFile.c b/source/blender/blenkernel/intern/packedFile.c
index 8989450e41b..3ddcdb424f9 100644
--- a/source/blender/blenkernel/intern/packedFile.c
+++ b/source/blender/blenkernel/intern/packedFile.c
@@ -734,7 +734,7 @@ void BKE_packedfile_pack_all_libraries(Main *bmain, ReportList *reports)
{
Library *lib;
- /* test for relativenss */
+ /* Test for relativeness. */
for (lib = bmain->libraries.first; lib; lib = lib->id.next) {
if (!BLI_path_is_rel(lib->filepath)) {
break;
diff --git a/source/blender/blenkernel/intern/paint.c b/source/blender/blenkernel/intern/paint.c
index 72210eea71d..407375c4d22 100644
--- a/source/blender/blenkernel/intern/paint.c
+++ b/source/blender/blenkernel/intern/paint.c
@@ -1648,6 +1648,7 @@ static void sculpt_update_object(Depsgraph *depsgraph,
ss->totvert = me->totvert;
ss->totpoly = me->totpoly;
ss->totfaces = me->totpoly;
+ ss->vert_normals = BKE_mesh_vertex_normals_ensure(me);
ss->mvert = me->mvert;
ss->mpoly = me->mpoly;
ss->mloop = me->mloop;
diff --git a/source/blender/blenkernel/intern/particle.c b/source/blender/blenkernel/intern/particle.c
index 674f264feb7..4dba13ce4c2 100644
--- a/source/blender/blenkernel/intern/particle.c
+++ b/source/blender/blenkernel/intern/particle.c
@@ -1674,6 +1674,7 @@ static void interpolate_pathcache(ParticleCacheKey *first, float t, ParticleCach
/************************************************/
void psys_interpolate_face(MVert *mvert,
+ const float (*vert_normals)[3],
MFace *mface,
MTFace *tface,
float (*orcodata)[3],
@@ -1695,13 +1696,13 @@ void psys_interpolate_face(MVert *mvert,
v2 = mvert[mface->v2].co;
v3 = mvert[mface->v3].co;
- normal_short_to_float_v3(n1, mvert[mface->v1].no);
- normal_short_to_float_v3(n2, mvert[mface->v2].no);
- normal_short_to_float_v3(n3, mvert[mface->v3].no);
+ copy_v3_v3(n1, vert_normals[mface->v1]);
+ copy_v3_v3(n2, vert_normals[mface->v2]);
+ copy_v3_v3(n3, vert_normals[mface->v3]);
if (mface->v4) {
v4 = mvert[mface->v4].co;
- normal_short_to_float_v3(n4, mvert[mface->v4].no);
+ copy_v3_v3(n4, vert_normals[mface->v4]);
interp_v3_v3v3v3v3(vec, v1, v2, v3, v4, w);
@@ -2124,13 +2125,13 @@ void psys_particle_on_dm(Mesh *mesh_final,
}
orcodata = CustomData_get_layer(&mesh_final->vdata, CD_ORCO);
+ const float(*vert_normals)[3] = BKE_mesh_vertex_normals_ensure(mesh_final);
if (from == PART_FROM_VERT) {
copy_v3_v3(vec, mesh_final->mvert[mapindex].co);
if (nor) {
- normal_short_to_float_v3(nor, mesh_final->mvert[mapindex].no);
- normalize_v3(nor);
+ copy_v3_v3(nor, vert_normals[mapindex]);
}
if (orco) {
@@ -2161,7 +2162,8 @@ void psys_particle_on_dm(Mesh *mesh_final,
}
if (from == PART_FROM_VOLUME) {
- psys_interpolate_face(mvert, mface, mtface, orcodata, mapfw, vec, tmpnor, utan, vtan, orco);
+ psys_interpolate_face(
+ mvert, vert_normals, mface, mtface, orcodata, mapfw, vec, tmpnor, utan, vtan, orco);
if (nor) {
copy_v3_v3(nor, tmpnor);
}
@@ -2173,7 +2175,8 @@ void psys_particle_on_dm(Mesh *mesh_final,
add_v3_v3(vec, tmpnor);
}
else {
- psys_interpolate_face(mvert, mface, mtface, orcodata, mapfw, vec, nor, utan, vtan, orco);
+ psys_interpolate_face(
+ mvert, vert_normals, mface, mtface, orcodata, mapfw, vec, nor, utan, vtan, orco);
}
}
}
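
With the packed short normals gone from MVert in this path, psys_interpolate_face() takes the float vertex-normal layer as an explicit argument, and every caller is expected to fetch it first. A minimal caller-side sketch mirroring the call sites above (the surrounding locals are assumed to exist as in psys_particle_on_dm):

const float(*vert_normals)[3] = BKE_mesh_vertex_normals_ensure(mesh_final);
psys_interpolate_face(
    mvert, vert_normals, mface, mtface, orcodata, mapfw, vec, nor, utan, vtan, orco);
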
diff --git a/source/blender/blenkernel/intern/particle_distribute.c b/source/blender/blenkernel/intern/particle_distribute.c
index fd4f89e3f6d..ba3f99a2800 100644
--- a/source/blender/blenkernel/intern/particle_distribute.c
+++ b/source/blender/blenkernel/intern/particle_distribute.c
@@ -626,7 +626,8 @@ static void distribute_from_volume_exec(ParticleTask *thread, ParticleData *pa,
/* experimental */
tot = mesh->totface;
- psys_interpolate_face(mvert, mface, 0, 0, pa->fuv, co, nor, 0, 0, 0);
+ psys_interpolate_face(
+ mvert, BKE_mesh_vertex_normals_ensure(mesh), mface, 0, 0, pa->fuv, co, nor, 0, 0, 0);
normalize_v3(nor);
negate_v3(nor);
diff --git a/source/blender/blenkernel/intern/pbvh.c b/source/blender/blenkernel/intern/pbvh.c
index 2f22f94d142..1926bbcda02 100644
--- a/source/blender/blenkernel/intern/pbvh.c
+++ b/source/blender/blenkernel/intern/pbvh.c
@@ -32,7 +32,7 @@
#include "DNA_meshdata_types.h"
#include "BKE_ccg.h"
-#include "BKE_mesh.h" /* for BKE_mesh_calc_normals */
+#include "BKE_mesh.h"
#include "BKE_paint.h"
#include "BKE_pbvh.h"
#include "BKE_subdiv_ccg.h"
@@ -552,7 +552,7 @@ static void pbvh_build(PBVH *pbvh, BB *cb, BBC *prim_bbc, int totprim)
}
void BKE_pbvh_build_mesh(PBVH *pbvh,
- const Mesh *mesh,
+ Mesh *mesh,
const MPoly *mpoly,
const MLoop *mloop,
MVert *verts,
@@ -572,6 +572,8 @@ void BKE_pbvh_build_mesh(PBVH *pbvh,
pbvh->mloop = mloop;
pbvh->looptri = looptri;
pbvh->verts = verts;
+ BKE_mesh_vertex_normals_ensure(mesh);
+ pbvh->vert_normals = BKE_mesh_vertex_normals_for_write(mesh);
pbvh->vert_bitmap = BLI_BITMAP_NEW(totvert, "bvh->vert_bitmap");
pbvh->totvert = totvert;
pbvh->leaf_limit = LEAF_LIMIT;
@@ -1076,7 +1078,6 @@ static void pbvh_update_normals_store_task_cb(void *__restrict userdata,
* so we know only this thread will handle this vertex. */
if (mvert->flag & ME_VERT_PBVH_UPDATE) {
normalize_v3(vnors[v]);
- normal_float_to_short_v3(mvert->no, vnors[v]);
mvert->flag &= ~ME_VERT_PBVH_UPDATE;
}
}
@@ -1087,10 +1088,6 @@ static void pbvh_update_normals_store_task_cb(void *__restrict userdata,
static void pbvh_faces_update_normals(PBVH *pbvh, PBVHNode **nodes, int totnode)
{
- /* could be per node to save some memory, but also means
- * we have to store for each vertex which node it is in */
- float(*vnors)[3] = MEM_callocN(sizeof(*vnors) * pbvh->totvert, __func__);
-
/* subtle assumptions:
* - We know that for all edited vertices, the nodes with faces
* adjacent to these vertices have been marked with PBVH_UpdateNormals.
@@ -1104,7 +1101,7 @@ static void pbvh_faces_update_normals(PBVH *pbvh, PBVHNode **nodes, int totnode)
PBVHUpdateData data = {
.pbvh = pbvh,
.nodes = nodes,
- .vnors = vnors,
+ .vnors = pbvh->vert_normals,
};
TaskParallelSettings settings;
@@ -1112,8 +1109,6 @@ static void pbvh_faces_update_normals(PBVH *pbvh, PBVHNode **nodes, int totnode)
BLI_task_parallel_range(0, totnode, &data, pbvh_update_normals_accum_task_cb, &settings);
BLI_task_parallel_range(0, totnode, &data, pbvh_update_normals_store_task_cb, &settings);
-
- MEM_freeN(vnors);
}
static void pbvh_update_mask_redraw_task_cb(void *__restrict userdata,
@@ -1300,6 +1295,7 @@ static void pbvh_update_draw_buffer_cb(void *__restrict userdata,
case PBVH_FACES:
GPU_pbvh_mesh_buffers_update(node->draw_buffers,
pbvh->verts,
+ pbvh->vert_normals,
CustomData_get_layer(pbvh->vdata, CD_PAINT_MASK),
CustomData_get_layer(pbvh->ldata, CD_MLOOPCOL),
CustomData_get_layer(pbvh->pdata, CD_SCULPT_FACE_SETS),
@@ -2964,6 +2960,8 @@ void pbvh_vertex_iter_init(PBVH *pbvh, PBVHNode *node, PBVHVertexIter *vi, int m
vi->mask = NULL;
if (pbvh->type == PBVH_FACES) {
+ vi->vert_normals = pbvh->vert_normals;
+
vi->vmask = CustomData_get_layer(pbvh->vdata, CD_PAINT_MASK);
vi->vcol = CustomData_get_layer(pbvh->vdata, CD_PROP_COLOR);
}
@@ -3037,6 +3035,12 @@ MVert *BKE_pbvh_get_verts(const PBVH *pbvh)
return pbvh->verts;
}
+const float (*BKE_pbvh_get_vert_normals(const PBVH *pbvh))[3]
+{
+ BLI_assert(pbvh->type == PBVH_FACES);
+ return pbvh->vert_normals;
+}
+
void BKE_pbvh_subdiv_cgg_set(PBVH *pbvh, SubdivCCG *subdiv_ccg)
{
pbvh->subdiv_ccg = subdiv_ccg;
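
The PBVH now owns the float vertex normals instead of writing short normals back into MVert, and outside code is expected to read them through the new accessor. A minimal read-only sketch, assuming a faces-type PBVH and an in-range vertex index (both names are illustrative):

/* Only valid for PBVH_FACES, as asserted by the accessor. */
const float(*vert_normals)[3] = BKE_pbvh_get_vert_normals(pbvh);
float no[3];
copy_v3_v3(no, vert_normals[vert_index]);
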
diff --git a/source/blender/blenkernel/intern/pbvh_intern.h b/source/blender/blenkernel/intern/pbvh_intern.h
index 12c2d7aac78..9562cda5f28 100644
--- a/source/blender/blenkernel/intern/pbvh_intern.h
+++ b/source/blender/blenkernel/intern/pbvh_intern.h
@@ -130,6 +130,9 @@ struct PBVH {
/* Mesh data */
const struct Mesh *mesh;
+
+ /* Note: Normals are not const because they can be updated for drawing by sculpt code. */
+ float (*vert_normals)[3];
MVert *verts;
const MPoly *mpoly;
const MLoop *mloop;
diff --git a/source/blender/blenkernel/intern/pointcache.c b/source/blender/blenkernel/intern/pointcache.c
index 094181afca9..602546db8df 100644
--- a/source/blender/blenkernel/intern/pointcache.c
+++ b/source/blender/blenkernel/intern/pointcache.c
@@ -831,31 +831,23 @@ static void ptcache_rigidbody_interpolate(int index,
RigidBodyOb *rbo = ob->rigidbody_object;
if (rbo->type == RBO_TYPE_ACTIVE) {
- ParticleKey keys[4];
- ParticleKey result;
- float dfra;
-
- memset(keys, 0, sizeof(keys));
-
- copy_v3_v3(keys[1].co, rbo->pos);
- copy_qt_qt(keys[1].rot, rbo->orn);
+ /* It may be possible to improve results by taking velocity into account
+ * for the interpolation using psys_interpolate_particle; however, velocity is
+ * not currently cached. */
+ float pos[3], orn[4];
if (old_data) {
- memcpy(keys[2].co, data, sizeof(float[3]));
- memcpy(keys[2].rot, data + 3, sizeof(float[4]));
+ memcpy(pos, data, sizeof(float[3]));
+ memcpy(orn, data + 3, sizeof(float[4]));
}
else {
- BKE_ptcache_make_particle_key(&keys[2], 0, data, cfra2);
+ PTCACHE_DATA_TO(data, BPHYS_DATA_LOCATION, index, pos);
+ PTCACHE_DATA_TO(data, BPHYS_DATA_ROTATION, index, orn);
}
- dfra = cfra2 - cfra1;
-
- /* NOTE: keys[0] and keys[3] unused for type < 1 (crappy). */
- psys_interpolate_particle(-1, keys, (cfra - cfra1) / dfra, &result, true);
- interp_qt_qtqt(result.rot, keys[1].rot, keys[2].rot, (cfra - cfra1) / dfra);
-
- copy_v3_v3(rbo->pos, result.co);
- copy_qt_qt(rbo->orn, result.rot);
+ const float t = (cfra - cfra1) / (cfra2 - cfra1);
+ interp_v3_v3v3(rbo->pos, rbo->pos, pos, t);
+ interp_qt_qtqt(rbo->orn, rbo->orn, orn, t);
}
}
}
@@ -1322,10 +1314,11 @@ static int ptcache_frame_from_filename(const char *filename, const char *ext)
static int ptcache_path(PTCacheID *pid, char *filename)
{
+ const char *blendfile_path = BKE_main_blendfile_path_from_global();
Library *lib = (pid->owner_id) ? pid->owner_id->lib : NULL;
const char *blendfilename = (lib && (pid->cache->flag & PTCACHE_IGNORE_LIBPATH) == 0) ?
lib->filepath_abs :
- BKE_main_blendfile_path_from_global();
+ blendfile_path;
size_t i;
if (pid->cache->flag & PTCACHE_EXTERNAL) {
@@ -1337,7 +1330,7 @@ static int ptcache_path(PTCacheID *pid, char *filename)
return BLI_path_slash_ensure(filename); /* new strlen() */
}
- if (G.relbase_valid || lib) {
+ if ((blendfile_path[0] != '\0') || lib) {
char file[MAX_PTCACHE_PATH]; /* we don't want the dir, only the file */
BLI_split_file_part(blendfilename, file, sizeof(file));
@@ -1422,8 +1415,11 @@ static int ptcache_filename(PTCacheID *pid, char *filename, int cfra, short do_p
filename[0] = '\0';
newname = filename;
- if (!G.relbase_valid && (pid->cache->flag & PTCACHE_EXTERNAL) == 0) {
- return 0; /* save blend file before using disk pointcache */
+ if ((pid->cache->flag & PTCACHE_EXTERNAL) == 0) {
+ const char *blendfile_path = BKE_main_blendfile_path_from_global();
+ if (blendfile_path[0] == '\0') {
+ return 0; /* save blend file before using disk pointcache */
+ }
}
/* start with temp dir */
@@ -1469,8 +1465,11 @@ static PTCacheFile *ptcache_file_open(PTCacheID *pid, int mode, int cfra)
return NULL;
}
#endif
- if (!G.relbase_valid && (pid->cache->flag & PTCACHE_EXTERNAL) == 0) {
- return NULL; /* save blend file before using disk pointcache */
+ if ((pid->cache->flag & PTCACHE_EXTERNAL) == 0) {
+ const char *blendfile_path = BKE_main_blendfile_path_from_global();
+ if (blendfile_path[0] == '\0') {
+ return NULL; /* save blend file before using disk pointcache */
+ }
}
ptcache_filename(pid, filename, cfra, 1, 1);
@@ -3444,8 +3443,9 @@ void BKE_ptcache_toggle_disk_cache(PTCacheID *pid)
{
PointCache *cache = pid->cache;
int last_exact = cache->last_exact;
+ const char *blendfile_path = BKE_main_blendfile_path_from_global();
- if (!G.relbase_valid) {
+ if (blendfile_path[0] == '\0') {
cache->flag &= ~PTCACHE_DISK_CACHE;
if (G.debug & G_DEBUG) {
printf("File must be saved before using disk cache!\n");
@@ -3497,6 +3497,11 @@ void BKE_ptcache_disk_cache_rename(PTCacheID *pid, const char *name_src, const c
char old_path_full[MAX_PTCACHE_FILE];
char ext[MAX_PTCACHE_PATH];
+ /* If both names are the same, there is nothing to do. */
+ if (STREQ(name_src, name_dst)) {
+ return;
+ }
+
/* save old name */
BLI_strncpy(old_name, pid->cache->name, sizeof(old_name));
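
The G.relbase_valid checks are replaced by fetching the blend-file path and testing it for emptiness, so "has this file been saved?" is answered in one place. The idiom used by the hunks above, sketched in isolation:

const char *blendfile_path = BKE_main_blendfile_path_from_global();
if (blendfile_path[0] == '\0') {
  /* The .blend file has not been saved yet: disk point caches are unavailable. */
  return NULL;
}
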
diff --git a/source/blender/blenkernel/intern/pointcloud.cc b/source/blender/blenkernel/intern/pointcloud.cc
index 82dde79cff6..b5f016e4d76 100644
--- a/source/blender/blenkernel/intern/pointcloud.cc
+++ b/source/blender/blenkernel/intern/pointcloud.cc
@@ -25,10 +25,13 @@
#include "DNA_object_types.h"
#include "DNA_pointcloud_types.h"
+#include "BLI_index_range.hh"
#include "BLI_listbase.h"
-#include "BLI_math.h"
+#include "BLI_math_vec_types.hh"
#include "BLI_rand.h"
+#include "BLI_span.hh"
#include "BLI_string.h"
+#include "BLI_task.hh"
#include "BLI_utildefines.h"
#include "BKE_anim_data.h"
@@ -51,6 +54,10 @@
#include "BLO_read_write.h"
+using blender::float3;
+using blender::IndexRange;
+using blender::Span;
+
/* PointCloud datablock */
static void pointcloud_random(PointCloud *pointcloud);
@@ -261,18 +268,70 @@ PointCloud *BKE_pointcloud_new_nomain(const int totpoint)
return pointcloud;
}
-void BKE_pointcloud_minmax(const struct PointCloud *pointcloud, float r_min[3], float r_max[3])
+struct MinMaxResult {
+ float3 min;
+ float3 max;
+};
+
+static MinMaxResult min_max_no_radii(Span<float3> positions)
{
- float(*pointcloud_co)[3] = pointcloud->co;
- float *pointcloud_radius = pointcloud->radius;
- for (int a = 0; a < pointcloud->totpoint; a++) {
- float *co = pointcloud_co[a];
- float radius = (pointcloud_radius) ? pointcloud_radius[a] : 0.0f;
- const float co_min[3] = {co[0] - radius, co[1] - radius, co[2] - radius};
- const float co_max[3] = {co[0] + radius, co[1] + radius, co[2] + radius};
- DO_MIN(co_min, r_min);
- DO_MAX(co_max, r_max);
+ using namespace blender::math;
+
+ return blender::threading::parallel_reduce(
+ positions.index_range(),
+ 1024,
+ MinMaxResult{float3(FLT_MAX), float3(-FLT_MAX)},
+ [&](IndexRange range, const MinMaxResult &init) {
+ MinMaxResult result = init;
+ for (const int i : range) {
+ min_max(positions[i], result.min, result.max);
+ }
+ return result;
+ },
+ [](const MinMaxResult &a, const MinMaxResult &b) {
+ return MinMaxResult{min(a.min, b.min), max(a.max, b.max)};
+ });
+}
+
+static MinMaxResult min_max_with_radii(Span<float3> positions, Span<float> radii)
+{
+ using namespace blender::math;
+
+ return blender::threading::parallel_reduce(
+ positions.index_range(),
+ 1024,
+ MinMaxResult{float3(FLT_MAX), float3(-FLT_MAX)},
+ [&](IndexRange range, const MinMaxResult &init) {
+ MinMaxResult result = init;
+ for (const int i : range) {
+ result.min = min(positions[i] - radii[i], result.min);
+ result.max = max(positions[i] + radii[i], result.max);
+ }
+ return result;
+ },
+ [](const MinMaxResult &a, const MinMaxResult &b) {
+ return MinMaxResult{min(a.min, b.min), max(a.max, b.max)};
+ });
+}
+
+bool BKE_pointcloud_minmax(const PointCloud *pointcloud, float r_min[3], float r_max[3])
+{
+ using namespace blender::math;
+
+ if (!pointcloud->totpoint) {
+ return false;
}
+
+ Span<float3> positions{reinterpret_cast<float3 *>(pointcloud->co), pointcloud->totpoint};
+ const MinMaxResult min_max = (pointcloud->radius) ?
+ min_max_with_radii(positions,
+ {pointcloud->radius, pointcloud->totpoint}) :
+ min_max_no_radii(positions);
+
+ copy_v3_v3(r_min, min(min_max.min, float3(r_min)));
+ copy_v3_v3(r_max, max(min_max.max, float3(r_max)));
+
+ return true;
}
BoundBox *BKE_pointcloud_boundbox_get(Object *ob)
@@ -287,7 +346,7 @@ BoundBox *BKE_pointcloud_boundbox_get(Object *ob)
ob->runtime.bb = static_cast<BoundBox *>(MEM_callocN(sizeof(BoundBox), "pointcloud boundbox"));
}
- blender::float3 min, max;
+ float3 min, max;
INIT_MINMAX(min, max);
if (ob->runtime.geometry_set_eval != nullptr) {
ob->runtime.geometry_set_eval->compute_boundbox_without_instances(&min, &max);
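
BKE_pointcloud_minmax() now returns whether it contributed any bounds, so callers can handle empty point clouds explicitly instead of receiving an untouched FLT_MAX box. A minimal caller-side sketch, assuming INIT_MINMAX-style initialization (the fallback shown is illustrative):

float min[3], max[3];
INIT_MINMAX(min, max);
if (!BKE_pointcloud_minmax(pointcloud, min, max)) {
  /* Empty point cloud: fall back to a zero-size box at the origin. */
  zero_v3(min);
  zero_v3(max);
}
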
diff --git a/source/blender/blenkernel/intern/screen.c b/source/blender/blenkernel/intern/screen.c
index cd8493ee559..6e352b6ba90 100644
--- a/source/blender/blenkernel/intern/screen.c
+++ b/source/blender/blenkernel/intern/screen.c
@@ -900,7 +900,7 @@ ARegion *BKE_area_find_region_active_win(ScrArea *area)
return BKE_area_find_region_type(area, RGN_TYPE_WINDOW);
}
-ARegion *BKE_area_find_region_xy(ScrArea *area, const int regiontype, int x, int y)
+ARegion *BKE_area_find_region_xy(ScrArea *area, const int regiontype, const int xy[2])
{
if (area == NULL) {
return NULL;
@@ -908,7 +908,7 @@ ARegion *BKE_area_find_region_xy(ScrArea *area, const int regiontype, int x, int
LISTBASE_FOREACH (ARegion *, region, &area->regionbase) {
if (ELEM(regiontype, RGN_TYPE_ANY, region->regiontype)) {
- if (BLI_rcti_isect_pt(&region->winrct, x, y)) {
+ if (BLI_rcti_isect_pt_v(&region->winrct, xy)) {
return region;
}
}
@@ -916,11 +916,11 @@ ARegion *BKE_area_find_region_xy(ScrArea *area, const int regiontype, int x, int
return NULL;
}
-ARegion *BKE_screen_find_region_xy(bScreen *screen, const int regiontype, int x, int y)
+ARegion *BKE_screen_find_region_xy(bScreen *screen, const int regiontype, const int xy[2])
{
LISTBASE_FOREACH (ARegion *, region, &screen->regionbase) {
if (ELEM(regiontype, RGN_TYPE_ANY, region->regiontype)) {
- if (BLI_rcti_isect_pt(&region->winrct, x, y)) {
+ if (BLI_rcti_isect_pt_v(&region->winrct, xy)) {
return region;
}
}
@@ -961,11 +961,10 @@ ScrArea *BKE_screen_find_big_area(bScreen *screen, const int spacetype, const sh
ScrArea *BKE_screen_area_map_find_area_xy(const ScrAreaMap *areamap,
const int spacetype,
- int x,
- int y)
+ const int xy[2])
{
LISTBASE_FOREACH (ScrArea *, area, &areamap->areabase) {
- if (BLI_rcti_isect_pt(&area->totrct, x, y)) {
+ if (BLI_rcti_isect_pt_v(&area->totrct, xy)) {
if (ELEM(spacetype, SPACE_TYPE_ANY, area->spacetype)) {
return area;
}
@@ -974,9 +973,9 @@ ScrArea *BKE_screen_area_map_find_area_xy(const ScrAreaMap *areamap,
}
return NULL;
}
-ScrArea *BKE_screen_find_area_xy(bScreen *screen, const int spacetype, int x, int y)
+ScrArea *BKE_screen_find_area_xy(bScreen *screen, const int spacetype, const int xy[2])
{
- return BKE_screen_area_map_find_area_xy(AREAMAP_FROM_SCREEN(screen), spacetype, x, y);
+ return BKE_screen_area_map_find_area_xy(AREAMAP_FROM_SCREEN(screen), spacetype, xy);
}
void BKE_screen_view3d_sync(View3D *v3d, struct Scene *scene)
@@ -1016,16 +1015,13 @@ void BKE_screen_view3d_shading_init(View3DShading *shading)
memcpy(shading, shading_default, sizeof(*shading));
}
-ARegion *BKE_screen_find_main_region_at_xy(bScreen *screen,
- const int space_type,
- const int x,
- const int y)
+ARegion *BKE_screen_find_main_region_at_xy(bScreen *screen, const int space_type, const int xy[2])
{
- ScrArea *area = BKE_screen_find_area_xy(screen, space_type, x, y);
+ ScrArea *area = BKE_screen_find_area_xy(screen, space_type, xy);
if (!area) {
return NULL;
}
- return BKE_area_find_region_xy(area, RGN_TYPE_WINDOW, x, y);
+ return BKE_area_find_region_xy(area, RGN_TYPE_WINDOW, xy);
}
/* Magic zoom calculation, no idea what it signifies, if you find out, tell me! -zr
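
The screen/area/region lookups now take the position as a single const int xy[2], so coordinates can be forwarded as one argument instead of separate x/y. A minimal caller-side sketch, assuming separate ints are still at hand (the local names are illustrative):

const int xy[2] = {x, y};
ScrArea *area = BKE_screen_find_area_xy(screen, SPACE_TYPE_ANY, xy);
ARegion *region = area ? BKE_area_find_region_xy(area, RGN_TYPE_WINDOW, xy) : NULL;
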
diff --git a/source/blender/blenkernel/intern/shrinkwrap.c b/source/blender/blenkernel/intern/shrinkwrap.c
index 7618323f488..d51ed2832f0 100644
--- a/source/blender/blenkernel/intern/shrinkwrap.c
+++ b/source/blender/blenkernel/intern/shrinkwrap.c
@@ -75,7 +75,8 @@ typedef struct ShrinkwrapCalcData {
struct Object *ob; /* object we are applying shrinkwrap to */
- struct MVert *vert; /* Array of verts being projected (to fetch normals or other data) */
+ struct MVert *vert; /* Array of verts being projected. */
+ const float (*vert_normals)[3];
float (*vertexCos)[3]; /* vertexs being shrinkwraped */
int numVerts;
@@ -146,7 +147,7 @@ bool BKE_shrinkwrap_init_tree(
}
if (force_normals || BKE_shrinkwrap_needs_normals(shrinkType, shrinkMode)) {
- data->pnors = CustomData_get_layer(&mesh->pdata, CD_NORMAL);
+ data->pnors = BKE_mesh_poly_normals_ensure(mesh);
if ((mesh->flag & ME_AUTOSMOOTH) != 0) {
data->clnors = CustomData_get_layer(&mesh->ldata, CD_NORMAL);
}
@@ -313,18 +314,18 @@ static ShrinkwrapBoundaryData *shrinkwrap_build_boundary_data(struct Mesh *mesh)
MEM_freeN(vert_status);
/* Finalize average direction and compute normal. */
+ const float(*vert_normals)[3] = BKE_mesh_vertex_normals_ensure(mesh);
for (int i = 0; i < mesh->totvert; i++) {
int bidx = vert_boundary_id[i];
if (bidx >= 0) {
ShrinkwrapBoundaryVertData *vdata = &boundary_verts[bidx];
- float no[3], tmp[3];
+ float tmp[3];
normalize_v3(vdata->direction);
- normal_short_to_float_v3(no, mesh->mvert[i].no);
- cross_v3_v3v3(tmp, no, vdata->direction);
- cross_v3_v3v3(vdata->normal_plane, tmp, no);
+ cross_v3_v3v3(tmp, vert_normals[i], vdata->direction);
+ cross_v3_v3v3(vdata->normal_plane, tmp, vert_normals[i]);
normalize_v3(vdata->normal_plane);
}
}
@@ -540,7 +541,7 @@ static void shrinkwrap_calc_normal_projection_cb_ex(void *__restrict userdata,
* (to get correct normals) for other cases calc->verts contains undeformed coordinates and
* vertexCos should be used */
copy_v3_v3(tmp_co, calc->vert[i].co);
- normal_short_to_float_v3(tmp_no, calc->vert[i].no);
+ copy_v3_v3(tmp_no, calc->vert_normals[i]);
}
else {
copy_v3_v3(tmp_co, co);
@@ -1008,8 +1009,8 @@ static void target_project_edge(const ShrinkwrapTreeData *tree,
CLAMP(x, 0, 1);
float vedge_no[2][3];
- normal_short_to_float_v3(vedge_no[0], data->vert[edge->v1].no);
- normal_short_to_float_v3(vedge_no[1], data->vert[edge->v2].no);
+ copy_v3_v3(vedge_no[0], data->vert_normals[edge->v1]);
+ copy_v3_v3(vedge_no[1], data->vert_normals[edge->v2]);
interp_v3_v3v3(hit_co, vedge_co[0], vedge_co[1], x);
interp_v3_v3v3(hit_no, vedge_no[0], vedge_no[1], x);
@@ -1055,9 +1056,9 @@ static void mesh_looptri_target_project(void *userdata,
}
/* Decode normals */
- normal_short_to_float_v3(vtri_no[0], vtri[0]->no);
- normal_short_to_float_v3(vtri_no[1], vtri[1]->no);
- normal_short_to_float_v3(vtri_no[2], vtri[2]->no);
+ copy_v3_v3(vtri_no[0], tree->treeData.vert_normals[loop[0]->v]);
+ copy_v3_v3(vtri_no[1], tree->treeData.vert_normals[loop[1]->v]);
+ copy_v3_v3(vtri_no[2], tree->treeData.vert_normals[loop[2]->v]);
/* Solve the equations for the triangle */
if (target_project_solve_point_tri(vtri_co, vtri_no, co, raw_hit_co, dist_sq, hit_co, hit_no)) {
@@ -1191,14 +1192,13 @@ void BKE_shrinkwrap_compute_smooth_normal(const struct ShrinkwrapTreeData *tree,
{
const BVHTreeFromMesh *treeData = &tree->treeData;
const MLoopTri *tri = &treeData->looptri[looptri_idx];
+ const float(*vert_normals)[3] = tree->treeData.vert_normals;
/* Interpolate smooth normals if enabled. */
if ((tree->mesh->mpoly[tri->poly].flag & ME_SMOOTH) != 0) {
- const MVert *verts[] = {
- &treeData->vert[treeData->loop[tri->tri[0]].v],
- &treeData->vert[treeData->loop[tri->tri[1]].v],
- &treeData->vert[treeData->loop[tri->tri[2]].v],
- };
+ const uint32_t vert_indices[3] = {treeData->loop[tri->tri[0]].v,
+ treeData->loop[tri->tri[1]].v,
+ treeData->loop[tri->tri[2]].v};
float w[3], no[3][3], tmp_co[3];
/* Custom and auto smooth split normals. */
@@ -1209,9 +1209,9 @@ void BKE_shrinkwrap_compute_smooth_normal(const struct ShrinkwrapTreeData *tree,
}
/* Ordinary vertex normals. */
else {
- normal_short_to_float_v3(no[0], verts[0]->no);
- normal_short_to_float_v3(no[1], verts[1]->no);
- normal_short_to_float_v3(no[2], verts[2]->no);
+ copy_v3_v3(no[0], vert_normals[vert_indices[0]]);
+ copy_v3_v3(no[1], vert_normals[vert_indices[1]]);
+ copy_v3_v3(no[2], vert_normals[vert_indices[2]]);
}
/* Barycentric weights from hit point. */
@@ -1221,7 +1221,11 @@ void BKE_shrinkwrap_compute_smooth_normal(const struct ShrinkwrapTreeData *tree,
BLI_space_transform_apply(transform, tmp_co);
}
- interp_weights_tri_v3(w, verts[0]->co, verts[1]->co, verts[2]->co, tmp_co);
+ interp_weights_tri_v3(w,
+ treeData->vert[vert_indices[0]].co,
+ treeData->vert[vert_indices[1]].co,
+ treeData->vert[vert_indices[2]].co,
+ tmp_co);
/* Interpolate using weights. */
interp_v3_v3v3v3(r_no, no[0], no[1], no[2], w);
@@ -1424,6 +1428,7 @@ void shrinkwrapModifier_deform(ShrinkwrapModifierData *smd,
if (mesh != NULL && smd->shrinkType == MOD_SHRINKWRAP_PROJECT) {
/* Setup arrays to get vertexs positions, normals and deform weights */
calc.vert = mesh->mvert;
+ calc.vert_normals = BKE_mesh_vertex_normals_ensure(mesh);
/* Using vertexs positions/normals as if a subsurface was applied */
if (smd->subsurfLevels) {
@@ -1581,6 +1586,7 @@ void BKE_shrinkwrap_remesh_target_project(Mesh *src_me, Mesh *target_me, Object
calc.smd = &ssmd;
calc.numVerts = src_me->totvert;
calc.vertexCos = vertexCos;
+ calc.vert_normals = BKE_mesh_vertex_normals_ensure(src_me);
calc.vgroup = -1;
calc.target = target_me;
calc.keepDist = ssmd.keepDist;
diff --git a/source/blender/blenkernel/intern/simulation.cc b/source/blender/blenkernel/intern/simulation.cc
index b0f9de5963a..ec4b0e8d51d 100644
--- a/source/blender/blenkernel/intern/simulation.cc
+++ b/source/blender/blenkernel/intern/simulation.cc
@@ -28,9 +28,9 @@
#include "DNA_simulation_types.h"
#include "BLI_compiler_compat.h"
-#include "BLI_float3.hh"
#include "BLI_listbase.h"
#include "BLI_math.h"
+#include "BLI_math_vec_types.hh"
#include "BLI_rand.h"
#include "BLI_span.hh"
#include "BLI_string.h"
diff --git a/source/blender/blenkernel/intern/softbody.c b/source/blender/blenkernel/intern/softbody.c
index b811c17a3bc..baabf57f0c3 100644
--- a/source/blender/blenkernel/intern/softbody.c
+++ b/source/blender/blenkernel/intern/softbody.c
@@ -1673,7 +1673,7 @@ static int sb_detect_vertex_collisionCached(float opco[3],
if ((opco[0] < minx) || (opco[1] < miny) || (opco[2] < minz) || (opco[0] > maxx) ||
(opco[1] > maxy) || (opco[2] > maxz)) {
- /* outside the padded boundbox --> collision object is too far away */
+ /* Outside the padded bound-box -> collision object is too far away. */
BLI_ghashIterator_step(ihash);
continue;
}
diff --git a/source/blender/blenkernel/intern/spline_base.cc b/source/blender/blenkernel/intern/spline_base.cc
index 4ff392a5ddb..3262d768b6c 100644
--- a/source/blender/blenkernel/intern/spline_base.cc
+++ b/source/blender/blenkernel/intern/spline_base.cc
@@ -166,13 +166,15 @@ static void accumulate_lengths(Span<float3> positions,
const bool is_cyclic,
MutableSpan<float> lengths)
{
+ using namespace blender::math;
+
float length = 0.0f;
for (const int i : IndexRange(positions.size() - 1)) {
- length += float3::distance(positions[i], positions[i + 1]);
+ length += distance(positions[i], positions[i + 1]);
lengths[i] = length;
}
if (is_cyclic) {
- lengths.last() = length + float3::distance(positions.last(), positions.first());
+ lengths.last() = length + distance(positions.last(), positions.first());
}
}
@@ -200,11 +202,13 @@ Span<float> Spline::evaluated_lengths() const
static float3 direction_bisect(const float3 &prev, const float3 &middle, const float3 &next)
{
- const float3 dir_prev = (middle - prev).normalized();
- const float3 dir_next = (next - middle).normalized();
+ using namespace blender::math;
+
+ const float3 dir_prev = normalize(middle - prev);
+ const float3 dir_next = normalize(next - middle);
- const float3 result = (dir_prev + dir_next).normalized();
- if (UNLIKELY(result.is_zero())) {
+ const float3 result = normalize(dir_prev + dir_next);
+ if (UNLIKELY(is_zero(result))) {
return float3(0.0f, 0.0f, 1.0f);
}
return result;
@@ -214,6 +218,8 @@ static void calculate_tangents(Span<float3> positions,
const bool is_cyclic,
MutableSpan<float3> tangents)
{
+ using namespace blender::math;
+
if (positions.size() == 1) {
tangents.first() = float3(0.0f, 0.0f, 1.0f);
return;
@@ -232,8 +238,8 @@ static void calculate_tangents(Span<float3> positions,
tangents.last() = direction_bisect(second_to_last, last, first);
}
else {
- tangents.first() = (positions[1] - positions[0]).normalized();
- tangents.last() = (positions.last() - positions[positions.size() - 2]).normalized();
+ tangents.first() = normalize(positions[1] - positions[0]);
+ tangents.last() = normalize(positions.last() - positions[positions.size() - 2]);
}
}
@@ -264,18 +270,22 @@ static float3 rotate_direction_around_axis(const float3 &direction,
const float3 &axis,
const float angle)
{
+ using namespace blender::math;
+
BLI_ASSERT_UNIT_V3(direction);
BLI_ASSERT_UNIT_V3(axis);
- const float3 axis_scaled = axis * float3::dot(direction, axis);
+ const float3 axis_scaled = axis * dot(direction, axis);
const float3 diff = direction - axis_scaled;
- const float3 cross = float3::cross(axis, diff);
+ const float3 cross = blender::math::cross(axis, diff);
return axis_scaled + diff * std::cos(angle) + cross * std::sin(angle);
}
static void calculate_normals_z_up(Span<float3> tangents, MutableSpan<float3> r_normals)
{
+ using namespace blender::math;
+
BLI_assert(r_normals.size() == tangents.size());
/* Same as in `vec_to_quat`. */
@@ -286,7 +296,7 @@ static void calculate_normals_z_up(Span<float3> tangents, MutableSpan<float3> r_
r_normals[i] = {1.0f, 0.0f, 0.0f};
}
else {
- r_normals[i] = float3(tangent.y, -tangent.x, 0.0f).normalized();
+ r_normals[i] = normalize(float3(tangent.y, -tangent.x, 0.0f));
}
}
}
@@ -298,12 +308,14 @@ static float3 calculate_next_normal(const float3 &last_normal,
const float3 &last_tangent,
const float3 &current_tangent)
{
- if (last_tangent.is_zero() || current_tangent.is_zero()) {
+ using namespace blender::math;
+
+ if (is_zero(last_tangent) || is_zero(current_tangent)) {
return last_normal;
}
const float angle = angle_normalized_v3v3(last_tangent, current_tangent);
if (angle != 0.0) {
- const float3 axis = float3::cross(last_tangent, current_tangent).normalized();
+ const float3 axis = normalize(cross(last_tangent, current_tangent));
return rotate_direction_around_axis(last_normal, axis, angle);
}
return last_normal;
@@ -313,6 +325,7 @@ static void calculate_normals_minimum(Span<float3> tangents,
const bool cyclic,
MutableSpan<float3> r_normals)
{
+ using namespace blender::math;
BLI_assert(r_normals.size() == tangents.size());
if (r_normals.is_empty()) {
@@ -327,7 +340,7 @@ static void calculate_normals_minimum(Span<float3> tangents,
r_normals[0] = {1.0f, 0.0f, 0.0f};
}
else {
- r_normals[0] = float3(first_tangent.y, -first_tangent.x, 0.0f).normalized();
+ r_normals[0] = normalize(float3(first_tangent.y, -first_tangent.x, 0.0f));
}
/* Forward normal with minimum twist along the entire spline. */
@@ -417,7 +430,9 @@ Spline::LookupResult Spline::lookup_evaluated_length(const float length) const
const int next_index = (index == this->evaluated_points_size() - 1) ? 0 : index + 1;
const float previous_length = (index == 0) ? 0.0f : lengths[index - 1];
- const float factor = (length - previous_length) / (lengths[index] - previous_length);
+ const float length_in_segment = length - previous_length;
+ const float segment_length = lengths[index] - previous_length;
+ const float factor = segment_length == 0.0f ? 0.0f : length_in_segment / segment_length;
return LookupResult{index, next_index, factor};
}
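
The spline code above moves from float3 member methods to the free functions in blender::math, which is what the added "using namespace blender::math" lines enable. A small equivalence sketch, assuming the blender::math vector functions are available in the translation unit (the values are arbitrary):

using blender::float3;
namespace math = blender::math;

const float3 a(1.0f, 0.0f, 0.0f);
const float3 b(0.0f, 2.0f, 0.0f);

const float3 dir = math::normalize(b - a); /* was (b - a).normalized() */
const float dist = math::distance(a, b);   /* was float3::distance(a, b) */
const float3 n = math::cross(a, b);        /* was float3::cross(a, b) */
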
diff --git a/source/blender/blenkernel/intern/spline_bezier.cc b/source/blender/blenkernel/intern/spline_bezier.cc
index 9ce285cebb8..980437014b1 100644
--- a/source/blender/blenkernel/intern/spline_bezier.cc
+++ b/source/blender/blenkernel/intern/spline_bezier.cc
@@ -70,24 +70,6 @@ void BezierSpline::set_resolution(const int value)
this->mark_cache_invalid();
}
-void BezierSpline::add_point(const float3 position,
- const HandleType handle_type_left,
- const float3 handle_position_left,
- const HandleType handle_type_right,
- const float3 handle_position_right,
- const float radius,
- const float tilt)
-{
- handle_types_left_.append(handle_type_left);
- handle_positions_left_.append(handle_position_left);
- positions_.append(position);
- handle_types_right_.append(handle_type_right);
- handle_positions_right_.append(handle_position_right);
- radii_.append(radius);
- tilts_.append(tilt);
- this->mark_cache_invalid();
-}
-
void BezierSpline::resize(const int size)
{
handle_types_left_.resize(size);
@@ -217,11 +199,13 @@ void BezierSpline::ensure_auto_handles() const
}
for (const int i : IndexRange(this->size())) {
+ using namespace blender;
+
if (ELEM(HandleType::Auto, handle_types_left_[i], handle_types_right_[i])) {
const float3 prev_diff = positions_[i] - previous_position(positions_, is_cyclic_, i);
const float3 next_diff = next_position(positions_, is_cyclic_, i) - positions_[i];
- float prev_len = prev_diff.length();
- float next_len = next_diff.length();
+ float prev_len = math::length(prev_diff);
+ float next_len = math::length(next_diff);
if (prev_len == 0.0f) {
prev_len = 1.0f;
}
@@ -231,7 +215,7 @@ void BezierSpline::ensure_auto_handles() const
const float3 dir = next_diff / next_len + prev_diff / prev_len;
/* This magic number is unfortunate, but comes from elsewhere in Blender. */
- const float len = dir.length() * 2.5614f;
+ const float len = math::length(dir) * 2.5614f;
if (len != 0.0f) {
if (handle_types_left_[i] == HandleType::Auto) {
const float prev_len_clamped = std::min(prev_len, next_len * 5.0f);
@@ -246,12 +230,12 @@ void BezierSpline::ensure_auto_handles() const
if (handle_types_left_[i] == HandleType::Vector) {
const float3 prev = previous_position(positions_, is_cyclic_, i);
- handle_positions_left_[i] = float3::interpolate(positions_[i], prev, 1.0f / 3.0f);
+ handle_positions_left_[i] = math::interpolate(positions_[i], prev, 1.0f / 3.0f);
}
if (handle_types_right_[i] == HandleType::Vector) {
const float3 next = next_position(positions_, is_cyclic_, i);
- handle_positions_right_[i] = float3::interpolate(positions_[i], next, 1.0f / 3.0f);
+ handle_positions_right_[i] = math::interpolate(positions_[i], next, 1.0f / 3.0f);
}
}
@@ -293,6 +277,8 @@ static void set_handle_position(const float3 &position,
float3 &handle,
float3 &handle_other)
{
+ using namespace blender::math;
+
/* Don't bother when the handle positions are calculated automatically anyway. */
if (ELEM(type, BezierSpline::HandleType::Auto, BezierSpline::HandleType::Vector)) {
return;
@@ -301,9 +287,9 @@ static void set_handle_position(const float3 &position,
handle = new_value;
if (type_other == BezierSpline::HandleType::Align) {
/* Keep track of the old length of the opposite handle. */
- const float length = float3::distance(handle_other, position);
+ const float length = distance(handle_other, position);
/* Set the other handle to directly opposite from the current handle. */
- const float3 dir = (handle - position).normalized();
+ const float3 dir = normalize(handle - position);
handle_other = position - dir * length;
}
}
@@ -371,6 +357,7 @@ int BezierSpline::evaluated_points_size() const
void BezierSpline::correct_end_tangents() const
{
+ using namespace blender::math;
if (is_cyclic_) {
return;
}
@@ -378,10 +365,10 @@ void BezierSpline::correct_end_tangents() const
MutableSpan<float3> tangents(evaluated_tangents_cache_);
if (handle_positions_right_.first() != positions_.first()) {
- tangents.first() = (handle_positions_right_.first() - positions_.first()).normalized();
+ tangents.first() = normalize(handle_positions_right_.first() - positions_.first());
}
if (handle_positions_left_.last() != positions_.last()) {
- tangents.last() = (positions_.last() - handle_positions_left_.last()).normalized();
+ tangents.last() = normalize(positions_.last() - handle_positions_left_.last());
}
}
@@ -389,20 +376,22 @@ BezierSpline::InsertResult BezierSpline::calculate_segment_insertion(const int i
const int next_index,
const float parameter)
{
+ using namespace blender::math;
+
BLI_assert(parameter <= 1.0f && parameter >= 0.0f);
BLI_assert(next_index == 0 || next_index == index + 1);
const float3 &point_prev = positions_[index];
const float3 &handle_prev = handle_positions_right_[index];
const float3 &handle_next = handle_positions_left_[next_index];
const float3 &point_next = positions_[next_index];
- const float3 center_point = float3::interpolate(handle_prev, handle_next, parameter);
+ const float3 center_point = interpolate(handle_prev, handle_next, parameter);
BezierSpline::InsertResult result;
- result.handle_prev = float3::interpolate(point_prev, handle_prev, parameter);
- result.handle_next = float3::interpolate(handle_next, point_next, parameter);
- result.left_handle = float3::interpolate(result.handle_prev, center_point, parameter);
- result.right_handle = float3::interpolate(center_point, result.handle_next, parameter);
- result.position = float3::interpolate(result.left_handle, result.right_handle, parameter);
+ result.handle_prev = interpolate(point_prev, handle_prev, parameter);
+ result.handle_next = interpolate(handle_next, point_next, parameter);
+ result.left_handle = interpolate(result.handle_prev, center_point, parameter);
+ result.right_handle = interpolate(center_point, result.handle_next, parameter);
+ result.position = interpolate(result.left_handle, result.right_handle, parameter);
return result;
}
diff --git a/source/blender/blenkernel/intern/spline_nurbs.cc b/source/blender/blenkernel/intern/spline_nurbs.cc
index 69afb82baa8..5993b9a9a27 100644
--- a/source/blender/blenkernel/intern/spline_nurbs.cc
+++ b/source/blender/blenkernel/intern/spline_nurbs.cc
@@ -81,19 +81,6 @@ void NURBSpline::set_order(const uint8_t value)
this->mark_cache_invalid();
}
-void NURBSpline::add_point(const float3 position,
- const float radius,
- const float tilt,
- const float weight)
-{
- positions_.append(position);
- radii_.append(radius);
- tilts_.append(tilt);
- weights_.append(weight);
- knots_dirty_ = true;
- this->mark_cache_invalid();
-}
-
void NURBSpline::resize(const int size)
{
positions_.resize(size);
@@ -192,78 +179,48 @@ int NURBSpline::knots_size() const
void NURBSpline::calculate_knots() const
{
const KnotsMode mode = this->knots_mode;
- const int length = this->size();
const int order = order_;
+ const bool is_bezier = mode == NURBSpline::KnotsMode::Bezier;
+ const bool is_end_point = mode == NURBSpline::KnotsMode::EndPoint;
+ /* Inner knots are always repeated once, except in the Bezier case. */
+ const int repeat_inner = is_bezier ? order - 1 : 1;
+ /* How many times to repeat 0.0 at the beginning of the knot vector. */
+ const int head = is_end_point && !is_cyclic_ ? order : (is_bezier ? order / 2 : 1);
+ /* Number of knots that replicate the widths of the starting knots.
+ * Covers both the Cyclic and EndPoint cases. */
+ const int tail = is_cyclic_ ? 2 * order - 1 : (is_end_point ? order : 0);
knots_.resize(this->knots_size());
-
MutableSpan<float> knots = knots_;
- if (mode == NURBSpline::KnotsMode::Normal || is_cyclic_) {
- for (const int i : knots.index_range()) {
- knots[i] = static_cast<float>(i);
- }
- }
- else if (mode == NURBSpline::KnotsMode::EndPoint) {
- float k = 0.0f;
- for (const int i : IndexRange(1, knots.size())) {
- knots[i - 1] = k;
- if (i >= order && i <= length) {
- k += 1.0f;
- }
- }
- }
- else if (mode == NURBSpline::KnotsMode::Bezier) {
- BLI_assert(ELEM(order, 3, 4));
- if (order == 3) {
- float k = 0.6f;
- for (const int i : knots.index_range()) {
- if (i >= order && i <= length) {
- k += 0.5f;
- }
- knots[i] = std::floor(k);
- }
- }
- else {
- float k = 0.34f;
- for (const int i : knots.index_range()) {
- knots[i] = std::floor(k);
- k += 1.0f / 3.0f;
- }
- }
- }
+ int r = head;
+ float current = 0.0f;
- if (is_cyclic_) {
- const int b = length + order - 1;
- if (order > 2) {
- for (const int i : IndexRange(1, order - 2)) {
- if (knots[b] != knots[b - i]) {
- if (i == order - 1) {
- knots[length + order - 2] += 1.0f;
- break;
- }
- }
- }
+ for (const int i : IndexRange(knots.size() - tail)) {
+ knots[i] = current;
+ r--;
+ if (r == 0) {
+ current += 1.0;
+ r = repeat_inner;
}
+ }
- int c = order;
- for (int i = b; i < this->knots_size(); i++) {
- knots[i] = knots[i - 1] + (knots[c] - knots[c - 1]);
- c--;
- }
+ const int tail_index = knots.size() - tail;
+ for (const int i : IndexRange(tail)) {
+ knots[tail_index + i] = current + (knots[i] - knots[0]);
}
}
Span<float> NURBSpline::knots() const
{
if (!knots_dirty_) {
- BLI_assert(knots_.size() == this->size() + order_);
+ BLI_assert(knots_.size() == this->knots_size());
return knots_;
}
std::lock_guard lock{knots_mutex_};
if (!knots_dirty_) {
- BLI_assert(knots_.size() == this->size() + order_);
+ BLI_assert(knots_.size() == this->knots_size());
return knots_;
}
diff --git a/source/blender/blenkernel/intern/spline_poly.cc b/source/blender/blenkernel/intern/spline_poly.cc
index 4af68b5f270..480bbd1dfe8 100644
--- a/source/blender/blenkernel/intern/spline_poly.cc
+++ b/source/blender/blenkernel/intern/spline_poly.cc
@@ -45,14 +45,6 @@ int PolySpline::size() const
return size;
}
-void PolySpline::add_point(const float3 position, const float radius, const float tilt)
-{
- positions_.append(position);
- radii_.append(radius);
- tilts_.append(tilt);
- this->mark_cache_invalid();
-}
-
void PolySpline::resize(const int size)
{
positions_.resize(size);
diff --git a/source/blender/blenkernel/intern/subdiv.c b/source/blender/blenkernel/intern/subdiv.c
index fd32f52351a..45810e29565 100644
--- a/source/blender/blenkernel/intern/subdiv.c
+++ b/source/blender/blenkernel/intern/subdiv.c
@@ -29,6 +29,9 @@
#include "BLI_utildefines.h"
+#include "BKE_modifier.h"
+#include "BKE_subdiv_modifier.h"
+
#include "MEM_guardedalloc.h"
#include "subdiv_converter.h"
@@ -189,6 +192,12 @@ Subdiv *BKE_subdiv_update_from_mesh(Subdiv *subdiv,
void BKE_subdiv_free(Subdiv *subdiv)
{
if (subdiv->evaluator != NULL) {
+ const eOpenSubdivEvaluator evaluator_type = subdiv->evaluator->type;
+ if (evaluator_type != OPENSUBDIV_EVALUATOR_CPU) {
+ /* Let the draw code do the freeing, to ensure that the OpenGL context is valid. */
+ BKE_subsurf_modifier_free_gpu_cache_cb(subdiv);
+ return;
+ }
openSubdiv_deleteEvaluator(subdiv->evaluator);
}
if (subdiv->topology_refiner != NULL) {
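
The BKE_subdiv_free() change above defers destruction of non-CPU evaluators to a callback installed by the draw code, since GPU resources can only be released while the OpenGL context is current. A small standalone C++ sketch of that ownership pattern; the types and callback here are illustrative, not Blender's API:

#include <cstdio>

struct Evaluator {
  bool is_gpu;
};

/* Callback installed by the draw module, which owns the GL context. */
static void (*free_gpu_cache_cb)(Evaluator *) = nullptr;

static void evaluator_free(Evaluator *evaluator)
{
  if (evaluator->is_gpu && free_gpu_cache_cb) {
    /* GPU-backed resources must be released where the GL context is current,
     * so hand the object to the draw code instead of freeing it here. */
    free_gpu_cache_cb(evaluator);
    return;
  }
  printf("freeing CPU evaluator directly\n");
  delete evaluator;
}

int main()
{
  free_gpu_cache_cb = [](Evaluator *evaluator) {
    /* A real implementation would queue this for deletion by the draw code. */
    printf("handing GPU evaluator to draw code\n");
    delete evaluator;
  };
  evaluator_free(new Evaluator{true});
  evaluator_free(new Evaluator{false});
  return 0;
}
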
@@ -214,12 +223,13 @@ int *BKE_subdiv_face_ptex_offset_get(Subdiv *subdiv)
}
const int num_coarse_faces = topology_refiner->getNumFaces(topology_refiner);
subdiv->cache_.face_ptex_offset = MEM_malloc_arrayN(
- num_coarse_faces, sizeof(int), "subdiv face_ptex_offset");
+ num_coarse_faces + 1, sizeof(int), "subdiv face_ptex_offset");
int ptex_offset = 0;
for (int face_index = 0; face_index < num_coarse_faces; face_index++) {
const int num_ptex_faces = topology_refiner->getNumFacePtexFaces(topology_refiner, face_index);
subdiv->cache_.face_ptex_offset[face_index] = ptex_offset;
ptex_offset += num_ptex_faces;
}
+ subdiv->cache_.face_ptex_offset[num_coarse_faces] = ptex_offset;
return subdiv->cache_.face_ptex_offset;
}
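
Allocating `num_coarse_faces + 1` entries turns face_ptex_offset into a standard offsets (prefix-sum) array: entry i is the first ptex face of coarse face i, and the extra trailing entry stores the total, so per-face counts can be read as `offsets[i + 1] - offsets[i]` without a separate size query. A standalone C++ sketch of that layout, with illustrative counts only:

#include <cstdio>
#include <vector>

/* Build an offsets array from per-face ptex counts; the last entry is the total. */
static std::vector<int> build_offsets(const std::vector<int> &counts)
{
  std::vector<int> offsets(counts.size() + 1);
  int total = 0;
  for (size_t i = 0; i < counts.size(); i++) {
    offsets[i] = total;
    total += counts[i];
  }
  offsets[counts.size()] = total;
  return offsets;
}

int main()
{
  /* A quad contributes 1 ptex face, an n-gon contributes n. */
  const std::vector<int> offsets = build_offsets({1, 1, 5, 1});
  for (size_t i = 0; i + 1 < offsets.size(); i++) {
    printf("face %zu: ptex faces [%d, %d)\n", i, offsets[i], offsets[i + 1]);
  }
  return 0;
}
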
diff --git a/source/blender/blenkernel/intern/subdiv_ccg.c b/source/blender/blenkernel/intern/subdiv_ccg.c
index 77962ec924c..7d876acf776 100644
--- a/source/blender/blenkernel/intern/subdiv_ccg.c
+++ b/source/blender/blenkernel/intern/subdiv_ccg.c
@@ -603,7 +603,8 @@ Mesh *BKE_subdiv_to_ccg_mesh(Subdiv *subdiv,
{
/* Make sure evaluator is ready. */
BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
- if (!BKE_subdiv_eval_begin_from_mesh(subdiv, coarse_mesh, NULL)) {
+ if (!BKE_subdiv_eval_begin_from_mesh(
+ subdiv, coarse_mesh, NULL, SUBDIV_EVALUATOR_TYPE_CPU, NULL)) {
if (coarse_mesh->totpoly) {
return NULL;
}
diff --git a/source/blender/blenkernel/intern/subdiv_converter_mesh.c b/source/blender/blenkernel/intern/subdiv_converter_mesh.c
index 41fc28c5d52..fc7ef887879 100644
--- a/source/blender/blenkernel/intern/subdiv_converter_mesh.c
+++ b/source/blender/blenkernel/intern/subdiv_converter_mesh.c
@@ -40,6 +40,8 @@
#include "opensubdiv_capi.h"
#include "opensubdiv_converter_capi.h"
+#include "bmesh_class.h"
+
/* Enable work-around for non-working CPU evaluator when using bilinear scheme.
* This forces Catmark scheme with all edges marked as infinitely sharp. */
#define BUGGY_SIMPLE_SCHEME_WORKAROUND 1
@@ -47,6 +49,8 @@
typedef struct ConverterStorage {
SubdivSettings settings;
const Mesh *mesh;
+ /* CustomData layer for vertex sharpnesses. */
+ const float *cd_vertex_crease;
/* Indexed by loop index, value denotes index of face-varying vertex
* which corresponds to the UV coordinate.
*/
@@ -168,7 +172,7 @@ static float get_edge_sharpness(const OpenSubdiv_Converter *converter, int manif
}
const int edge_index = storage->manifold_edge_index_reverse[manifold_edge_index];
const MEdge *medge = storage->mesh->medge;
- return BKE_subdiv_edge_crease_to_sharpness_char(medge[edge_index].crease);
+ return BKE_subdiv_crease_to_sharpness_char(medge[edge_index].crease);
}
static bool is_infinite_sharp_vertex(const OpenSubdiv_Converter *converter,
@@ -184,14 +188,14 @@ static bool is_infinite_sharp_vertex(const OpenSubdiv_Converter *converter,
return BLI_BITMAP_TEST_BOOL(storage->infinite_sharp_vertices_map, vertex_index);
}
-static float get_vertex_sharpness(const OpenSubdiv_Converter *converter,
- int UNUSED(manifold_vertex_index))
+static float get_vertex_sharpness(const OpenSubdiv_Converter *converter, int manifold_vertex_index)
{
ConverterStorage *storage = converter->user_data;
- if (!storage->settings.use_creases) {
+ if (!storage->settings.use_creases || storage->cd_vertex_crease == NULL) {
return 0.0f;
}
- return 0.0f;
+ const int vertex_index = storage->manifold_vertex_index_reverse[manifold_vertex_index];
+ return BKE_subdiv_crease_to_sharpness_f(storage->cd_vertex_crease[vertex_index]);
}
static int get_num_uv_layers(const OpenSubdiv_Converter *converter)
@@ -393,6 +397,7 @@ static void init_user_data(OpenSubdiv_Converter *converter,
ConverterStorage *user_data = MEM_mallocN(sizeof(ConverterStorage), __func__);
user_data->settings = *settings;
user_data->mesh = mesh;
+ user_data->cd_vertex_crease = CustomData_get_layer(&mesh->vdata, CD_CREASE);
user_data->loop_uv_indices = NULL;
initialize_manifold_indices(user_data);
converter->user_data = user_data;
diff --git a/source/blender/blenkernel/intern/subdiv_deform.c b/source/blender/blenkernel/intern/subdiv_deform.c
index 7a2d639e4e5..c385b1b291d 100644
--- a/source/blender/blenkernel/intern/subdiv_deform.c
+++ b/source/blender/blenkernel/intern/subdiv_deform.c
@@ -117,7 +117,8 @@ static bool subdiv_mesh_topology_info(const SubdivForeachContext *foreach_contex
const int UNUSED(num_vertices),
const int UNUSED(num_edges),
const int UNUSED(num_loops),
- const int UNUSED(num_polygons))
+ const int UNUSED(num_polygons),
+ const int *UNUSED(subdiv_polygon_offset))
{
SubdivDeformContext *subdiv_context = foreach_context->user_data;
subdiv_mesh_prepare_accumulator(subdiv_context, subdiv_context->coarse_mesh->totvert);
@@ -202,7 +203,8 @@ void BKE_subdiv_deform_coarse_vertices(struct Subdiv *subdiv,
BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_MESH);
/* Make sure evaluator is up to date with possible new topology, and that
* is refined for the new positions of coarse vertices. */
- if (!BKE_subdiv_eval_begin_from_mesh(subdiv, coarse_mesh, vertex_cos)) {
+ if (!BKE_subdiv_eval_begin_from_mesh(
+ subdiv, coarse_mesh, vertex_cos, SUBDIV_EVALUATOR_TYPE_CPU, NULL)) {
/* This could happen in two situations:
* - OpenSubdiv is disabled.
* - Something totally bad happened, and OpenSubdiv rejected our
diff --git a/source/blender/blenkernel/intern/subdiv_eval.c b/source/blender/blenkernel/intern/subdiv_eval.c
index 0001eb8a205..c2f7581637b 100644
--- a/source/blender/blenkernel/intern/subdiv_eval.c
+++ b/source/blender/blenkernel/intern/subdiv_eval.c
@@ -28,6 +28,7 @@
#include "BLI_bitmap.h"
#include "BLI_math_vector.h"
+#include "BLI_task.h"
#include "BLI_utildefines.h"
#include "BKE_customdata.h"
@@ -38,7 +39,28 @@
#include "opensubdiv_evaluator_capi.h"
#include "opensubdiv_topology_refiner_capi.h"
-bool BKE_subdiv_eval_begin(Subdiv *subdiv)
+/* ============================ Helper Function ============================ */
+
+static eOpenSubdivEvaluator opensubdiv_evalutor_from_subdiv_evaluator_type(
+ eSubdivEvaluatorType evaluator_type)
+{
+ switch (evaluator_type) {
+ case SUBDIV_EVALUATOR_TYPE_CPU: {
+ return OPENSUBDIV_EVALUATOR_CPU;
+ }
+ case SUBDIV_EVALUATOR_TYPE_GLSL_COMPUTE: {
+ return OPENSUBDIV_EVALUATOR_GLSL_COMPUTE;
+ }
+ }
+ BLI_assert_msg(0, "Unknown evaluator type");
+ return OPENSUBDIV_EVALUATOR_CPU;
+}
+
+/* ====================== Main Subdivision Evaluation ====================== */
+
+bool BKE_subdiv_eval_begin(Subdiv *subdiv,
+ eSubdivEvaluatorType evaluator_type,
+ OpenSubdiv_EvaluatorCache *evaluator_cache)
{
BKE_subdiv_stats_reset(&subdiv->stats, SUBDIV_STATS_EVALUATOR_CREATE);
if (subdiv->topology_refiner == NULL) {
@@ -47,8 +69,11 @@ bool BKE_subdiv_eval_begin(Subdiv *subdiv)
return false;
}
if (subdiv->evaluator == NULL) {
+ eOpenSubdivEvaluator opensubdiv_evaluator_type =
+ opensubdiv_evalutor_from_subdiv_evaluator_type(evaluator_type);
BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_EVALUATOR_CREATE);
- subdiv->evaluator = openSubdiv_createEvaluatorFromTopologyRefiner(subdiv->topology_refiner);
+ subdiv->evaluator = openSubdiv_createEvaluatorFromTopologyRefiner(
+ subdiv->topology_refiner, opensubdiv_evaluator_type, evaluator_cache);
BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_EVALUATOR_CREATE);
if (subdiv->evaluator == NULL) {
return false;
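
The new helper above is a total switch that maps the kernel-side evaluator type onto the OpenSubdiv one, with a defensive CPU fallback after the switch. A minimal standalone C++ sketch of the same shape, with illustrative enum names rather than the real ones:

#include <cstdio>

enum class SubdivEvaluatorType { Cpu, GlslCompute };
enum class OsdEvaluator { Cpu, GlslCompute };

static OsdEvaluator to_osd_evaluator(SubdivEvaluatorType type)
{
  switch (type) {
    case SubdivEvaluatorType::Cpu:
      return OsdEvaluator::Cpu;
    case SubdivEvaluatorType::GlslCompute:
      return OsdEvaluator::GlslCompute;
  }
  /* Unreachable for valid input; fall back to the CPU evaluator. */
  return OsdEvaluator::Cpu;
}

int main()
{
  printf("mapped evaluator: %d\n", int(to_osd_evaluator(SubdivEvaluatorType::GlslCompute)));
  return 0;
}
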
@@ -80,6 +105,9 @@ static void set_coarse_positions(Subdiv *subdiv,
BLI_BITMAP_ENABLE(vertex_used_map, loop->v);
}
}
+ /* Use a temporary buffer so we do not upload vertices one at a time to the GPU. */
+ float(*buffer)[3] = MEM_mallocN(sizeof(float[3]) * mesh->totvert, "subdiv tmp coarse positions");
+ int manifold_vertex_count = 0;
for (int vertex_index = 0, manifold_vertex_index = 0; vertex_index < mesh->totvert;
vertex_index++) {
if (!BLI_BITMAP_TEST_BOOL(vertex_used_map, vertex_index)) {
@@ -93,13 +121,49 @@ static void set_coarse_positions(Subdiv *subdiv,
const MVert *vertex = &mvert[vertex_index];
vertex_co = vertex->co;
}
- subdiv->evaluator->setCoarsePositions(subdiv->evaluator, vertex_co, manifold_vertex_index, 1);
+ copy_v3_v3(&buffer[manifold_vertex_index][0], vertex_co);
manifold_vertex_index++;
+ manifold_vertex_count++;
}
+ subdiv->evaluator->setCoarsePositions(
+ subdiv->evaluator, &buffer[0][0], 0, manifold_vertex_count);
MEM_freeN(vertex_used_map);
+ MEM_freeN(buffer);
+}
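
The set_coarse_positions() change above batches the upload: used (manifold) vertices are compacted into a contiguous temporary buffer and handed to the evaluator in a single call rather than one call per vertex, which matters once the evaluator lives on the GPU. A standalone C++ sketch of the gather-then-upload pattern; the upload function is a stand-in, not OpenSubdiv's API:

#include <cstdio>
#include <vector>

struct Float3 {
  float v[3];
};

/* Stand-in for the evaluator's bulk upload entry point. */
static void upload_positions(const Float3 *positions, int start, int count)
{
  (void)positions;
  printf("uploading %d positions starting at index %d\n", count, start);
}

/* Gather the used vertices into a contiguous buffer, then upload once. */
static void set_coarse_positions(const std::vector<Float3> &verts, const std::vector<bool> &used)
{
  std::vector<Float3> buffer;
  buffer.reserve(verts.size());
  for (size_t i = 0; i < verts.size(); i++) {
    if (used[i]) {
      buffer.push_back(verts[i]);
    }
  }
  upload_positions(buffer.data(), 0, int(buffer.size()));
}

int main()
{
  set_coarse_positions({{{0, 0, 0}}, {{1, 0, 0}}, {{0, 1, 0}}}, {true, false, true});
  return 0;
}
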
+
+/* Context which is used to fill face varying data in parallel. */
+typedef struct FaceVaryingDataFromUVContext {
+ OpenSubdiv_TopologyRefiner *topology_refiner;
+ const Mesh *mesh;
+ const MLoopUV *mloopuv;
+ float (*buffer)[2];
+ int layer_index;
+} FaceVaryingDataFromUVContext;
+
+static void set_face_varying_data_from_uv_task(void *__restrict userdata,
+ const int face_index,
+ const TaskParallelTLS *__restrict UNUSED(tls))
+{
+ FaceVaryingDataFromUVContext *ctx = userdata;
+ OpenSubdiv_TopologyRefiner *topology_refiner = ctx->topology_refiner;
+ const int layer_index = ctx->layer_index;
+ const Mesh *mesh = ctx->mesh;
+ const MPoly *mpoly = &mesh->mpoly[face_index];
+ const MLoopUV *mluv = &ctx->mloopuv[mpoly->loopstart];
+
+ /* TODO(sergey): OpenSubdiv's C-API converter can change winding of
+ * loops of a face, need to watch for that, to prevent wrong UVs assigned.
+ */
+ const int num_face_vertices = topology_refiner->getNumFaceVertices(topology_refiner, face_index);
+ const int *uv_indices = topology_refiner->getFaceFVarValueIndices(
+ topology_refiner, face_index, layer_index);
+ for (int vertex_index = 0; vertex_index < num_face_vertices; vertex_index++, mluv++) {
+ copy_v2_v2(ctx->buffer[uv_indices[vertex_index]], mluv->uv);
+ }
}
static void set_face_varying_data_from_uv(Subdiv *subdiv,
+ const Mesh *mesh,
const MLoopUV *mloopuv,
const int layer_index)
{
@@ -107,25 +171,37 @@ static void set_face_varying_data_from_uv(Subdiv *subdiv,
OpenSubdiv_Evaluator *evaluator = subdiv->evaluator;
const int num_faces = topology_refiner->getNumFaces(topology_refiner);
const MLoopUV *mluv = mloopuv;
- /* TODO(sergey): OpenSubdiv's C-API converter can change winding of
- * loops of a face, need to watch for that, to prevent wrong UVs assigned.
- */
- for (int face_index = 0; face_index < num_faces; face_index++) {
- const int num_face_vertices = topology_refiner->getNumFaceVertices(topology_refiner,
- face_index);
- const int *uv_indices = topology_refiner->getFaceFVarValueIndices(
- topology_refiner, face_index, layer_index);
- for (int vertex_index = 0; vertex_index < num_face_vertices; vertex_index++, mluv++) {
- evaluator->setFaceVaryingData(evaluator, layer_index, mluv->uv, uv_indices[vertex_index], 1);
- }
- }
+
+ const int num_fvar_values = topology_refiner->getNumFVarValues(topology_refiner, layer_index);
+ /* Use a temporary buffer so we do not upload UVs one at a time to the GPU. */
+ float(*buffer)[2] = MEM_mallocN(sizeof(float[2]) * num_fvar_values, "temp UV storage");
+
+ FaceVaryingDataFromUVContext ctx;
+ ctx.topology_refiner = topology_refiner;
+ ctx.layer_index = layer_index;
+ ctx.mloopuv = mluv;
+ ctx.mesh = mesh;
+ ctx.buffer = buffer;
+
+ TaskParallelSettings parallel_range_settings;
+ BLI_parallel_range_settings_defaults(&parallel_range_settings);
+ parallel_range_settings.min_iter_per_thread = 1;
+
+ BLI_task_parallel_range(
+ 0, num_faces, &ctx, set_face_varying_data_from_uv_task, &parallel_range_settings);
+
+ evaluator->setFaceVaryingData(evaluator, layer_index, &buffer[0][0], 0, num_fvar_values);
+
+ MEM_freeN(buffer);
}
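
The UV scatter above is embarrassingly parallel: each face writes only to the face-varying slots referenced by its own corner indices, so faces can be filled on independent threads and the packed buffer uploaded once at the end. A minimal chunked parallel-for sketch in standalone C++, using std::thread as a stand-in for BLI_task_parallel_range (names and sizes are illustrative):

#include <algorithm>
#include <cstdio>
#include <thread>
#include <vector>

/* Split [begin, end) into chunks and run fn(i) for each index on worker threads. */
template<typename Fn> static void parallel_for(int begin, int end, Fn fn)
{
  const unsigned hw = std::thread::hardware_concurrency();
  const int num_threads = int(hw ? hw : 4u);
  const int chunk = (end - begin + num_threads - 1) / num_threads;
  std::vector<std::thread> workers;
  for (int t = 0; t < num_threads; t++) {
    const int lo = begin + t * chunk;
    const int hi = std::min(end, lo + chunk);
    if (lo >= hi) {
      break;
    }
    workers.emplace_back([=] {
      for (int i = lo; i < hi; i++) {
        fn(i);
      }
    });
  }
  for (std::thread &worker : workers) {
    worker.join();
  }
}

int main()
{
  /* Each index is written by exactly one iteration, so no locking is needed. */
  std::vector<float> fvar_u(1024, 0.0f);
  parallel_for(0, int(fvar_u.size()), [&](int i) { fvar_u[i] = float(i) / 1024.0f; });
  printf("fvar_u[1023] = %g\n", fvar_u[1023]);
  return 0;
}
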
bool BKE_subdiv_eval_begin_from_mesh(Subdiv *subdiv,
const Mesh *mesh,
- const float (*coarse_vertex_cos)[3])
+ const float (*coarse_vertex_cos)[3],
+ eSubdivEvaluatorType evaluator_type,
+ OpenSubdiv_EvaluatorCache *evaluator_cache)
{
- if (!BKE_subdiv_eval_begin(subdiv)) {
+ if (!BKE_subdiv_eval_begin(subdiv, evaluator_type, evaluator_cache)) {
return false;
}
return BKE_subdiv_eval_refine_from_mesh(subdiv, mesh, coarse_vertex_cos);
@@ -146,7 +222,7 @@ bool BKE_subdiv_eval_refine_from_mesh(Subdiv *subdiv,
const int num_uv_layers = CustomData_number_of_layers(&mesh->ldata, CD_MLOOPUV);
for (int layer_index = 0; layer_index < num_uv_layers; layer_index++) {
const MLoopUV *mloopuv = CustomData_get_layer_n(&mesh->ldata, CD_MLOOPUV, layer_index);
- set_face_varying_data_from_uv(subdiv, mloopuv, layer_index);
+ set_face_varying_data_from_uv(subdiv, mesh, mloopuv, layer_index);
}
/* Update evaluator to the new coarse geometry. */
BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_EVALUATOR_REFINE);
@@ -188,8 +264,8 @@ void BKE_subdiv_eval_limit_point_and_derivatives(Subdiv *subdiv,
* This happens, for example, in a single vertex on Suzanne's nose (where two quads have 2 common
* edges).
*
- * This makes tangent space displacement (such as multires) impossible to be used in those
- * vertices, so those needs to be addressed in one way or another.
+ * This makes tangent space displacement (such as multi-resolution) impossible to use at
+ * those vertices, so they need to be addressed in one way or another.
*
* Simplest thing to do: step inside of the face a little bit, where there is known patch at
* which there must be proper derivatives. This might break continuity of normals, but is better
@@ -221,18 +297,6 @@ void BKE_subdiv_eval_limit_point_and_normal(Subdiv *subdiv,
normalize_v3(r_N);
}
-void BKE_subdiv_eval_limit_point_and_short_normal(Subdiv *subdiv,
- const int ptex_face_index,
- const float u,
- const float v,
- float r_P[3],
- short r_N[3])
-{
- float N_float[3];
- BKE_subdiv_eval_limit_point_and_normal(subdiv, ptex_face_index, u, v, r_P, N_float);
- normal_float_to_short_v3(r_N, N_float);
-}
-
void BKE_subdiv_eval_face_varying(Subdiv *subdiv,
const int face_varying_channel,
const int ptex_face_index,
@@ -273,125 +337,3 @@ void BKE_subdiv_eval_final_point(
BKE_subdiv_eval_limit_point(subdiv, ptex_face_index, u, v, r_P);
}
}
-
-/* =================== Patch queries at given resolution =================== */
-
-/* Move buffer forward by a given number of bytes. */
-static void buffer_apply_offset(void **buffer, const int offset)
-{
- *buffer = ((unsigned char *)*buffer) + offset;
-}
-
-/* Write given number of floats to the beginning of given buffer. */
-static void buffer_write_float_value(void **buffer, const float *values_buffer, int num_values)
-{
- memcpy(*buffer, values_buffer, sizeof(float) * num_values);
-}
-
-/* Similar to above, just operates with short values. */
-static void buffer_write_short_value(void **buffer, const short *values_buffer, int num_values)
-{
- memcpy(*buffer, values_buffer, sizeof(short) * num_values);
-}
-
-void BKE_subdiv_eval_limit_patch_resolution_point(Subdiv *subdiv,
- const int ptex_face_index,
- const int resolution,
- void *buffer,
- const int offset,
- const int stride)
-{
- buffer_apply_offset(&buffer, offset);
- const float inv_resolution_1 = 1.0f / (float)(resolution - 1);
- for (int y = 0; y < resolution; y++) {
- const float v = y * inv_resolution_1;
- for (int x = 0; x < resolution; x++) {
- const float u = x * inv_resolution_1;
- BKE_subdiv_eval_limit_point(subdiv, ptex_face_index, u, v, buffer);
- buffer_apply_offset(&buffer, stride);
- }
- }
-}
-
-void BKE_subdiv_eval_limit_patch_resolution_point_and_derivatives(Subdiv *subdiv,
- const int ptex_face_index,
- const int resolution,
- void *point_buffer,
- const int point_offset,
- const int point_stride,
- void *du_buffer,
- const int du_offset,
- const int du_stride,
- void *dv_buffer,
- const int dv_offset,
- const int dv_stride)
-{
- buffer_apply_offset(&point_buffer, point_offset);
- buffer_apply_offset(&du_buffer, du_offset);
- buffer_apply_offset(&dv_buffer, dv_offset);
- const float inv_resolution_1 = 1.0f / (float)(resolution - 1);
- for (int y = 0; y < resolution; y++) {
- const float v = y * inv_resolution_1;
- for (int x = 0; x < resolution; x++) {
- const float u = x * inv_resolution_1;
- BKE_subdiv_eval_limit_point_and_derivatives(
- subdiv, ptex_face_index, u, v, point_buffer, du_buffer, dv_buffer);
- buffer_apply_offset(&point_buffer, point_stride);
- buffer_apply_offset(&du_buffer, du_stride);
- buffer_apply_offset(&dv_buffer, dv_stride);
- }
- }
-}
-
-void BKE_subdiv_eval_limit_patch_resolution_point_and_normal(Subdiv *subdiv,
- const int ptex_face_index,
- const int resolution,
- void *point_buffer,
- const int point_offset,
- const int point_stride,
- void *normal_buffer,
- const int normal_offset,
- const int normal_stride)
-{
- buffer_apply_offset(&point_buffer, point_offset);
- buffer_apply_offset(&normal_buffer, normal_offset);
- const float inv_resolution_1 = 1.0f / (float)(resolution - 1);
- for (int y = 0; y < resolution; y++) {
- const float v = y * inv_resolution_1;
- for (int x = 0; x < resolution; x++) {
- const float u = x * inv_resolution_1;
- float normal[3];
- BKE_subdiv_eval_limit_point_and_normal(subdiv, ptex_face_index, u, v, point_buffer, normal);
- buffer_write_float_value(&normal_buffer, normal, 3);
- buffer_apply_offset(&point_buffer, point_stride);
- buffer_apply_offset(&normal_buffer, normal_stride);
- }
- }
-}
-
-void BKE_subdiv_eval_limit_patch_resolution_point_and_short_normal(Subdiv *subdiv,
- const int ptex_face_index,
- const int resolution,
- void *point_buffer,
- const int point_offset,
- const int point_stride,
- void *normal_buffer,
- const int normal_offset,
- const int normal_stride)
-{
- buffer_apply_offset(&point_buffer, point_offset);
- buffer_apply_offset(&normal_buffer, normal_offset);
- const float inv_resolution_1 = 1.0f / (float)(resolution - 1);
- for (int y = 0; y < resolution; y++) {
- const float v = y * inv_resolution_1;
- for (int x = 0; x < resolution; x++) {
- const float u = x * inv_resolution_1;
- short normal[3];
- BKE_subdiv_eval_limit_point_and_short_normal(
- subdiv, ptex_face_index, u, v, point_buffer, normal);
- buffer_write_short_value(&normal_buffer, normal, 3);
- buffer_apply_offset(&point_buffer, point_stride);
- buffer_apply_offset(&normal_buffer, normal_stride);
- }
- }
-}
diff --git a/source/blender/blenkernel/intern/subdiv_foreach.c b/source/blender/blenkernel/intern/subdiv_foreach.c
index 061c196df2a..69bead27fe6 100644
--- a/source/blender/blenkernel/intern/subdiv_foreach.c
+++ b/source/blender/blenkernel/intern/subdiv_foreach.c
@@ -1877,7 +1877,8 @@ bool BKE_subdiv_foreach_subdiv_geometry(Subdiv *subdiv,
ctx.num_subdiv_vertices,
ctx.num_subdiv_edges,
ctx.num_subdiv_loops,
- ctx.num_subdiv_polygons)) {
+ ctx.num_subdiv_polygons,
+ ctx.subdiv_polygon_offset)) {
subdiv_foreach_ctx_free(&ctx);
return false;
}
diff --git a/source/blender/blenkernel/intern/subdiv_inline.h b/source/blender/blenkernel/intern/subdiv_inline.h
index ba45d0a4997..d52adff1e61 100644
--- a/source/blender/blenkernel/intern/subdiv_inline.h
+++ b/source/blender/blenkernel/intern/subdiv_inline.h
@@ -103,13 +103,13 @@ BLI_INLINE void BKE_subdiv_rotate_grid_to_quad(
}
}
-BLI_INLINE float BKE_subdiv_edge_crease_to_sharpness_f(float edge_crease)
+BLI_INLINE float BKE_subdiv_crease_to_sharpness_f(float edge_crease)
{
return edge_crease * edge_crease * 10.0f;
}
-BLI_INLINE float BKE_subdiv_edge_crease_to_sharpness_char(char edge_crease)
+BLI_INLINE float BKE_subdiv_crease_to_sharpness_char(char edge_crease)
{
const float edge_crease_f = edge_crease / 255.0f;
- return BKE_subdiv_edge_crease_to_sharpness_f(edge_crease_f);
+ return BKE_subdiv_crease_to_sharpness_f(edge_crease_f);
}
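
The renamed helpers keep the same quadratic mapping from a crease value in [0, 1] to an OpenSubdiv sharpness in [0, 10], now shared by both edge and vertex creases. A standalone C++ sketch of that mapping:

#include <cstdio>

/* Quadratic mapping: crease in [0, 1] -> sharpness in [0, 10]. */
static float crease_to_sharpness(float crease)
{
  return crease * crease * 10.0f;
}

static float crease_char_to_sharpness(unsigned char crease)
{
  return crease_to_sharpness(float(crease) / 255.0f);
}

int main()
{
  printf("crease 0.5 -> sharpness %.2f\n", crease_to_sharpness(0.5f));
  printf("crease 255 -> sharpness %.2f\n", crease_char_to_sharpness(255));
  return 0;
}
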
diff --git a/source/blender/blenkernel/intern/subdiv_mesh.c b/source/blender/blenkernel/intern/subdiv_mesh.c
index e5c7d13edab..c334d9a2c33 100644
--- a/source/blender/blenkernel/intern/subdiv_mesh.c
+++ b/source/blender/blenkernel/intern/subdiv_mesh.c
@@ -21,6 +21,7 @@
* \ingroup bke
*/
+#include "BKE_mesh.h"
#include "BKE_subdiv_mesh.h"
#include "atomic_ops.h"
@@ -58,23 +59,8 @@ typedef struct SubdivMeshContext {
/* UV layers interpolation. */
int num_uv_layers;
MLoopUV *uv_layers[MAX_MTFACE];
- /* Accumulated values.
- *
- * Averaging is happening for vertices along the coarse edges and corners.
- * This is needed for both displacement and normals.
- *
- * Displacement is being accumulated to a vertices coordinates, since those
- * are not needed during traversal of edge/corner vertices.
- *
- * For normals we are using dedicated array, since we can not use same
- * vertices (normals are `short`, which will cause a lot of precision
- * issues). */
- float (*accumulated_normals)[3];
/* Per-subdivided vertex counter of averaged values. */
int *accumulated_counters;
- /* Denotes whether normals can be evaluated from a limit surface. One case
- * when it's not possible is when displacement is used. */
- bool can_evaluate_normals;
bool have_displacement;
} SubdivMeshContext;
@@ -102,20 +88,12 @@ static void subdiv_mesh_ctx_cache_custom_data_layers(SubdivMeshContext *ctx)
static void subdiv_mesh_prepare_accumulator(SubdivMeshContext *ctx, int num_vertices)
{
- if (!ctx->can_evaluate_normals && !ctx->have_displacement) {
- return;
- }
- /* TODO(sergey): Technically, this is overallocating, we don't need memory
- * for an inner subdivision vertices. */
- ctx->accumulated_normals = MEM_calloc_arrayN(
- num_vertices, sizeof(*ctx->accumulated_normals), "subdiv accumulated normals");
ctx->accumulated_counters = MEM_calloc_arrayN(
num_vertices, sizeof(*ctx->accumulated_counters), "subdiv accumulated counters");
}
static void subdiv_mesh_context_free(SubdivMeshContext *ctx)
{
- MEM_SAFE_FREE(ctx->accumulated_normals);
MEM_SAFE_FREE(ctx->accumulated_counters);
}
@@ -450,48 +428,23 @@ static void subdiv_mesh_tls_free(void *tls_v)
/** \} */
-/* -------------------------------------------------------------------- */
-/** \name Evaluation helper functions
- * \{ */
-
-static void eval_final_point_and_vertex_normal(Subdiv *subdiv,
- const int ptex_face_index,
- const float u,
- const float v,
- float r_P[3],
- short r_N[3])
-{
- if (subdiv->displacement_evaluator == NULL) {
- BKE_subdiv_eval_limit_point_and_short_normal(subdiv, ptex_face_index, u, v, r_P, r_N);
- }
- else {
- BKE_subdiv_eval_final_point(subdiv, ptex_face_index, u, v, r_P);
- }
-}
-
/** \} */
/* -------------------------------------------------------------------- */
/** \name Accumulation helpers
* \{ */
-static void subdiv_accumulate_vertex_normal_and_displacement(SubdivMeshContext *ctx,
- const int ptex_face_index,
- const float u,
- const float v,
- MVert *subdiv_vert)
+static void subdiv_accumulate_vertex_displacement(SubdivMeshContext *ctx,
+ const int ptex_face_index,
+ const float u,
+ const float v,
+ MVert *subdiv_vert)
{
Subdiv *subdiv = ctx->subdiv;
const int subdiv_vertex_index = subdiv_vert - ctx->subdiv_mesh->mvert;
float dummy_P[3], dPdu[3], dPdv[3], D[3];
BKE_subdiv_eval_limit_point_and_derivatives(subdiv, ptex_face_index, u, v, dummy_P, dPdu, dPdv);
- /* Accumulate normal. */
- if (ctx->can_evaluate_normals) {
- float N[3];
- cross_v3_v3v3(N, dPdu, dPdv);
- normalize_v3(N);
- add_v3_v3(ctx->accumulated_normals[subdiv_vertex_index], N);
- }
+
/* Accumulate displacement if needed. */
if (ctx->have_displacement) {
/* NOTE: The subdivided mesh is allocated in this module, and its vertices are kept at zero
@@ -514,9 +467,10 @@ static bool subdiv_mesh_topology_info(const SubdivForeachContext *foreach_contex
const int num_vertices,
const int num_edges,
const int num_loops,
- const int num_polygons)
+ const int num_polygons,
+ const int *UNUSED(subdiv_polygon_offset))
{
- /* Multires grid data will be applied or become invalid after subdivision,
+ /* Multi-resolution grid data will be applied or become invalid after subdivision,
* so don't try to preserve it and use memory. */
CustomData_MeshMasks mask = CD_MASK_EVERYTHING;
mask.lmask &= ~CD_MASK_MULTIRES_GRIDS;
@@ -588,13 +542,6 @@ static void evaluate_vertex_and_apply_displacement_copy(const SubdivMeshContext
BKE_subdiv_eval_limit_point(ctx->subdiv, ptex_face_index, u, v, subdiv_vert->co);
/* Apply displacement. */
add_v3_v3(subdiv_vert->co, D);
- /* Copy normal from accumulated storage. */
- if (ctx->can_evaluate_normals) {
- float N[3];
- copy_v3_v3(N, ctx->accumulated_normals[subdiv_vertex_index]);
- normalize_v3(N);
- normal_float_to_short_v3(subdiv_vert->no, N);
- }
/* Remove facedot flag. This can happen if there is more than one subsurf modifier. */
subdiv_vert->flag &= ~ME_VERT_FACEDOT;
}
@@ -621,15 +568,6 @@ static void evaluate_vertex_and_apply_displacement_interpolate(
BKE_subdiv_eval_limit_point(ctx->subdiv, ptex_face_index, u, v, subdiv_vert->co);
/* Apply displacement. */
add_v3_v3(subdiv_vert->co, D);
- /* Copy normal from accumulated storage. */
- if (ctx->can_evaluate_normals) {
- const float inv_num_accumulated = 1.0f / ctx->accumulated_counters[subdiv_vertex_index];
- float N[3];
- copy_v3_v3(N, ctx->accumulated_normals[subdiv_vertex_index]);
- mul_v3_fl(N, inv_num_accumulated);
- normalize_v3(N);
- normal_float_to_short_v3(subdiv_vert->no, N);
- }
}
static void subdiv_mesh_vertex_every_corner_or_edge(const SubdivForeachContext *foreach_context,
@@ -643,7 +581,7 @@ static void subdiv_mesh_vertex_every_corner_or_edge(const SubdivForeachContext *
Mesh *subdiv_mesh = ctx->subdiv_mesh;
MVert *subdiv_mvert = subdiv_mesh->mvert;
MVert *subdiv_vert = &subdiv_mvert[subdiv_vertex_index];
- subdiv_accumulate_vertex_normal_and_displacement(ctx, ptex_face_index, u, v, subdiv_vert);
+ subdiv_accumulate_vertex_displacement(ctx, ptex_face_index, u, v, subdiv_vert);
}
static void subdiv_mesh_vertex_every_corner(const SubdivForeachContext *foreach_context,
@@ -792,8 +730,7 @@ static void subdiv_mesh_vertex_inner(const SubdivForeachContext *foreach_context
MVert *subdiv_vert = &subdiv_mvert[subdiv_vertex_index];
subdiv_mesh_ensure_vertex_interpolation(ctx, tls, coarse_poly, coarse_corner);
subdiv_vertex_data_interpolate(ctx, subdiv_vert, &tls->vertex_interpolation, u, v);
- eval_final_point_and_vertex_normal(
- subdiv, ptex_face_index, u, v, subdiv_vert->co, subdiv_vert->no);
+ BKE_subdiv_eval_final_point(subdiv, ptex_face_index, u, v, subdiv_vert->co);
subdiv_mesh_tag_center_vertex(coarse_poly, subdiv_vert, u, v);
}
@@ -1117,7 +1054,7 @@ static void subdiv_mesh_vertex_of_loose_edge(const struct SubdivForeachContext *
find_edge_neighbors(ctx, coarse_edge, neighbors);
/* Interpolate custom data when not an end point.
* This data has already been copied from the original vertex by #subdiv_mesh_vertex_loose. */
- if (u != 0.0 && u != 1.0) {
+ if (!ELEM(u, 0.0, 1.0)) {
subdiv_mesh_vertex_of_loose_edge_interpolate(ctx, coarse_edge, u, subdiv_vertex_index);
}
/* Interpolate coordinate. */
@@ -1140,12 +1077,6 @@ static void subdiv_mesh_vertex_of_loose_edge(const struct SubdivForeachContext *
/* TODO(sergey): This matches old behavior, but we can as well interpolate
* it. Maybe even using vertex varying attributes. */
subdiv_vertex->bweight = 0.0f;
- /* Reset normal, initialize it in a similar way as edit mode does for a
- * vertices adjacent to a loose edges.
- * See `mesh_evaluate#mesh_calc_normals_vert_fallback` */
- float no[3];
- normalize_v3_v3(no, subdiv_vertex->co);
- normal_float_to_short_v3(subdiv_vertex->no, no);
}
/** \} */
@@ -1160,8 +1091,8 @@ static void setup_foreach_callbacks(const SubdivMeshContext *subdiv_context,
memset(foreach_context, 0, sizeof(*foreach_context));
/* General information. */
foreach_context->topology_info = subdiv_mesh_topology_info;
- /* Every boundary geometry. Used for displacement and normals averaging. */
- if (subdiv_context->can_evaluate_normals || subdiv_context->have_displacement) {
+ /* Every boundary geometry. Used for displacement averaging. */
+ if (subdiv_context->have_displacement) {
foreach_context->vertex_every_corner = subdiv_mesh_vertex_every_corner;
foreach_context->vertex_every_edge = subdiv_mesh_vertex_every_edge;
}
@@ -1193,7 +1124,8 @@ Mesh *BKE_subdiv_to_mesh(Subdiv *subdiv,
BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_MESH);
/* Make sure evaluator is up to date with possible new topology, and that
* it is refined for the new positions of coarse vertices. */
- if (!BKE_subdiv_eval_begin_from_mesh(subdiv, coarse_mesh, NULL)) {
+ if (!BKE_subdiv_eval_begin_from_mesh(
+ subdiv, coarse_mesh, NULL, SUBDIV_EVALUATOR_TYPE_CPU, NULL)) {
/* This could happen in two situations:
* - OpenSubdiv is disabled.
* - Something totally bad happened, and OpenSubdiv rejected our
@@ -1210,8 +1142,6 @@ Mesh *BKE_subdiv_to_mesh(Subdiv *subdiv,
subdiv_context.coarse_mesh = coarse_mesh;
subdiv_context.subdiv = subdiv;
subdiv_context.have_displacement = (subdiv->displacement_evaluator != NULL);
- subdiv_context.can_evaluate_normals = !subdiv_context.have_displacement &&
- subdiv_context.subdiv->settings.is_adaptive;
/* Multi-threaded traversal/evaluation. */
BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_MESH_GEOMETRY);
SubdivForeachContext foreach_context;
@@ -1225,9 +1155,11 @@ Mesh *BKE_subdiv_to_mesh(Subdiv *subdiv,
Mesh *result = subdiv_context.subdiv_mesh;
// BKE_mesh_validate(result, true, true);
BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_MESH);
- if (!subdiv_context.can_evaluate_normals) {
- BKE_mesh_normals_tag_dirty(result);
- }
+ /* Using normals from the limit surface gives different results than Blender's vertex normal
+ * calculation. Since vertex normals are supposed to be a consistent cache, don't bother
+ * calculating them here. The work may have been pointless anyway if the mesh is deformed or
+ * changed afterwards. */
+ BKE_mesh_normals_tag_dirty(result);
/* Free used memory. */
subdiv_mesh_context_free(&subdiv_context);
return result;
diff --git a/source/blender/blenkernel/intern/subdiv_modifier.c b/source/blender/blenkernel/intern/subdiv_modifier.c
new file mode 100644
index 00000000000..525c4837bc4
--- /dev/null
+++ b/source/blender/blenkernel/intern/subdiv_modifier.c
@@ -0,0 +1,160 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2021 by Blender Foundation.
+ * All rights reserved.
+ */
+
+#include "BKE_subdiv_modifier.h"
+
+#include "MEM_guardedalloc.h"
+
+#include "DNA_mesh_types.h"
+#include "DNA_modifier_types.h"
+#include "DNA_object_types.h"
+#include "DNA_scene_types.h"
+#include "DNA_userdef_types.h"
+
+#include "BKE_modifier.h"
+#include "BKE_subdiv.h"
+
+#include "GPU_capabilities.h"
+#include "GPU_context.h"
+
+#include "opensubdiv_capi.h"
+
+void BKE_subsurf_modifier_subdiv_settings_init(SubdivSettings *settings,
+ const SubsurfModifierData *smd,
+ const bool use_render_params)
+{
+ const int requested_levels = (use_render_params) ? smd->renderLevels : smd->levels;
+
+ settings->is_simple = (smd->subdivType == SUBSURF_TYPE_SIMPLE);
+ settings->is_adaptive = !(smd->flags & eSubsurfModifierFlag_UseRecursiveSubdivision);
+ settings->level = settings->is_simple ?
+ 1 :
+ (settings->is_adaptive ? smd->quality : requested_levels);
+ settings->use_creases = (smd->flags & eSubsurfModifierFlag_UseCrease);
+ settings->vtx_boundary_interpolation = BKE_subdiv_vtx_boundary_interpolation_from_subsurf(
+ smd->boundary_smooth);
+ settings->fvar_linear_interpolation = BKE_subdiv_fvar_interpolation_from_uv_smooth(
+ smd->uv_smooth);
+}
+
+static ModifierData *modifier_get_last_enabled_for_mode(const Scene *scene,
+ const Object *ob,
+ int required_mode)
+{
+ ModifierData *md = ob->modifiers.last;
+
+ while (md) {
+ if (BKE_modifier_is_enabled(scene, md, required_mode)) {
+ break;
+ }
+
+ md = md->prev;
+ }
+
+ return md;
+}
+
+bool BKE_subsurf_modifier_can_do_gpu_subdiv_ex(const Scene *scene,
+ const Object *ob,
+ const SubsurfModifierData *smd,
+ int required_mode,
+ bool skip_check_is_last)
+{
+ if ((U.gpu_flag & USER_GPU_FLAG_SUBDIVISION_EVALUATION) == 0) {
+ return false;
+ }
+
+ if (!skip_check_is_last) {
+ ModifierData *md = modifier_get_last_enabled_for_mode(scene, ob, required_mode);
+ if (md != (const ModifierData *)smd) {
+ return false;
+ }
+ }
+
+ /* Only OpenGL is supported for OpenSubdiv evaluation for now. */
+ if (GPU_backend_get_type() != GPU_BACKEND_OPENGL) {
+ return false;
+ }
+
+ if (!(GPU_compute_shader_support() && GPU_shader_storage_buffer_objects_support())) {
+ return false;
+ }
+
+ const int available_evaluators = openSubdiv_getAvailableEvaluators();
+ if ((available_evaluators & OPENSUBDIV_EVALUATOR_GLSL_COMPUTE) == 0) {
+ return false;
+ }
+
+ return true;
+}
+
+bool BKE_subsurf_modifier_can_do_gpu_subdiv(const Scene *scene,
+ const Object *ob,
+ int required_mode)
+{
+ ModifierData *md = modifier_get_last_enabled_for_mode(scene, ob, required_mode);
+
+ if (!md) {
+ return false;
+ }
+
+ if (md->type != eModifierType_Subsurf) {
+ return false;
+ }
+
+ return BKE_subsurf_modifier_can_do_gpu_subdiv_ex(
+ scene, ob, (SubsurfModifierData *)md, required_mode, true);
+}
+
+void (*BKE_subsurf_modifier_free_gpu_cache_cb)(Subdiv *subdiv) = NULL;
+
+Subdiv *BKE_subsurf_modifier_subdiv_descriptor_ensure(const SubsurfModifierData *smd,
+ const SubdivSettings *subdiv_settings,
+ const Mesh *mesh,
+ const bool for_draw_code)
+{
+ SubsurfRuntimeData *runtime_data = (SubsurfRuntimeData *)smd->modifier.runtime;
+ if (runtime_data->subdiv && runtime_data->set_by_draw_code != for_draw_code) {
+ BKE_subdiv_free(runtime_data->subdiv);
+ runtime_data->subdiv = NULL;
+ }
+ Subdiv *subdiv = BKE_subdiv_update_from_mesh(runtime_data->subdiv, subdiv_settings, mesh);
+ runtime_data->subdiv = subdiv;
+ runtime_data->set_by_draw_code = for_draw_code;
+ return subdiv;
+}
+
+SubsurfRuntimeData *BKE_subsurf_modifier_ensure_runtime(SubsurfModifierData *smd)
+{
+ SubsurfRuntimeData *runtime_data = (SubsurfRuntimeData *)smd->modifier.runtime;
+ if (runtime_data == NULL) {
+ runtime_data = MEM_callocN(sizeof(*runtime_data), "subsurf runtime");
+ smd->modifier.runtime = runtime_data;
+ }
+ return runtime_data;
+}
+
+int BKE_subsurf_modifier_eval_required_mode(bool is_final_render, bool is_edit_mode)
+{
+ if (is_final_render) {
+ return eModifierMode_Render;
+ }
+
+ return eModifierMode_Realtime | (is_edit_mode ? eModifierMode_Editmode : 0);
+}
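
BKE_subsurf_modifier_eval_required_mode() above composes modifier evaluation flags as a bitmask: render-only for final renders, otherwise realtime plus the edit-mode bit when applicable. A standalone C++ sketch with illustrative flag values (not the actual DNA enum values):

#include <cstdio>

enum ModifierMode {
  MODE_REALTIME = 1 << 0,
  MODE_RENDER = 1 << 1,
  MODE_EDITMODE = 1 << 2,
};

/* Render uses only the render bit; the viewport uses realtime and, when in
 * edit mode, additionally the edit-mode bit. */
static int required_mode(bool is_final_render, bool is_edit_mode)
{
  if (is_final_render) {
    return MODE_RENDER;
  }
  return MODE_REALTIME | (is_edit_mode ? MODE_EDITMODE : 0);
}

int main()
{
  printf("viewport edit-mode mask: 0x%x\n", required_mode(false, true));
  return 0;
}
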
diff --git a/source/blender/blenkernel/intern/subsurf_ccg.c b/source/blender/blenkernel/intern/subsurf_ccg.c
index 2669da98488..9d66c354b54 100644
--- a/source/blender/blenkernel/intern/subsurf_ccg.c
+++ b/source/blender/blenkernel/intern/subsurf_ccg.c
@@ -803,17 +803,11 @@ static int ccgDM_getNumLoops(DerivedMesh *dm)
return 4 * ccgSubSurf_getNumFinalFaces(ccgdm->ss);
}
-static void ccgDM_getFinalVert(DerivedMesh *dm, int vertNum, MVert *mv)
+static CCGElem *get_vertex_elem(CCGDerivedMesh *ccgdm, int vertNum)
{
- CCGDerivedMesh *ccgdm = (CCGDerivedMesh *)dm;
CCGSubSurf *ss = ccgdm->ss;
- CCGElem *vd;
- CCGKey key;
int i;
- CCG_key_top_level(&key, ss);
- memset(mv, 0, sizeof(*mv));
-
if ((vertNum < ccgdm->edgeMap[0].startVert) && (ccgSubSurf_getNumFaces(ss) > 0)) {
/* this vert comes from face data */
int lastface = ccgSubSurf_getNumFaces(ss) - 1;
@@ -842,30 +836,24 @@ static void ccgDM_getFinalVert(DerivedMesh *dm, int vertNum, MVert *mv)
offset = vertNum - ccgdm->faceMap[i].startVert;
if (offset < 1) {
- vd = ccgSubSurf_getFaceCenterData(f);
- copy_v3_v3(mv->co, CCG_elem_co(&key, vd));
- normal_float_to_short_v3(mv->no, CCG_elem_no(&key, vd));
+ return ccgSubSurf_getFaceCenterData(f);
}
- else if (offset < gridSideEnd) {
+ if (offset < gridSideEnd) {
offset -= 1;
grid = offset / gridSideVerts;
x = offset % gridSideVerts + 1;
- vd = ccgSubSurf_getFaceGridEdgeData(ss, f, grid, x);
- copy_v3_v3(mv->co, CCG_elem_co(&key, vd));
- normal_float_to_short_v3(mv->no, CCG_elem_no(&key, vd));
+ return ccgSubSurf_getFaceGridEdgeData(ss, f, grid, x);
}
- else if (offset < gridInternalEnd) {
+ if (offset < gridInternalEnd) {
offset -= gridSideEnd;
grid = offset / gridInternalVerts;
offset %= gridInternalVerts;
y = offset / gridSideVerts + 1;
x = offset % gridSideVerts + 1;
- vd = ccgSubSurf_getFaceGridData(ss, f, grid, x, y);
- copy_v3_v3(mv->co, CCG_elem_co(&key, vd));
- normal_float_to_short_v3(mv->no, CCG_elem_no(&key, vd));
+ return ccgSubSurf_getFaceGridData(ss, f, grid, x, y);
}
}
- else if ((vertNum < ccgdm->vertMap[0].startVert) && (ccgSubSurf_getNumEdges(ss) > 0)) {
+ if ((vertNum < ccgdm->vertMap[0].startVert) && (ccgSubSurf_getNumEdges(ss) > 0)) {
/* this vert comes from edge data */
CCGEdge *e;
int lastedge = ccgSubSurf_getNumEdges(ss) - 1;
@@ -879,36 +867,37 @@ static void ccgDM_getFinalVert(DerivedMesh *dm, int vertNum, MVert *mv)
e = ccgdm->edgeMap[i].edge;
x = vertNum - ccgdm->edgeMap[i].startVert + 1;
- vd = ccgSubSurf_getEdgeData(ss, e, x);
- copy_v3_v3(mv->co, CCG_elem_co(&key, vd));
- normal_float_to_short_v3(mv->no, CCG_elem_no(&key, vd));
+ return ccgSubSurf_getEdgeData(ss, e, x);
}
- else {
- /* this vert comes from vert data */
- CCGVert *v;
- i = vertNum - ccgdm->vertMap[0].startVert;
- v = ccgdm->vertMap[i].vert;
- vd = ccgSubSurf_getVertData(ss, v);
- copy_v3_v3(mv->co, CCG_elem_co(&key, vd));
- normal_float_to_short_v3(mv->no, CCG_elem_no(&key, vd));
- }
+ /* this vert comes from vert data */
+ CCGVert *v;
+ i = vertNum - ccgdm->vertMap[0].startVert;
+
+ v = ccgdm->vertMap[i].vert;
+ return ccgSubSurf_getVertData(ss, v);
}
static void ccgDM_getFinalVertCo(DerivedMesh *dm, int vertNum, float r_co[3])
{
- MVert mvert;
+ CCGDerivedMesh *ccgdm = (CCGDerivedMesh *)dm;
+ CCGSubSurf *ss = ccgdm->ss;
- ccgDM_getFinalVert(dm, vertNum, &mvert);
- copy_v3_v3(r_co, mvert.co);
+ CCGElem *vd = get_vertex_elem(ccgdm, vertNum);
+ CCGKey key;
+ CCG_key_top_level(&key, ss);
+ copy_v3_v3(r_co, CCG_elem_co(&key, vd));
}
static void ccgDM_getFinalVertNo(DerivedMesh *dm, int vertNum, float r_no[3])
{
- MVert mvert;
+ CCGDerivedMesh *ccgdm = (CCGDerivedMesh *)dm;
+ CCGSubSurf *ss = ccgdm->ss;
- ccgDM_getFinalVert(dm, vertNum, &mvert);
- normal_short_to_float_v3(r_no, mvert.no);
+ CCGElem *vd = get_vertex_elem(ccgdm, vertNum);
+ CCGKey key;
+ CCG_key_top_level(&key, ss);
+ copy_v3_v3(r_no, CCG_elem_no(&key, vd));
}
void subsurf_copy_grid_hidden(DerivedMesh *dm,
@@ -995,7 +984,6 @@ void subsurf_copy_grid_paint_mask(DerivedMesh *dm,
BLI_INLINE void ccgDM_to_MVert(MVert *mv, const CCGKey *key, CCGElem *elem)
{
copy_v3_v3(mv->co, CCG_elem_co(key, elem));
- normal_float_to_short_v3(mv->no, CCG_elem_no(key, elem));
mv->flag = mv->bweight = 0;
}
diff --git a/source/blender/blenkernel/intern/texture.c b/source/blender/blenkernel/intern/texture.c
index ee9247e6e60..37d5d732a70 100644
--- a/source/blender/blenkernel/intern/texture.c
+++ b/source/blender/blenkernel/intern/texture.c
@@ -67,6 +67,8 @@
#include "BKE_scene.h"
#include "BKE_texture.h"
+#include "NOD_texture.h"
+
#include "RE_texture.h"
#include "BLO_read_write.h"
diff --git a/source/blender/blenkernel/intern/tracking_region_tracker.c b/source/blender/blenkernel/intern/tracking_region_tracker.c
index ad3f226fa92..4b23f74bc8f 100644
--- a/source/blender/blenkernel/intern/tracking_region_tracker.c
+++ b/source/blender/blenkernel/intern/tracking_region_tracker.c
@@ -26,6 +26,7 @@
#include "MEM_guardedalloc.h"
+#include "DNA_defaults.h"
#include "DNA_movieclip_types.h"
#include "BLI_threads.h"
@@ -42,7 +43,7 @@
/* **** utility functions for tracking **** */
-/* convert from float and byte RGBA to grayscale. Supports different coefficients for RGB. */
+/** Convert from float and byte RGBA to gray-scale. Supports different coefficients for RGB. */
static void float_rgba_to_gray(const float *rgba,
float *gray,
int num_pixels,
@@ -71,7 +72,7 @@ static void uint8_rgba_to_float_gray(const unsigned char *rgba,
}
}
-/* Get grayscale float search buffer for given marker and frame. */
+/** Get gray-scale float search buffer for given marker and frame. */
static float *track_get_search_floatbuf(ImBuf *ibuf,
MovieTrackingTrack *track,
MovieTrackingMarker *marker,
@@ -322,7 +323,7 @@ void BKE_tracking_refine_marker(MovieClip *clip,
int search_area_height, search_area_width;
int clip_flag = clip->flag & MCLIP_TIMECODE_FLAGS;
int reference_framenr;
- MovieClipUser user = {0};
+ MovieClipUser user = *DNA_struct_default_get(MovieClipUser);
double dst_pixel_x[5], dst_pixel_y[5];
bool tracked;
diff --git a/source/blender/blenkernel/intern/tracking_test.cc b/source/blender/blenkernel/intern/tracking_test.cc
index a3845dcad8f..d85d71b7c86 100644
--- a/source/blender/blenkernel/intern/tracking_test.cc
+++ b/source/blender/blenkernel/intern/tracking_test.cc
@@ -5,7 +5,7 @@
#include "DNA_tracking_types.h"
#include "BKE_tracking.h"
-#include "BLI_float2.hh"
+#include "BLI_math_vec_types.hh"
namespace blender {
diff --git a/source/blender/blenkernel/intern/type_conversions.cc b/source/blender/blenkernel/intern/type_conversions.cc
index b23220286e6..cb05337ef2a 100644
--- a/source/blender/blenkernel/intern/type_conversions.cc
+++ b/source/blender/blenkernel/intern/type_conversions.cc
@@ -19,8 +19,7 @@
#include "FN_multi_function_builder.hh"
#include "BLI_color.hh"
-#include "BLI_float2.hh"
-#include "BLI_float3.hh"
+#include "BLI_math_vec_types.hh"
namespace blender::bke {
diff --git a/source/blender/blenkernel/intern/undo_system.c b/source/blender/blenkernel/intern/undo_system.c
index 743ae91f6f7..3e263fafe28 100644
--- a/source/blender/blenkernel/intern/undo_system.c
+++ b/source/blender/blenkernel/intern/undo_system.c
@@ -819,6 +819,9 @@ void BKE_undosys_step_load_from_index(UndoStack *ustack, bContext *C, const int
{
UndoStep *us_target = BLI_findlink(&ustack->steps, index);
BLI_assert(us_target->skip == false);
+ if (us_target == ustack->step_active) {
+ return;
+ }
BKE_undosys_step_load_data(ustack, C, us_target);
}
diff --git a/source/blender/blenkernel/intern/volume.cc b/source/blender/blenkernel/intern/volume.cc
index 130aa957491..39a7725bfa3 100644
--- a/source/blender/blenkernel/intern/volume.cc
+++ b/source/blender/blenkernel/intern/volume.cc
@@ -28,12 +28,12 @@
#include "BLI_compiler_compat.h"
#include "BLI_fileops.h"
-#include "BLI_float3.hh"
#include "BLI_float4x4.hh"
#include "BLI_ghash.h"
#include "BLI_index_range.hh"
#include "BLI_map.hh"
#include "BLI_math.h"
+#include "BLI_math_vec_types.hh"
#include "BLI_path_util.h"
#include "BLI_string.h"
#include "BLI_string_ref.hh"
@@ -138,11 +138,19 @@ static struct VolumeFileCache {
}
std::lock_guard<std::mutex> lock(mutex);
- return simplified_grids.lookup_or_add_cb(simplify_level, [&]() {
- const float resolution_factor = 1.0f / (1 << simplify_level);
- const VolumeGridType grid_type = BKE_volume_grid_type_openvdb(*grid);
- return BKE_volume_grid_create_with_changed_resolution(grid_type, *grid, resolution_factor);
+ openvdb::GridBase::Ptr simple_grid;
+
+ /* Isolate creating grid since that's multithreaded and we are
+ * holding a mutex lock. */
+ blender::threading::isolate_task([&] {
+ simple_grid = simplified_grids.lookup_or_add_cb(simplify_level, [&]() {
+ const float resolution_factor = 1.0f / (1 << simplify_level);
+ const VolumeGridType grid_type = BKE_volume_grid_type_openvdb(*grid);
+ return BKE_volume_grid_create_with_changed_resolution(
+ grid_type, *grid, resolution_factor);
+ });
});
+ return simple_grid;
}
/* Unique key: filename + grid name. */
@@ -247,16 +255,20 @@ static struct VolumeFileCache {
protected:
void update_for_remove_user(Entry &entry)
{
- if (entry.num_metadata_users + entry.num_tree_users == 0) {
- cache.erase(entry);
- }
- else if (entry.num_tree_users == 0) {
- /* Note we replace the grid rather than clearing, so that if there is
- * any other shared pointer to the grid it will keep the tree. */
- entry.grid = entry.grid->copyGridWithNewTree();
- entry.simplified_grids.clear();
- entry.is_loaded = false;
- }
+ /* Isolate file unloading since that's multithreaded and we are
+ * holding a mutex lock. */
+ blender::threading::isolate_task([&] {
+ if (entry.num_metadata_users + entry.num_tree_users == 0) {
+ cache.erase(entry);
+ }
+ else if (entry.num_tree_users == 0) {
+ /* Note we replace the grid rather than clearing, so that if there is
+ * any other shared pointer to the grid it will keep the tree. */
+ entry.grid = entry.grid->copyGridWithNewTree();
+ entry.simplified_grids.clear();
+ entry.is_loaded = false;
+ }
+ });
}
/* Cache contents */
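
Both hunks above wrap multithreaded work that runs while the cache mutex is held in a task-isolation region, so the calling thread cannot be handed an unrelated stolen task that tries to take the same lock and deadlock. A standalone C++ sketch of that pattern; the isolate() helper is a stand-in for blender::threading::isolate_task (or tbb::this_task_arena::isolate) and is purely illustrative:

#include <cstdio>
#include <mutex>

static std::mutex cache_mutex;

/* Stand-in for a task-isolation region: a real implementation confines task
 * stealing to an inner arena so the current thread cannot pick up another
 * task that also needs cache_mutex. */
template<typename Fn> static void isolate(Fn &&fn)
{
  fn();
}

static void rebuild_cache_entry()
{
  std::lock_guard<std::mutex> lock(cache_mutex);
  isolate([&] {
    /* The parallel grid simplification / unloading would run here. */
    printf("rebuilding cache entry while holding the cache mutex\n");
  });
}

int main()
{
  rebuild_cache_entry();
  return 0;
}
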
@@ -537,7 +549,7 @@ static void volume_copy_data(Main *UNUSED(bmain),
#ifdef WITH_OPENVDB
if (volume_src->runtime.grids) {
const VolumeGridVector &grids_src = *(volume_src->runtime.grids);
- volume_dst->runtime.grids = OBJECT_GUARDED_NEW(VolumeGridVector, grids_src);
+ volume_dst->runtime.grids = MEM_new<VolumeGridVector>(__func__, grids_src);
}
#endif
@@ -551,7 +563,8 @@ static void volume_free_data(ID *id)
BKE_volume_batch_cache_free(volume);
MEM_SAFE_FREE(volume->mat);
#ifdef WITH_OPENVDB
- OBJECT_GUARDED_SAFE_DELETE(volume->runtime.grids, VolumeGridVector);
+ MEM_delete(volume->runtime.grids);
+ volume->runtime.grids = nullptr;
#endif
}
@@ -683,7 +696,7 @@ void BKE_volume_init_grids(Volume *volume)
{
#ifdef WITH_OPENVDB
if (volume->runtime.grids == nullptr) {
- volume->runtime.grids = OBJECT_GUARDED_NEW(VolumeGridVector);
+ volume->runtime.grids = MEM_new<VolumeGridVector>(__func__);
}
#else
UNUSED_VARS(volume);
@@ -954,7 +967,7 @@ BoundBox *BKE_volume_boundbox_get(Object *ob)
}
if (ob->runtime.bb == nullptr) {
- ob->runtime.bb = (BoundBox *)MEM_callocN(sizeof(BoundBox), __func__);
+ ob->runtime.bb = MEM_cnew<BoundBox>(__func__);
}
const Volume *volume = (Volume *)ob->data;
@@ -1129,16 +1142,16 @@ void BKE_volume_grids_backup_restore(Volume *volume, VolumeGridVector *grids, co
if (!grids->is_loaded()) {
/* No grids loaded in CoW datablock, nothing lost by discarding. */
- OBJECT_GUARDED_DELETE(grids, VolumeGridVector);
+ MEM_delete(grids);
}
else if (!STREQ(volume->filepath, filepath)) {
/* Filepath changed, discard grids from CoW datablock. */
- OBJECT_GUARDED_DELETE(grids, VolumeGridVector);
+ MEM_delete(grids);
}
else {
/* Keep grids from CoW datablock. We might still unload them a little
* later in BKE_volume_eval_geometry if the frame changes. */
- OBJECT_GUARDED_DELETE(volume->runtime.grids, VolumeGridVector);
+ MEM_delete(volume->runtime.grids);
volume->runtime.grids = grids;
}
#else
@@ -1543,11 +1556,6 @@ bool BKE_volume_grid_bounds(openvdb::GridBase::ConstPtr grid, float3 &r_min, flo
return true;
}
-/**
- * Return a new grid pointer with only the metadata and transform changed.
- * This is useful for instances, where there is a separate transform on top of the original
- * grid transform that must be applied for some operations that only take a grid argument.
- */
openvdb::GridBase::ConstPtr BKE_volume_grid_shallow_transform(openvdb::GridBase::ConstPtr grid,
const blender::float4x4 &transform)
{
diff --git a/source/blender/blenkernel/intern/volume_render.cc b/source/blender/blenkernel/intern/volume_render.cc
index 6dc497bb616..c0a205b5673 100644
--- a/source/blender/blenkernel/intern/volume_render.cc
+++ b/source/blender/blenkernel/intern/volume_render.cc
@@ -21,8 +21,8 @@
#include "MEM_guardedalloc.h"
#include "BLI_array.hh"
-#include "BLI_float3.hh"
#include "BLI_math_matrix.h"
+#include "BLI_math_vec_types.hh"
#include "BLI_math_vector.h"
#include "BLI_vector.hh"
diff --git a/source/blender/blenkernel/intern/volume_to_mesh.cc b/source/blender/blenkernel/intern/volume_to_mesh.cc
index 6e465b2fdf0..336ce724e35 100644
--- a/source/blender/blenkernel/intern/volume_to_mesh.cc
+++ b/source/blender/blenkernel/intern/volume_to_mesh.cc
@@ -16,7 +16,7 @@
#include <vector>
-#include "BLI_float3.hh"
+#include "BLI_math_vec_types.hh"
#include "BLI_span.hh"
#include "BLI_utildefines.h"
@@ -121,11 +121,6 @@ struct VolumeToMeshOp {
}
};
-/**
- * Convert mesh data from the format provided by OpenVDB into Blender's #Mesh data structure.
- * This can be used to add mesh data from a grid into an existing mesh rather than merging multiple
- * meshes later on.
- */
void fill_mesh_from_openvdb_data(const Span<openvdb::Vec3s> vdb_verts,
const Span<openvdb::Vec3I> vdb_tris,
const Span<openvdb::Vec4I> vdb_quads,
@@ -165,10 +160,6 @@ void fill_mesh_from_openvdb_data(const Span<openvdb::Vec3s> vdb_verts,
}
}
-/**
- * Convert an OpenVDB volume grid to corresponding mesh data: vertex positions and quad and
- * triangle indices.
- */
bke::OpenVDBMeshData volume_to_mesh_data(const openvdb::GridBase &grid,
const VolumeToMeshResolution &resolution,
const float threshold,
diff --git a/source/blender/blenkernel/intern/writeffmpeg.c b/source/blender/blenkernel/intern/writeffmpeg.c
index 035e56993f9..4d94132e6fd 100644
--- a/source/blender/blenkernel/intern/writeffmpeg.c
+++ b/source/blender/blenkernel/intern/writeffmpeg.c
@@ -1434,6 +1434,8 @@ int BKE_ffmpeg_append(void *context_v,
/* Add +1 frame because we want to encode audio up until the next video frame. */
write_audio_frames(
context, (frame - start_frame + 1) / (((double)rd->frs_sec) / (double)rd->frs_sec_base));
+# else
+ UNUSED_VARS(start_frame);
# endif
if (context->ffmpeg_autosplit) {