
git.blender.org/blender.git
author    Hans Goudey <h.goudey@me.com>  2022-09-05 19:56:34 +0300
committer Hans Goudey <h.goudey@me.com>  2022-09-05 19:56:34 +0300
commit    05952aa94d33eeb504fa63618ba35c2bcc8bd19b (patch)
tree      c9ec37adf20c3c37ccaab44869220dcbe8e987a3 /source/blender/editors/sculpt_paint/sculpt_expand.c
parent    63cfc8f9f6d623f33b50c5c07976af2b22845713 (diff)
Mesh: Remove redundant custom data pointers
For copy-on-write, we want to share attribute arrays between meshes where possible. Mutable pointers like `Mesh.mvert` make that difficult by making ownership vague. They also make code more complex by adding redundancy.

The simplest solution is just removing them and retrieving layers from `CustomData` as needed. Similar changes have already been applied to curves and point clouds (e9f82d3dc7ee, 410a6efb747f). Removing use of the pointers generally makes code more obvious and more reusable.

Mesh data is now accessed with a C++ API (`Mesh::edges()` or `Mesh::edges_for_write()`), and a C API (`BKE_mesh_edges(mesh)`).

The CoW changes this commit makes possible are described in T95845 and T95842, and started in D14139 and D14140. The change also simplifies the ongoing mesh struct-of-array refactors from T95965.

**RNA/Python Access Performance**

Theoretically, accessing mesh elements with the RNA API may become slower, since the layer needs to be found on every random access. However, overhead is already high enough that this doesn't make a noticeable difference, and performance is actually improved in some cases. Random access can be up to 10% faster, but other situations might be a bit slower. Generally, using `foreach_get`/`foreach_set` is the best way to improve performance. See the differential revision for more discussion about Python performance.

Cycles has been updated to use raw pointers and the internal Blender mesh types, mostly because there is no sense in having this overhead when it's already compiled with Blender. In my tests this roughly halves the Cycles mesh creation time (0.19s to 0.10s for a 1 million face grid).

Differential Revision: https://developer.blender.org/D15488
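As a rough illustration of the access pattern this commit introduces, below is a minimal C sketch (not part of the commit; the `accumulate_face_falloff` helper and its signature are hypothetical) of looking the polygon and loop layers up once via `BKE_mesh_polygons()`/`BKE_mesh_loops()` instead of reading the removed `mesh->mpoly`/`mesh->mloop` pointers, mirroring the change made to `sculpt_expand_vertex_to_faces_falloff()` in the diff:

```c
#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"

#include "BKE_mesh.h"

/* Hypothetical helper: average a per-vertex falloff onto each polygon,
 * following the pattern used in sculpt_expand_vertex_to_faces_falloff(). */
static void accumulate_face_falloff(Mesh *mesh,
                                    const float *vert_falloff,
                                    float *r_face_falloff)
{
  /* Retrieve the layers once, instead of relying on the removed
   * mesh->mpoly / mesh->mloop custom data pointers. */
  const MPoly *polys = BKE_mesh_polygons(mesh);
  const MLoop *loops = BKE_mesh_loops(mesh);

  for (int p = 0; p < mesh->totpoly; p++) {
    const MPoly *poly = &polys[p];
    float accum = 0.0f;
    for (int l = 0; l < poly->totloop; l++) {
      const MLoop *loop = &loops[poly->loopstart + l];
      accum += vert_falloff[loop->v];
    }
    r_face_falloff[p] = accum / poly->totloop;
  }
}
```

The layer lookup happens once per function rather than once per element, so the hot loops stay pointer-based while ownership of the arrays remains with `CustomData`.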
Diffstat (limited to 'source/blender/editors/sculpt_paint/sculpt_expand.c')
-rw-r--r--  source/blender/editors/sculpt_paint/sculpt_expand.c  26
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/source/blender/editors/sculpt_paint/sculpt_expand.c b/source/blender/editors/sculpt_paint/sculpt_expand.c
index 75cc966c0b2..63d5c14a931 100644
--- a/source/blender/editors/sculpt_paint/sculpt_expand.c
+++ b/source/blender/editors/sculpt_paint/sculpt_expand.c
@@ -690,7 +690,6 @@ static float *sculpt_expand_diagonals_falloff_create(Object *ob, const PBVHVertR
}
/* Propagate the falloff increasing the value by 1 each time a new vertex is visited. */
- Mesh *mesh = ob->data;
while (!BLI_gsqueue_is_empty(queue)) {
PBVHVertRef v_next;
BLI_gsqueue_pop(queue, &v_next);
@@ -698,9 +697,9 @@ static float *sculpt_expand_diagonals_falloff_create(Object *ob, const PBVHVertR
int v_next_i = BKE_pbvh_vertex_to_index(ss->pbvh, v_next);
for (int j = 0; j < ss->pmap[v_next_i].count; j++) {
- MPoly *p = &ss->mpoly[ss->pmap[v_next_i].indices[j]];
+ const MPoly *p = &ss->mpoly[ss->pmap[v_next_i].indices[j]];
for (int l = 0; l < p->totloop; l++) {
- const PBVHVertRef neighbor_v = BKE_pbvh_make_vref(mesh->mloop[p->loopstart + l].v);
+ const PBVHVertRef neighbor_v = BKE_pbvh_make_vref(ss->mloop[p->loopstart + l].v);
if (BLI_BITMAP_TEST(visited_vertices, neighbor_v.i)) {
continue;
}
@@ -777,11 +776,11 @@ static void sculpt_expand_grids_to_faces_falloff(SculptSession *ss,
Mesh *mesh,
ExpandCache *expand_cache)
{
-
+ const MPoly *polys = BKE_mesh_polygons(mesh);
const CCGKey *key = BKE_pbvh_get_grid_key(ss->pbvh);
for (int p = 0; p < mesh->totpoly; p++) {
- MPoly *poly = &mesh->mpoly[p];
+ const MPoly *poly = &polys[p];
float accum = 0.0f;
for (int l = 0; l < poly->totloop; l++) {
const int grid_loop_index = (poly->loopstart + l) * key->grid_area;
@@ -795,11 +794,14 @@ static void sculpt_expand_grids_to_faces_falloff(SculptSession *ss,
static void sculpt_expand_vertex_to_faces_falloff(Mesh *mesh, ExpandCache *expand_cache)
{
+ const MPoly *polys = BKE_mesh_polygons(mesh);
+ const MLoop *loops = BKE_mesh_loops(mesh);
+
for (int p = 0; p < mesh->totpoly; p++) {
- MPoly *poly = &mesh->mpoly[p];
+ const MPoly *poly = &polys[p];
float accum = 0.0f;
for (int l = 0; l < poly->totloop; l++) {
- MLoop *loop = &mesh->mloop[l + poly->loopstart];
+ const MLoop *loop = &loops[l + poly->loopstart];
accum += expand_cache->vert_falloff[loop->v];
}
expand_cache->face_falloff[p] = accum / poly->totloop;
@@ -1093,10 +1095,10 @@ static void sculpt_expand_snap_initialize_from_enabled(SculptSession *ss,
}
for (int p = 0; p < totface; p++) {
- MPoly *poly = &ss->mpoly[p];
+ const MPoly *poly = &ss->mpoly[p];
bool any_disabled = false;
for (int l = 0; l < poly->totloop; l++) {
- MLoop *loop = &ss->mloop[l + poly->loopstart];
+ const MLoop *loop = &ss->mloop[l + poly->loopstart];
if (!BLI_BITMAP_TEST(enabled_vertices, loop->v)) {
any_disabled = true;
break;
@@ -1938,6 +1940,8 @@ static void sculpt_expand_delete_face_set_id(int *r_face_sets,
{
const int totface = ss->totfaces;
MeshElemMap *pmap = ss->pmap;
+ const MPoly *polys = BKE_mesh_polygons(mesh);
+ const MLoop *loops = BKE_mesh_loops(mesh);
/* Check that all the face sets IDs in the mesh are not equal to `delete_id`
* before attempting to delete it. */
@@ -1972,9 +1976,9 @@ static void sculpt_expand_delete_face_set_id(int *r_face_sets,
while (BLI_LINKSTACK_SIZE(queue)) {
const int f_index = POINTER_AS_INT(BLI_LINKSTACK_POP(queue));
int other_id = delete_id;
- const MPoly *c_poly = &mesh->mpoly[f_index];
+ const MPoly *c_poly = &polys[f_index];
for (int l = 0; l < c_poly->totloop; l++) {
- const MLoop *c_loop = &mesh->mloop[c_poly->loopstart + l];
+ const MLoop *c_loop = &loops[c_poly->loopstart + l];
const MeshElemMap *vert_map = &pmap[c_loop->v];
for (int i = 0; i < vert_map->count; i++) {