git.blender.org/blender.git
Diffstat (limited to 'source/blender/blenkernel/intern/mesh_normals.cc')
-rw-r--r--  source/blender/blenkernel/intern/mesh_normals.cc | 160
1 file changed, 85 insertions(+), 75 deletions(-)
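
The patch below is part of the ongoing mesh code cleanup: the C BLI_bitmap allocations are replaced with blender::BitVector, direct access to the Mesh members mvert, medge, mloop and mpoly is routed through the span accessors (verts(), edges_for_write(), loops(), polys()), parameters that are only read gain const, the custom loop normal layer is added with CD_SET_DEFAULT instead of CD_CALLOC, and BKE_mesh_normals_loop_custom_from_vertices_set / BKE_mesh_set_custom_normals_from_vertices are shortened to the ..._from_verts... names. A condensed sketch of the accessor pattern follows for orientation; recalc_normals_example is not a function from the patch, only the calls inside it appear in the hunks below.

    #include "BKE_mesh.h"
    #include "BLI_span.hh"
    #include "DNA_mesh_types.h"
    #include "DNA_meshdata_types.h"

    /* Illustrative only: fetch spans from the mesh and feed their raw
     * (pointer, size) pairs to the existing C-style normal calculation. */
    static void recalc_normals_example(Mesh *mesh)
    {
      using blender::Span;

      const Span<MVert> verts = mesh->verts();
      const Span<MPoly> polys = mesh->polys();
      const Span<MLoop> loops = mesh->loops();

      float(*vert_normals)[3] = BKE_mesh_vertex_normals_for_write(mesh);
      float(*poly_normals)[3] = BKE_mesh_poly_normals_for_write(mesh);

      BKE_mesh_calc_normals_poly_and_vertex(verts.data(),
                                            verts.size(),
                                            loops.data(),
                                            loops.size(),
                                            polys.data(),
                                            polys.size(),
                                            poly_normals,
                                            vert_normals);
      /* The real code runs this inside blender::threading::isolate_task()
       * and clears the normal dirty flags afterwards. */
    }
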
diff --git a/source/blender/blenkernel/intern/mesh_normals.cc b/source/blender/blenkernel/intern/mesh_normals.cc
index 2366b7526a1..21dd39586ec 100644
--- a/source/blender/blenkernel/intern/mesh_normals.cc
+++ b/source/blender/blenkernel/intern/mesh_normals.cc
@@ -17,8 +17,7 @@
#include "DNA_meshdata_types.h"
#include "BLI_alloca.h"
-#include "BLI_bitmap.h"
-
+#include "BLI_bit_vector.hh"
#include "BLI_linklist.h"
#include "BLI_linklist_stack.h"
#include "BLI_math.h"
@@ -37,6 +36,8 @@
#include "atomic_ops.h"
+using blender::BitVector;
+using blender::MutableSpan;
using blender::Span;
// #define DEBUG_TIME
@@ -368,16 +369,19 @@ const float (*BKE_mesh_vertex_normals_ensure(const Mesh *mesh))[3]
/* Isolate task because a mutex is locked and computing normals is multi-threaded. */
blender::threading::isolate_task([&]() {
Mesh &mesh_mutable = *const_cast<Mesh *>(mesh);
+ const Span<MVert> verts = mesh_mutable.verts();
+ const Span<MPoly> polys = mesh_mutable.polys();
+ const Span<MLoop> loops = mesh_mutable.loops();
vert_normals = BKE_mesh_vertex_normals_for_write(&mesh_mutable);
poly_normals = BKE_mesh_poly_normals_for_write(&mesh_mutable);
- BKE_mesh_calc_normals_poly_and_vertex(mesh_mutable.mvert,
- mesh_mutable.totvert,
- mesh_mutable.mloop,
- mesh_mutable.totloop,
- mesh_mutable.mpoly,
- mesh_mutable.totpoly,
+ BKE_mesh_calc_normals_poly_and_vertex(verts.data(),
+ verts.size(),
+ loops.data(),
+ loops.size(),
+ polys.data(),
+ polys.size(),
poly_normals,
vert_normals);
@@ -413,15 +417,18 @@ const float (*BKE_mesh_poly_normals_ensure(const Mesh *mesh))[3]
/* Isolate task because a mutex is locked and computing normals is multi-threaded. */
blender::threading::isolate_task([&]() {
Mesh &mesh_mutable = *const_cast<Mesh *>(mesh);
+ const Span<MVert> verts = mesh_mutable.verts();
+ const Span<MPoly> polys = mesh_mutable.polys();
+ const Span<MLoop> loops = mesh_mutable.loops();
poly_normals = BKE_mesh_poly_normals_for_write(&mesh_mutable);
- BKE_mesh_calc_normals_poly(mesh_mutable.mvert,
- mesh_mutable.totvert,
- mesh_mutable.mloop,
- mesh_mutable.totloop,
- mesh_mutable.mpoly,
- mesh_mutable.totpoly,
+ BKE_mesh_calc_normals_poly(verts.data(),
+ verts.size(),
+ loops.data(),
+ loops.size(),
+ polys.data(),
+ polys.size(),
poly_normals);
BKE_mesh_poly_normals_clear_dirty(&mesh_mutable);
@@ -849,7 +856,10 @@ static void mesh_edges_sharp_tag(LoopSplitTaskDataCommon *data,
int(*edge_to_loops)[2] = data->edge_to_loops;
int *loop_to_poly = data->loop_to_poly;
- BLI_bitmap *sharp_edges = do_sharp_edges_tag ? BLI_BITMAP_NEW(numEdges, __func__) : nullptr;
+ BitVector sharp_edges;
+ if (do_sharp_edges_tag) {
+ sharp_edges.resize(numEdges, false);
+ }
const MPoly *mp;
int mp_index;
@@ -901,7 +911,7 @@ static void mesh_edges_sharp_tag(LoopSplitTaskDataCommon *data,
/* We want to avoid tagging edges as sharp when it is already defined as such by
* other causes than angle threshold. */
if (do_sharp_edges_tag && is_angle_sharp) {
- BLI_BITMAP_SET(sharp_edges, ml_curr->e, true);
+ sharp_edges[ml_curr->e].set();
}
}
else {
@@ -915,7 +925,7 @@ static void mesh_edges_sharp_tag(LoopSplitTaskDataCommon *data,
/* We want to avoid tagging edges as sharp when it is already defined as such by
* other causes than angle threshold. */
if (do_sharp_edges_tag) {
- BLI_BITMAP_SET(sharp_edges, ml_curr->e, false);
+ sharp_edges[ml_curr->e].reset();
}
}
/* Else, edge is already 'disqualified' (i.e. sharp)! */
@@ -927,12 +937,10 @@ static void mesh_edges_sharp_tag(LoopSplitTaskDataCommon *data,
MEdge *me;
int me_index;
for (me = (MEdge *)medges, me_index = 0; me_index < numEdges; me++, me_index++) {
- if (BLI_BITMAP_TEST(sharp_edges, me_index)) {
+ if (sharp_edges[me_index]) {
me->flag |= ME_SHARP;
}
}
-
- MEM_freeN(sharp_edges);
}
}
@@ -940,9 +948,9 @@ void BKE_edges_sharp_from_angle_set(const struct MVert *mverts,
const int UNUSED(numVerts),
struct MEdge *medges,
const int numEdges,
- struct MLoop *mloops,
+ const struct MLoop *mloops,
const int numLoops,
- struct MPoly *mpolys,
+ const struct MPoly *mpolys,
const float (*polynors)[3],
const int numPolys,
const float split_angle)
@@ -1354,7 +1362,7 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const MLoop *mloops,
const int (*edge_to_loops)[2],
const int *loop_to_poly,
const int *e2l_prev,
- BLI_bitmap *skip_loops,
+ BitVector<> &skip_loops,
const MLoop *ml_curr,
const MLoop *ml_prev,
const int ml_curr_index,
@@ -1383,8 +1391,8 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const MLoop *mloops,
BLI_assert(mlfan_vert_index >= 0);
BLI_assert(mpfan_curr_index >= 0);
- BLI_assert(!BLI_BITMAP_TEST(skip_loops, mlfan_vert_index));
- BLI_BITMAP_ENABLE(skip_loops, mlfan_vert_index);
+ BLI_assert(!skip_loops[mlfan_vert_index]);
+ skip_loops[mlfan_vert_index].set();
while (true) {
/* Find next loop of the smooth fan. */
@@ -1405,7 +1413,7 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const MLoop *mloops,
return false;
}
/* Smooth loop/edge. */
- if (BLI_BITMAP_TEST(skip_loops, mlfan_vert_index)) {
+ if (skip_loops[mlfan_vert_index]) {
if (mlfan_vert_index == ml_curr_index) {
/* We walked around a whole cyclic smooth fan without finding any already-processed loop,
* means we can use initial `ml_curr` / `ml_prev` edge as start for this smooth fan. */
@@ -1416,7 +1424,7 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const MLoop *mloops,
}
/* We can skip it in future, and keep checking the smooth fan. */
- BLI_BITMAP_ENABLE(skip_loops, mlfan_vert_index);
+ skip_loops[mlfan_vert_index].set();
}
}
@@ -1440,7 +1448,7 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
int ml_curr_index;
int ml_prev_index;
- BLI_bitmap *skip_loops = BLI_BITMAP_NEW(numLoops, __func__);
+ BitVector<> skip_loops(numLoops, false);
LoopSplitTaskData *data_buff = nullptr;
int data_idx = 0;
@@ -1482,7 +1490,7 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
ml_curr->e,
ml_curr->v,
IS_EDGE_SHARP(e2l_curr),
- BLI_BITMAP_TEST_BOOL(skip_loops, ml_curr_index));
+ skip_loops[ml_curr_index]);
#endif
/* A smooth edge, we have to check for cyclic smooth fan case.
@@ -1495,7 +1503,7 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
* However, this would complicate the code, add more memory usage, and despite its logical
* complexity, #loop_manifold_fan_around_vert_next() is quite cheap in term of CPU cycles,
* so really think it's not worth it. */
- if (!IS_EDGE_SHARP(e2l_curr) && (BLI_BITMAP_TEST(skip_loops, ml_curr_index) ||
+ if (!IS_EDGE_SHARP(e2l_curr) && (skip_loops[ml_curr_index] ||
!loop_split_generator_check_cyclic_smooth_fan(mloops,
mpolys,
edge_to_loops,
@@ -1590,7 +1598,6 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
if (edge_vectors) {
BLI_stack_free(edge_vectors);
}
- MEM_freeN(skip_loops);
#ifdef DEBUG_TIME
TIMEIT_END_AVERAGED(loop_split_generator);
@@ -1600,12 +1607,12 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
void BKE_mesh_normals_loop_split(const MVert *mverts,
const float (*vert_normals)[3],
const int UNUSED(numVerts),
- MEdge *medges,
+ const MEdge *medges,
const int numEdges,
- MLoop *mloops,
+ const MLoop *mloops,
float (*r_loopnors)[3],
const int numLoops,
- MPoly *mpolys,
+ const MPoly *mpolys,
const float (*polynors)[3],
const int numPolys,
const bool use_split_normals,
@@ -1628,7 +1635,7 @@ void BKE_mesh_normals_loop_split(const MVert *mverts,
int mp_index;
for (mp_index = 0; mp_index < numPolys; mp_index++) {
- MPoly *mp = &mpolys[mp_index];
+ const MPoly *mp = &mpolys[mp_index];
int ml_index = mp->loopstart;
const int ml_index_end = ml_index + mp->totloop;
const bool is_poly_flat = ((mp->flag & ME_SMOOTH) == 0);
@@ -1755,10 +1762,10 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
const int numVerts,
MEdge *medges,
const int numEdges,
- MLoop *mloops,
+ const MLoop *mloops,
float (*r_custom_loopnors)[3],
const int numLoops,
- MPoly *mpolys,
+ const MPoly *mpolys,
const float (*polynors)[3],
const int numPolys,
short (*r_clnors_data)[2],
@@ -1771,7 +1778,7 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
* (and perhaps from some editing tools later?).
* So better to keep some simplicity here, and just call #BKE_mesh_normals_loop_split() twice! */
MLoopNorSpaceArray lnors_spacearr = {nullptr};
- BLI_bitmap *done_loops = BLI_BITMAP_NEW((size_t)numLoops, __func__);
+ BitVector<> done_loops(numLoops, false);
float(*lnors)[3] = (float(*)[3])MEM_calloc_arrayN((size_t)numLoops, sizeof(*lnors), __func__);
int *loop_to_poly = (int *)MEM_malloc_arrayN((size_t)numLoops, sizeof(int), __func__);
/* In this case we always consider split nors as ON,
@@ -1829,14 +1836,14 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
/* This should not happen in theory, but in some rare case (probably ugly geometry)
* we can get some nullptr loopspacearr at this point. :/
* Maybe we should set those loops' edges as sharp? */
- BLI_BITMAP_ENABLE(done_loops, i);
+ done_loops[i].set();
if (G.debug & G_DEBUG) {
printf("WARNING! Getting invalid nullptr loop space for loop %d!\n", i);
}
continue;
}
- if (!BLI_BITMAP_TEST(done_loops, i)) {
+ if (!done_loops[i]) {
/* Notes:
* - In case of mono-loop smooth fan, we have nothing to do.
* - Loops in this linklist are ordered (in reversed order compared to how they were
@@ -1847,17 +1854,17 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
* to avoid small differences adding up into a real big one in the end!
*/
if (lnors_spacearr.lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) {
- BLI_BITMAP_ENABLE(done_loops, i);
+ done_loops[i].set();
continue;
}
LinkNode *loops = lnors_spacearr.lspacearr[i]->loops;
- MLoop *prev_ml = nullptr;
+ const MLoop *prev_ml = nullptr;
const float *org_nor = nullptr;
while (loops) {
const int lidx = POINTER_AS_INT(loops->link);
- MLoop *ml = &mloops[lidx];
+ const MLoop *ml = &mloops[lidx];
const int nidx = lidx;
float *nor = r_custom_loopnors[nidx];
@@ -1879,7 +1886,7 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
prev_ml = ml;
loops = loops->next;
- BLI_BITMAP_ENABLE(done_loops, lidx);
+ done_loops[lidx].set();
}
/* We also have to check between last and first loops,
@@ -1889,7 +1896,7 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
loops = lnors_spacearr.lspacearr[i]->loops;
if (loops && org_nor) {
const int lidx = POINTER_AS_INT(loops->link);
- MLoop *ml = &mloops[lidx];
+ const MLoop *ml = &mloops[lidx];
const int nidx = lidx;
float *nor = r_custom_loopnors[nidx];
@@ -1923,14 +1930,14 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
loop_to_poly);
}
else {
- BLI_bitmap_set_all(done_loops, true, (size_t)numLoops);
+ done_loops.fill(true);
}
/* And we just have to convert plain object-space custom normals to our
* lnor space-encoded ones. */
for (int i = 0; i < numLoops; i++) {
if (!lnors_spacearr.lspacearr[i]) {
- BLI_BITMAP_DISABLE(done_loops, i);
+ done_loops[i].reset();
if (G.debug & G_DEBUG) {
printf("WARNING! Still getting invalid nullptr loop space in second loop for loop %d!\n",
i);
@@ -1938,7 +1945,7 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
continue;
}
- if (BLI_BITMAP_TEST_BOOL(done_loops, i)) {
+ if (done_loops[i]) {
/* Note we accumulate and average all custom normals in current smooth fan,
* to avoid getting different clnors data (tiny differences in plain custom normals can
* give rather huge differences in computed 2D factors). */
@@ -1949,7 +1956,7 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
float *nor = r_custom_loopnors[nidx];
BKE_lnor_space_custom_normal_to_data(lnors_spacearr.lspacearr[i], nor, r_clnors_data[i]);
- BLI_BITMAP_DISABLE(done_loops, i);
+ done_loops[i].reset();
}
else {
int avg_nor_count = 0;
@@ -1967,7 +1974,7 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
BLI_SMALLSTACK_PUSH(clnors_data, (short *)r_clnors_data[lidx]);
loops = loops->next;
- BLI_BITMAP_DISABLE(done_loops, lidx);
+ done_loops[lidx].reset();
}
mul_v3_fl(avg_nor, 1.0f / (float)avg_nor_count);
@@ -1983,7 +1990,6 @@ static void mesh_normals_loop_custom_set(const MVert *mverts,
MEM_freeN(lnors);
MEM_freeN(loop_to_poly);
- MEM_freeN(done_loops);
BKE_lnor_spacearr_free(&lnors_spacearr);
}
@@ -1992,10 +1998,10 @@ void BKE_mesh_normals_loop_custom_set(const MVert *mverts,
const int numVerts,
MEdge *medges,
const int numEdges,
- MLoop *mloops,
+ const MLoop *mloops,
float (*r_custom_loopnors)[3],
const int numLoops,
- MPoly *mpolys,
+ const MPoly *mpolys,
const float (*polynors)[3],
const int numPolys,
short (*r_clnors_data)[2])
@@ -2015,18 +2021,18 @@ void BKE_mesh_normals_loop_custom_set(const MVert *mverts,
false);
}
-void BKE_mesh_normals_loop_custom_from_vertices_set(const MVert *mverts,
- const float (*vert_normals)[3],
- float (*r_custom_vertnors)[3],
- const int numVerts,
- MEdge *medges,
- const int numEdges,
- MLoop *mloops,
- const int numLoops,
- MPoly *mpolys,
- const float (*polynors)[3],
- const int numPolys,
- short (*r_clnors_data)[2])
+void BKE_mesh_normals_loop_custom_from_verts_set(const MVert *mverts,
+ const float (*vert_normals)[3],
+ float (*r_custom_vertnors)[3],
+ const int numVerts,
+ MEdge *medges,
+ const int numEdges,
+ const MLoop *mloops,
+ const int numLoops,
+ const MPoly *mpolys,
+ const float (*polynors)[3],
+ const int numPolys,
+ short (*r_clnors_data)[2])
{
mesh_normals_loop_custom_set(mverts,
vert_normals,
@@ -2054,20 +2060,24 @@ static void mesh_set_custom_normals(Mesh *mesh, float (*r_custom_nors)[3], const
}
else {
clnors = (short(*)[2])CustomData_add_layer(
- &mesh->ldata, CD_CUSTOMLOOPNORMAL, CD_CALLOC, nullptr, numloops);
+ &mesh->ldata, CD_CUSTOMLOOPNORMAL, CD_SET_DEFAULT, nullptr, numloops);
}
+ const Span<MVert> verts = mesh->verts();
+ MutableSpan<MEdge> edges = mesh->edges_for_write();
+ const Span<MPoly> polys = mesh->polys();
+ const Span<MLoop> loops = mesh->loops();
- mesh_normals_loop_custom_set(mesh->mvert,
+ mesh_normals_loop_custom_set(verts.data(),
BKE_mesh_vertex_normals_ensure(mesh),
- mesh->totvert,
- mesh->medge,
- mesh->totedge,
- mesh->mloop,
+ verts.size(),
+ edges.data(),
+ edges.size(),
+ loops.data(),
r_custom_nors,
- mesh->totloop,
- mesh->mpoly,
+ loops.size(),
+ polys.data(),
BKE_mesh_poly_normals_ensure(mesh),
- mesh->totpoly,
+ polys.size(),
clnors,
use_vertices);
}
@@ -2077,7 +2087,7 @@ void BKE_mesh_set_custom_normals(Mesh *mesh, float (*r_custom_loopnors)[3])
mesh_set_custom_normals(mesh, r_custom_loopnors, false);
}
-void BKE_mesh_set_custom_normals_from_vertices(Mesh *mesh, float (*r_custom_vertnors)[3])
+void BKE_mesh_set_custom_normals_from_verts(Mesh *mesh, float (*r_custom_vertnors)[3])
{
mesh_set_custom_normals(mesh, r_custom_vertnors, true);
}
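
For reference, the BLI_bitmap to BitVector mapping applied throughout the patch follows a fixed pattern. The sketch below is illustrative only; num_edges, edge_index and sharp are placeholder names, not identifiers from the patch.

    #include "BLI_bit_vector.hh"
    #include "BLI_bitmap.h"
    #include "MEM_guardedalloc.h"

    /* Old style: heap-allocated C bitmap that must be freed by hand. */
    static void old_style(const int num_edges, const int edge_index)
    {
      BLI_bitmap *sharp = BLI_BITMAP_NEW(num_edges, __func__);
      BLI_BITMAP_SET(sharp, edge_index, true);
      if (BLI_BITMAP_TEST(sharp, edge_index)) {
        /* ... */
      }
      MEM_freeN(sharp);
    }

    /* New style: owning BitVector, released automatically by its destructor. */
    static void new_style(const int num_edges, const int edge_index)
    {
      blender::BitVector<> sharp(num_edges, false);
      sharp[edge_index].set();   /* Replaces BLI_BITMAP_SET(..., true).  */
      sharp[edge_index].reset(); /* Replaces BLI_BITMAP_SET(..., false). */
      if (sharp[edge_index]) {   /* Replaces BLI_BITMAP_TEST(...).       */
        /* ... */
      }
      /* sharp.fill(true) replaces BLI_bitmap_set_all(sharp, true, num_edges);
       * no MEM_freeN() is needed. */
    }

Besides dropping the manual MEM_freeN() calls, this lets the bit vector be declared unconditionally and resized only when it is actually needed, as done in mesh_edges_sharp_tag() above.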