git.blender.org/blender.git
Diffstat (limited to 'source')
-rw-r--r--  source/blender/bmesh/intern/bmesh_mesh_partial_update.c   | 261
-rw-r--r--  source/blender/bmesh/intern/bmesh_mesh_partial_update.h   |  19
-rw-r--r--  source/blender/editors/transform/transform_convert_mesh.c | 362
-rw-r--r--  source/blender/editors/transform/transform_data.h         |   2
4 files changed, 553 insertions(+), 91 deletions(-)
diff --git a/source/blender/bmesh/intern/bmesh_mesh_partial_update.c b/source/blender/bmesh/intern/bmesh_mesh_partial_update.c
index 267705aa7c7..46fd2ad9a31 100644
--- a/source/blender/bmesh/intern/bmesh_mesh_partial_update.c
+++ b/source/blender/bmesh/intern/bmesh_mesh_partial_update.c
@@ -25,16 +25,26 @@
*
* In the future this could be integrated into GPU updates too.
*
- * Potential Improvements
- * ======================
+ * Kinds of Partial Geometry
+ * =========================
*
- * Some calculations could be significantly limited in the case of affine transformations
- * (tessellation is an obvious candidate). Where only faces which have a mix
- * of tagged and untagged vertices would need to be recalculated.
+ * All Tagged
+ * ----------
+ * Operate on everything that's tagged as well as connected geometry.
+ * see: #BM_mesh_partial_create_from_verts
*
- * In general this would work well besides some corner cases such as scaling to zero.
- * Although the exact result may depend on the normal (for N-GONS),
- * so for now update the tessellation of all tagged geometry.
+ * Grouped
+ * -------
+ * Operate on everything that is connected to both tagged and un-tagged geometry.
+ * see: #BM_mesh_partial_create_from_verts_group_single
+ *
+ * Reduces computations when transforming isolated regions.
+ *
+ * Optionally support multiple groups, since axis-mirror (for example)
+ * will transform vertices in different directions while keeping centered vertices in place.
+ * see: #BM_mesh_partial_create_from_verts_group_multi
+ *
+ * \note Others can be added as needed.
*/
#include "DNA_object_types.h"
@@ -93,21 +103,24 @@ BLI_INLINE bool partial_elem_face_ensure(BMPartialUpdate *bmpinfo,
return false;
}
+/**
+ * All Tagged & Connected, see: #BM_mesh_partial_create_from_verts
+ * Operate on everything that's tagged as well as connected geometry.
+ */
BMPartialUpdate *BM_mesh_partial_create_from_verts(BMesh *bm,
const BMPartialUpdate_Params *params,
- const int verts_len,
- bool (*filter_fn)(BMVert *, void *user_data),
- void *user_data)
+ const BLI_bitmap *verts_mask,
+ const int verts_mask_count)
{
/* The caller is doing something wrong if this isn't the case. */
- BLI_assert(verts_len <= bm->totvert);
+ BLI_assert(verts_mask_count <= bm->totvert);
BMPartialUpdate *bmpinfo = MEM_callocN(sizeof(*bmpinfo), __func__);
/* Reserve more edges than vertices since it's common for a grid topology
* to use around twice as many edges as vertices. */
- const int default_verts_len_alloc = verts_len;
- const int default_faces_len_alloc = min_ii(bm->totface, verts_len);
+ const int default_verts_len_alloc = verts_mask_count;
+ const int default_faces_len_alloc = min_ii(bm->totface, verts_mask_count);
/* Allocate tags instead of using #BM_ELEM_TAG because the caller may already be using tags.
* Further, walking over all geometry to clear the tags isn't so efficient. */
@@ -143,7 +156,7 @@ BMPartialUpdate *BM_mesh_partial_create_from_verts(BMesh *bm,
int i;
BM_ITER_MESH_INDEX (v, &iter, bm, BM_VERTS_OF_MESH, i) {
BM_elem_index_set(v, i); /* set_inline */
- if (!filter_fn(v, user_data)) {
+ if (!BLI_BITMAP_TEST(verts_mask, i)) {
continue;
}
BMEdge *e_iter = v->e;
@@ -203,6 +216,224 @@ BMPartialUpdate *BM_mesh_partial_create_from_verts(BMesh *bm,
return bmpinfo;
}
+/**
+ * All Connected, operate on all faces that have both tagged and un-tagged vertices.
+ *
+ * Reduces computations when transforming isolated regions.
+ */
+BMPartialUpdate *BM_mesh_partial_create_from_verts_group_single(
+ BMesh *bm,
+ const BMPartialUpdate_Params *params,
+ const BLI_bitmap *verts_mask,
+ const int verts_mask_count)
+{
+ BMPartialUpdate *bmpinfo = MEM_callocN(sizeof(*bmpinfo), __func__);
+
+ BLI_bitmap *verts_tag = NULL;
+ BLI_bitmap *faces_tag = NULL;
+
+ /* It's not worth guessing a large number as isolated regions will allocate zero faces. */
+ const int default_faces_len_alloc = 1;
+
+ int face_tag_loop_len = 0;
+
+ if (params->do_normals || params->do_tessellate) {
+
+ /* Faces. */
+ if (bmpinfo->faces == NULL) {
+ bmpinfo->faces_len_alloc = default_faces_len_alloc;
+ bmpinfo->faces = MEM_mallocN((sizeof(BMFace *) * bmpinfo->faces_len_alloc), __func__);
+ faces_tag = BLI_BITMAP_NEW((size_t)bm->totface, __func__);
+ }
+
+ BMFace *f;
+ BMIter iter;
+ int i;
+ BM_ITER_MESH_INDEX (f, &iter, bm, BM_FACES_OF_MESH, i) {
+ enum { SIDE_A = (1 << 0), SIDE_B = (1 << 1) } side_flag = 0;
+ BM_elem_index_set(f, i); /* set_inline */
+ BMLoop *l_iter, *l_first;
+ l_iter = l_first = BM_FACE_FIRST_LOOP(f);
+ do {
+ const int j = BM_elem_index_get(l_iter->v);
+ side_flag |= BLI_BITMAP_TEST(verts_mask, j) ? SIDE_A : SIDE_B;
+ if (UNLIKELY(side_flag == (SIDE_A | SIDE_B))) {
+ partial_elem_face_ensure(bmpinfo, faces_tag, f);
+ face_tag_loop_len += f->len;
+ break;
+ }
+ } while ((l_iter = l_iter->next) != l_first);
+ }
+ }
+
+ if (params->do_normals) {
+ /* Extend to all the faces' vertices:
+ * any change to a face's normal needs to update all surrounding vertices. */
+
+ /* Over-allocate using the total number of face loops. */
+ const int default_verts_len_alloc = min_ii(bm->totvert, max_ii(1, face_tag_loop_len));
+
+ /* Vertices. */
+ if (bmpinfo->verts == NULL) {
+ bmpinfo->verts_len_alloc = default_verts_len_alloc;
+ bmpinfo->verts = MEM_mallocN((sizeof(BMVert *) * bmpinfo->verts_len_alloc), __func__);
+ verts_tag = BLI_BITMAP_NEW((size_t)bm->totvert, __func__);
+ }
+
+ for (int i = 0; i < bmpinfo->faces_len; i++) {
+ BMFace *f = bmpinfo->faces[i];
+ BMLoop *l_iter, *l_first;
+ l_iter = l_first = BM_FACE_FIRST_LOOP(f);
+ do {
+ partial_elem_vert_ensure(bmpinfo, verts_tag, l_iter->v);
+ } while ((l_iter = l_iter->next) != l_first);
+ }
+
+ /* Loose vertex support: these need special handling, as loose normals depend on location. */
+ if (bmpinfo->verts_len < verts_mask_count) {
+ BMVert *v;
+ BMIter iter;
+ int i;
+ BM_ITER_MESH_INDEX (v, &iter, bm, BM_VERTS_OF_MESH, i) {
+ if (BLI_BITMAP_TEST(verts_mask, i) && (BM_vert_find_first_loop(v) == NULL)) {
+ partial_elem_vert_ensure(bmpinfo, verts_tag, v);
+ }
+ }
+ }
+ }
+
+ if (verts_tag) {
+ MEM_freeN(verts_tag);
+ }
+ if (faces_tag) {
+ MEM_freeN(faces_tag);
+ }
+
+ bmpinfo->params = *params;
+
+ return bmpinfo;
+}
+
+/**
+ * All Connected, operate on all faces whose vertices are not all in the same group
+ * (vertices marked -1 always qualify).
+ *
+ * Reduces computations when transforming isolated regions.
+ *
+ * This is a version of #BM_mesh_partial_create_from_verts_group_single
+ * that handles multiple groups instead of a bitmap mask.
+ *
+ * This is needed, for example, when transform has mirror enabled, since one side
+ * needs a different group from the other: a face with vertices attached to both
+ * sides won't undergo an affine transformation.
+ *
+ * \param verts_group: Vertex-aligned array of groups.
+ * Values are used as follows:
+ * - >0: Vertices that share a value belong to the same group;
+ *   faces whose vertices all fall in one group are skipped.
+ * - 0: Not in a group (don't handle these).
+ * - -1: Don't use grouping logic (include any face that contains a vertex with this group).
+ * \param verts_group_count: The number of non-zero values in `verts_group`.
+ */
+BMPartialUpdate *BM_mesh_partial_create_from_verts_group_multi(
+ BMesh *bm,
+ const BMPartialUpdate_Params *params,
+ const int *verts_group,
+ const int verts_group_count)
+{
+ /* Provide a quick way of visualizing which faces are being manipulated. */
+ // #define DEBUG_MATERIAL
+
+ BMPartialUpdate *bmpinfo = MEM_callocN(sizeof(*bmpinfo), __func__);
+
+ BLI_bitmap *verts_tag = NULL;
+ BLI_bitmap *faces_tag = NULL;
+
+ /* It's not worth guessing a large number as isolated regions will allocate zero faces. */
+ const int default_faces_len_alloc = 1;
+
+ int face_tag_loop_len = 0;
+
+ if (params->do_normals || params->do_tessellate) {
+
+ /* Faces. */
+ if (bmpinfo->faces == NULL) {
+ bmpinfo->faces_len_alloc = default_faces_len_alloc;
+ bmpinfo->faces = MEM_mallocN((sizeof(BMFace *) * bmpinfo->faces_len_alloc), __func__);
+ faces_tag = BLI_BITMAP_NEW((size_t)bm->totface, __func__);
+ }
+
+ BMFace *f;
+ BMIter iter;
+ int i;
+ BM_ITER_MESH_INDEX (f, &iter, bm, BM_FACES_OF_MESH, i) {
+ BM_elem_index_set(f, i); /* set_inline */
+ BMLoop *l_iter, *l_first;
+ l_iter = l_first = BM_FACE_FIRST_LOOP(f);
+ const int group_test = verts_group[BM_elem_index_get(l_iter->prev->v)];
+#ifdef DEBUG_MATERIAL
+ f->mat_nr = 0;
+#endif
+ do {
+ const int group_iter = verts_group[BM_elem_index_get(l_iter->v)];
+ if (UNLIKELY((group_iter != group_test) || (group_iter == -1))) {
+ partial_elem_face_ensure(bmpinfo, faces_tag, f);
+ face_tag_loop_len += f->len;
+#ifdef DEBUG_MATERIAL
+ f->mat_nr = 1;
+#endif
+ break;
+ }
+ } while ((l_iter = l_iter->next) != l_first);
+ }
+ }
+
+ if (params->do_normals) {
+ /* Extend to all the faces' vertices:
+ * any change to a face's normal needs to update all surrounding vertices. */
+
+ /* Over-allocate using the total number of face loops. */
+ const int default_verts_len_alloc = min_ii(bm->totvert, max_ii(1, face_tag_loop_len));
+
+ /* Vertices. */
+ if (bmpinfo->verts == NULL) {
+ bmpinfo->verts_len_alloc = default_verts_len_alloc;
+ bmpinfo->verts = MEM_mallocN((sizeof(BMVert *) * bmpinfo->verts_len_alloc), __func__);
+ verts_tag = BLI_BITMAP_NEW((size_t)bm->totvert, __func__);
+ }
+
+ for (int i = 0; i < bmpinfo->faces_len; i++) {
+ BMFace *f = bmpinfo->faces[i];
+ BMLoop *l_iter, *l_first;
+ l_iter = l_first = BM_FACE_FIRST_LOOP(f);
+ do {
+ partial_elem_vert_ensure(bmpinfo, verts_tag, l_iter->v);
+ } while ((l_iter = l_iter->next) != l_first);
+ }
+
+ /* Loose vertex support: these need special handling, as loose normals depend on location. */
+ if (bmpinfo->verts_len < verts_group_count) {
+ BMVert *v;
+ BMIter iter;
+ int i;
+ BM_ITER_MESH_INDEX (v, &iter, bm, BM_VERTS_OF_MESH, i) {
+ if ((verts_group[i] != 0) && (BM_vert_find_first_loop(v) == NULL)) {
+ partial_elem_vert_ensure(bmpinfo, verts_tag, v);
+ }
+ }
+ }
+ }
+
+ if (verts_tag) {
+ MEM_freeN(verts_tag);
+ }
+ if (faces_tag) {
+ MEM_freeN(faces_tag);
+ }
+
+ bmpinfo->params = *params;
+
+ return bmpinfo;
+}
+
void BM_mesh_partial_destroy(BMPartialUpdate *bmpinfo)
{
if (bmpinfo->verts) {
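To make the new mask-based API concrete, a minimal usage sketch (not part of the
patch; assumes `em` is a valid BMEditMesh and omits error handling):

    /* Build a vertex mask from the current selection and run a partial update. */
    BLI_bitmap *verts_mask = BLI_BITMAP_NEW(em->bm->totvert, __func__);
    int verts_mask_count = 0;

    BMVert *v;
    BMIter iter;
    int i;
    BM_ITER_MESH_INDEX (v, &iter, em->bm, BM_VERTS_OF_MESH, i) {
      BM_elem_index_set(v, i); /* set_inline */
      if (BM_elem_flag_test(v, BM_ELEM_SELECT)) {
        BLI_BITMAP_ENABLE(verts_mask, i);
        verts_mask_count += 1;
      }
    }

    BMPartialUpdate *bmpinfo = BM_mesh_partial_create_from_verts(
        em->bm,
        &(BMPartialUpdate_Params){
            .do_tessellate = true,
            .do_normals = true,
        },
        verts_mask,
        verts_mask_count);

    BKE_editmesh_looptri_and_normals_calc_with_partial(em, bmpinfo);

    BM_mesh_partial_destroy(bmpinfo);
    MEM_freeN(verts_mask);

Compared to the old callback form (`filter_fn` + `user_data`), the caller pays one
bitmap allocation, and the create function avoids an indirect call per vertex.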
diff --git a/source/blender/bmesh/intern/bmesh_mesh_partial_update.h b/source/blender/bmesh/intern/bmesh_mesh_partial_update.h
index 3dbfb985e92..cf4eab22836 100644
--- a/source/blender/bmesh/intern/bmesh_mesh_partial_update.h
+++ b/source/blender/bmesh/intern/bmesh_mesh_partial_update.h
@@ -54,9 +54,20 @@ typedef struct BMPartialUpdate {
BMPartialUpdate *BM_mesh_partial_create_from_verts(BMesh *bm,
const BMPartialUpdate_Params *params,
- const int verts_len,
- bool (*filter_fn)(BMVert *, void *user_data),
- void *user_data)
- ATTR_NONNULL(1, 2, 4) ATTR_WARN_UNUSED_RESULT;
+ const unsigned int *verts_mask,
+ const int verts_mask_count)
+ ATTR_NONNULL(1, 2, 3) ATTR_WARN_UNUSED_RESULT;
+
+BMPartialUpdate *BM_mesh_partial_create_from_verts_group_single(
+ BMesh *bm,
+ const BMPartialUpdate_Params *params,
+ const unsigned int *verts_mask,
+ const int verts_mask_count) ATTR_NONNULL(1, 2, 3) ATTR_WARN_UNUSED_RESULT;
+
+BMPartialUpdate *BM_mesh_partial_create_from_verts_group_multi(
+ BMesh *bm,
+ const BMPartialUpdate_Params *params,
+ const int *verts_group,
+ const int verts_group_count) ATTR_NONNULL(1, 2, 3) ATTR_WARN_UNUSED_RESULT;
void BM_mesh_partial_destroy(BMPartialUpdate *bmpinfo) ATTR_NONNULL(1);
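A note on the signatures: the header spells the mask as `const unsigned int *`
while the implementation uses `const BLI_bitmap *`. The two agree because (at the
time of this patch, as far as I can tell) the bitmap type is a plain typedef,
which lets this header avoid including BLI_bitmap.h:

    /* From BLI_bitmap.h, reproduced for reference only: */
    typedef unsigned int BLI_bitmap;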
diff --git a/source/blender/editors/transform/transform_convert_mesh.c b/source/blender/editors/transform/transform_convert_mesh.c
index 4c674136b6a..58cf82c72d3 100644
--- a/source/blender/editors/transform/transform_convert_mesh.c
+++ b/source/blender/editors/transform/transform_convert_mesh.c
@@ -27,6 +27,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_alloca.h"
+#include "BLI_bitmap.h"
#include "BLI_linklist_stack.h"
#include "BLI_math.h"
#include "BLI_memarena.h"
@@ -59,16 +60,37 @@ static void tc_mesh_customdata_free_fn(struct TransInfo *t,
struct TransCustomDataLayer;
static void tc_mesh_customdatacorrect_free(struct TransCustomDataLayer *tcld);
+struct TransCustomData_PartialUpdate {
+ struct BMPartialUpdate *cache;
+
+ /** The size of proportional editing used for #BMPartialUpdate. */
+ float prop_size;
+ /** The size of proportional editing for the last update. */
+ float prop_size_prev;
+};
+
+enum ePartialType {
+ PARTIAL_NONE = -1,
+ /**
+ * Update for all tagged vertices (any kind of deformation).
+ * Use as a default since it can be used with any kind of deformation.
+ */
+ PARTIAL_TYPE_ALL = 0,
+ /**
+ * Update only faces that span tagged and non-tagged vertices (affine transformations).
+ * Use when the transform is guaranteed not to change the relative locations of vertices.
+ *
+ * This has the advantage that selecting the entire mesh or only isolated elements
+ * can skip normal/tessellation updates entirely, so it's worth using when possible.
+ */
+ PARTIAL_TYPE_GROUP = 1,
+
+};
+#define PARTIAL_TYPE_MAX 2
+
struct TransCustomDataMesh {
struct TransCustomDataLayer *cd_layer_correct;
- struct {
- struct BMPartialUpdate *cache;
-
- /** The size of proportional editing used for `partial_update_cache`. */
- float prop_size;
- /** The size of proportional editing for the last update. */
- float prop_size_prev;
- } partial_update;
+ struct TransCustomData_PartialUpdate partial_update[PARTIAL_TYPE_MAX];
};
static struct TransCustomDataMesh *tc_mesh_customdata_ensure(TransDataContainer *tc)
@@ -90,8 +112,10 @@ static void tc_mesh_customdata_free(struct TransCustomDataMesh *tcmd)
tc_mesh_customdatacorrect_free(tcmd->cd_layer_correct);
}
- if (tcmd->partial_update.cache != NULL) {
- BM_mesh_partial_destroy(tcmd->partial_update.cache);
+ for (int i = 0; i < ARRAY_SIZE(tcmd->partial_update); i++) {
+ if (tcmd->partial_update[i].cache != NULL) {
+ BM_mesh_partial_destroy(tcmd->partial_update[i].cache);
+ }
}
MEM_freeN(tcmd);
@@ -1676,19 +1700,18 @@ void createTransEditVerts(TransInfo *t)
/** \} */
/* -------------------------------------------------------------------- */
-/** \name Recalc Mesh Data
+/** \name Recalc Mesh Data (Partial Update)
* \{ */
-static bool bm_vert_tag_filter_fn(BMVert *v, void *UNUSED(user_data))
-{
- return BM_elem_flag_test(v, BM_ELEM_TAG);
-}
-
-static BMPartialUpdate *tc_mesh_ensure_partial_update(TransInfo *t, TransDataContainer *tc)
+static BMPartialUpdate *tc_mesh_partial_ensure(TransInfo *t,
+ TransDataContainer *tc,
+ enum ePartialType partial_type)
{
struct TransCustomDataMesh *tcmd = tc_mesh_customdata_ensure(tc);
- if (tcmd->partial_update.cache) {
+ struct TransCustomData_PartialUpdate *pupdate = &tcmd->partial_update[partial_type];
+
+ if (pupdate->cache) {
/* Recalculate partial update data when the proportional editing size changes.
*
@@ -1699,80 +1722,285 @@ static BMPartialUpdate *tc_mesh_ensure_partial_update(TransInfo *t, TransDataCon
* Extra logic is needed to account for this situation. */
bool recalc;
- if (tcmd->partial_update.prop_size_prev < t->prop_size) {
+ if (pupdate->prop_size_prev < t->prop_size) {
/* Size increase, simply recalculate. */
recalc = true;
}
- else if (tcmd->partial_update.prop_size_prev > t->prop_size) {
+ else if (pupdate->prop_size_prev > t->prop_size) {
/* Size decreased, first use this partial data since reducing the size will transform
* geometry which needs recalculating. */
- tcmd->partial_update.prop_size_prev = t->prop_size;
+ pupdate->prop_size_prev = t->prop_size;
recalc = false;
}
- else if (tcmd->partial_update.prop_size != t->prop_size) {
- BLI_assert(tcmd->partial_update.prop_size > tcmd->partial_update.prop_size_prev);
+ else if (pupdate->prop_size != t->prop_size) {
+ BLI_assert(pupdate->prop_size > pupdate->prop_size_prev);
recalc = true;
}
else {
- BLI_assert(t->prop_size == tcmd->partial_update.prop_size_prev);
+ BLI_assert(t->prop_size == pupdate->prop_size_prev);
recalc = false;
}
if (!recalc) {
- return tcmd->partial_update.cache;
+ return pupdate->cache;
}
- BM_mesh_partial_destroy(tcmd->partial_update.cache);
- tcmd->partial_update.cache = NULL;
+ BM_mesh_partial_destroy(pupdate->cache);
+ pupdate->cache = NULL;
}
BMEditMesh *em = BKE_editmesh_from_object(tc->obedit);
- BM_mesh_elem_hflag_disable_all(em->bm, BM_VERT, BM_ELEM_TAG, false);
+ BM_mesh_elem_index_ensure(em->bm, BM_VERT);
- int verts_len = 0;
- int i;
- TransData *td;
- for (i = 0, td = tc->data; i < tc->data_len; i++, td++) {
- if (td->factor != 0.0f) {
- BMVert *v = (BMVert *)td->extra;
- BM_elem_flag_enable(v, BM_ELEM_TAG);
- verts_len += 1;
+ /* Only one of `verts_group` or `verts_mask` is used. */
+ int *verts_group = NULL;
+ int verts_group_count = 0; /* Number of non-zero elements in `verts_group`. */
+
+ BLI_bitmap *verts_mask = NULL;
+ int verts_mask_count = 0; /* Number of elements enabled in `verts_mask`. */
+
+ if ((partial_type == PARTIAL_TYPE_GROUP) &&
+ ((t->flag & T_PROP_EDIT) || tc->use_mirror_axis_any)) {
+ verts_group = MEM_callocN(sizeof(*verts_group) * em->bm->totvert, __func__);
+ int i;
+ TransData *td;
+ for (i = 0, td = tc->data; i < tc->data_len; i++, td++) {
+ if (td->factor == 0.0f) {
+ continue;
+ }
+ const BMVert *v = (BMVert *)td->extra;
+ const int v_index = BM_elem_index_get(v);
+ BLI_assert(verts_group[v_index] == 0);
+ if (td->factor < 1.0f) {
+ /* Don't use grouping logic when the factor is under 1.0. */
+ verts_group[v_index] = -1;
+ }
+ else {
+ BLI_assert(td->factor == 1.0f);
+ verts_group[v_index] = 1;
+ if (tc->use_mirror_axis_any) {
+ /* Use bits 2-4 for central alignment (don't overlap the first bit). */
+ const int flag = td->flag & (TD_MIRROR_EDGE_X | TD_MIRROR_EDGE_Y | TD_MIRROR_EDGE_Z);
+ verts_group[v_index] |= (flag >> TD_MIRROR_EDGE_AXIS_SHIFT) << 1;
+ }
+ }
+ verts_group_count += 1;
}
- }
- TransDataMirror *td_mirror = tc->data_mirror;
- for (i = 0; i < tc->data_mirror_len; i++, td_mirror++) {
- BMVert *v_mirr = (BMVert *)POINTER_OFFSET(td_mirror->loc_src, -offsetof(BMVert, co));
+ TransDataMirror *td_mirror = tc->data_mirror;
+ for (i = 0; i < tc->data_mirror_len; i++, td_mirror++) {
+ BMVert *v_mirr = (BMVert *)POINTER_OFFSET(td_mirror->loc_src, -offsetof(BMVert, co));
+ /* The equality check is to account for the case when topology mirror moves
+ * the vertex from its original location to match its symmetrical position,
+ * with proportional editing enabled. */
+ const int v_mirr_index = BM_elem_index_get(v_mirr);
+ if (verts_group[v_mirr_index] == 0 && equals_v3v3(td_mirror->loc, td_mirror->iloc)) {
+ continue;
+ }
- /* The equality check is to account for the case when topology mirror moves
- * the vertex from it's original location to match it's symmetrical position,
- * with proportional editing enabled. */
- if (BM_elem_flag_test(v_mirr, BM_ELEM_TAG) || !equals_v3v3(td_mirror->loc, td_mirror->iloc)) {
BMVert *v_mirr_other = (BMVert *)td_mirror->extra;
/* This assert should never fail since there is no overlap
* between mirrored vertices and non-mirrored. */
- BLI_assert(!BM_elem_flag_test(v_mirr_other, BM_ELEM_TAG));
- BM_elem_flag_enable(v_mirr_other, BM_ELEM_TAG);
- verts_len += 1;
+ BLI_assert(verts_group[BM_elem_index_get(v_mirr_other)] == 0);
+ const int v_mirr_other_index = BM_elem_index_get(v_mirr_other);
+
+ if (verts_group[v_mirr_index] == -1) {
+ verts_group[v_mirr_other_index] = -1;
+ }
+ else {
+ /* Use bits 5-7 for mirror (don't overlap previous bits). */
+ const int flag = td_mirror->flag & (TD_MIRROR_X | TD_MIRROR_Y | TD_MIRROR_Z);
+ verts_group[v_mirr_other_index] |= (flag >> TD_MIRROR_EDGE_AXIS_SHIFT) << 4;
+ }
+ verts_group_count += 1;
+ }
+ }
+ else {
+ /* See the body of the comments in the previous block for details. */
+ verts_mask = BLI_BITMAP_NEW(em->bm->totvert, __func__);
+ int i;
+ TransData *td;
+ for (i = 0, td = tc->data; i < tc->data_len; i++, td++) {
+ if (td->factor == 0.0f) {
+ continue;
+ }
+ const BMVert *v = (BMVert *)td->extra;
+ const int v_index = BM_elem_index_get(v);
+ BLI_assert(!BLI_BITMAP_TEST(verts_mask, v_index));
+ BLI_BITMAP_ENABLE(verts_mask, v_index);
+ verts_mask_count += 1;
+ }
+
+ TransDataMirror *td_mirror = tc->data_mirror;
+ for (i = 0; i < tc->data_mirror_len; i++, td_mirror++) {
+ BMVert *v_mirr = (BMVert *)POINTER_OFFSET(td_mirror->loc_src, -offsetof(BMVert, co));
+ if (!BLI_BITMAP_TEST(verts_mask, BM_elem_index_get(v_mirr)) &&
+ equals_v3v3(td_mirror->loc, td_mirror->iloc)) {
+ continue;
+ }
+
+ BMVert *v_mirr_other = (BMVert *)td_mirror->extra;
+ BLI_assert(!BLI_BITMAP_TEST(verts_mask, BM_elem_index_get(v_mirr_other)));
+ const int v_mirr_other_index = BM_elem_index_get(v_mirr_other);
+ BLI_BITMAP_ENABLE(verts_mask, v_mirr_other_index);
+ verts_mask_count += 1;
+ }
+ }
+
+ switch (partial_type) {
+ case PARTIAL_TYPE_ALL: {
+ pupdate->cache = BM_mesh_partial_create_from_verts(em->bm,
+ &(BMPartialUpdate_Params){
+ .do_tessellate = true,
+ .do_normals = true,
+ },
+ verts_mask,
+ verts_mask_count);
+ break;
+ }
+ case PARTIAL_TYPE_GROUP: {
+ pupdate->cache =
+ (verts_group ? BM_mesh_partial_create_from_verts_group_multi(em->bm,
+ &(BMPartialUpdate_Params){
+ .do_tessellate = true,
+ .do_normals = true,
+ },
+ verts_group,
+ verts_group_count) :
+ BM_mesh_partial_create_from_verts_group_single(em->bm,
+ &(BMPartialUpdate_Params){
+ .do_tessellate = true,
+ .do_normals = true,
+ },
+ verts_mask,
+ verts_mask_count));
+ break;
+ }
+ case PARTIAL_NONE: {
+ BLI_assert_unreachable();
}
}
- tcmd->partial_update.cache = BM_mesh_partial_create_from_verts(em->bm,
- &(BMPartialUpdate_Params){
- .do_tessellate = true,
- .do_normals = true,
- },
- verts_len,
- bm_vert_tag_filter_fn,
- NULL);
+ if (verts_group) {
+ MEM_freeN(verts_group);
+ }
+ else {
+ MEM_freeN(verts_mask);
+ }
- tcmd->partial_update.prop_size_prev = t->prop_size;
- tcmd->partial_update.prop_size = t->prop_size;
+ pupdate->prop_size_prev = t->prop_size;
+ pupdate->prop_size = t->prop_size;
- return tcmd->partial_update.cache;
+ return pupdate->cache;
}
+static void tc_mesh_partial_types_calc(TransInfo *t,
+ enum ePartialType *r_partial_for_looptri,
+ enum ePartialType *r_partial_for_normals)
+{
+ /* Calculate the kinds of partial updates that can be performed. */
+ enum ePartialType partial_for_normals = PARTIAL_NONE;
+ enum ePartialType partial_for_looptri = PARTIAL_NONE;
+
+ /* Note that operations such as #TFM_CREASE are not handled here
+ * (if they were, leaving as #PARTIAL_NONE would be appropriate). */
+ switch (t->mode) {
+ case TFM_TRANSLATION: {
+ partial_for_looptri = PARTIAL_TYPE_GROUP;
+ partial_for_normals = PARTIAL_TYPE_GROUP;
+ break;
+ }
+ case TFM_ROTATION: {
+ partial_for_looptri = PARTIAL_TYPE_GROUP;
+ partial_for_normals = PARTIAL_TYPE_ALL;
+ break;
+ }
+ case TFM_RESIZE: {
+ partial_for_looptri = PARTIAL_TYPE_GROUP;
+ partial_for_normals = PARTIAL_TYPE_GROUP;
+ /* Non-uniform scale needs to recalculate all normals
+ * since their relative locations change.
+ * Uniform negative scale can keep normals as-is: since the faces are flipped,
+ * the normals remain unchanged. */
+ if ((t->con.mode & CON_APPLY) ||
+ (t->values_final[0] != t->values_final[1] || t->values_final[0] != t->values_final[2])) {
+ partial_for_normals = PARTIAL_TYPE_ALL;
+ }
+ break;
+ }
+ default: {
+ partial_for_looptri = PARTIAL_TYPE_ALL;
+ partial_for_normals = PARTIAL_TYPE_ALL;
+ break;
+ }
+ }
+
+ /* With projection, transform isn't affine. */
+ if (activeSnap_with_project(t)) {
+ if (partial_for_looptri == PARTIAL_TYPE_GROUP) {
+ partial_for_looptri = PARTIAL_TYPE_ALL;
+ }
+ if (partial_for_normals == PARTIAL_TYPE_GROUP) {
+ partial_for_normals = PARTIAL_TYPE_ALL;
+ }
+ }
+
+ *r_partial_for_looptri = partial_for_looptri;
+ *r_partial_for_normals = partial_for_normals;
+}
+
+static void tc_mesh_partial_update(TransInfo *t,
+ TransDataContainer *tc,
+ enum ePartialType partial_for_looptri,
+ enum ePartialType partial_for_normals)
+{
+ BMEditMesh *em = BKE_editmesh_from_object(tc->obedit);
+
+ /* Matching. */
+ if ((partial_for_looptri == PARTIAL_TYPE_ALL) && (partial_for_normals == PARTIAL_TYPE_ALL)) {
+ /* The additional cost of generating the partial connectivity data isn't justified
+ * when all data needs to be updated.
+ *
+ * While proportional editing can cause all geometry to need updating with a partial
+ * selection, it's impractical to calculate this ahead of time. Further, the downside of
+ * using partial updates when they're not needed is negligible. */
+ if (em->bm->totvert == em->bm->totvertsel) {
+ BKE_editmesh_looptri_and_normals_calc(em);
+ return;
+ }
+ }
+
+ /* Not matching. */
+ if (partial_for_looptri != PARTIAL_NONE) {
+ BMPartialUpdate *bmpinfo = tc_mesh_partial_ensure(t, tc, partial_for_looptri);
+ BKE_editmesh_looptri_calc_with_partial_ex(em,
+ bmpinfo,
+ &(const struct BMeshCalcTessellation_Params){
+ .face_normals = true,
+ });
+ }
+
+ if (partial_for_normals != PARTIAL_NONE) {
+ BMPartialUpdate *bmpinfo = tc_mesh_partial_ensure(t, tc, partial_for_normals);
+ /* While not a large difference, take advantage of existing normals where possible. */
+ const bool face_normals = !((partial_for_looptri == PARTIAL_TYPE_ALL) ||
+ ((partial_for_looptri == PARTIAL_TYPE_GROUP) &&
+ (partial_for_normals == PARTIAL_TYPE_GROUP)));
+ BM_mesh_normals_update_with_partial_ex(em->bm,
+ bmpinfo,
+ &(const struct BMeshNormalsUpdate_Params){
+ .face_normals = face_normals,
+ });
+ }
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Recalc Mesh Data
+ * \{ */
+
static void tc_mesh_transdata_mirror_apply(TransDataContainer *tc)
{
if (tc->use_mirror_axis_any) {
@@ -1831,23 +2059,13 @@ void recalcData_mesh(TransInfo *t)
tc_mesh_customdatacorrect_restore(t);
}
+ enum ePartialType partial_for_looptri, partial_for_normals;
+ tc_mesh_partial_types_calc(t, &partial_for_looptri, &partial_for_normals);
+
FOREACH_TRANS_DATA_CONTAINER (t, tc) {
DEG_id_tag_update(tc->obedit->data, ID_RECALC_GEOMETRY);
- BMEditMesh *em = BKE_editmesh_from_object(tc->obedit);
- /* The additional cost of generating the partial connectivity data isn't justified
- * when all data needs to be updated.
- *
- * While proportional editing can cause all geometry to need updating with a partial selection.
- * It's impractical to calculate this ahead of time.
- * Further, the down side of using partial updates when their not needed is negligible. */
- if (em->bm->totvert == em->bm->totvertsel) {
- BKE_editmesh_looptri_and_normals_calc(em);
- }
- else {
- BMPartialUpdate *partial_update_cache = tc_mesh_ensure_partial_update(t, tc);
- BKE_editmesh_looptri_and_normals_calc_with_partial(em, partial_update_cache);
- }
+ tc_mesh_partial_update(t, tc, partial_for_looptri, partial_for_normals);
}
}
/** \} */
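A worked example of the `verts_group` encoding used in tc_mesh_partial_ensure
above (illustrative only; the function and local names below are hypothetical,
while the bit layout follows the patch code and transform_data.h):

    #include <assert.h>

    static void verts_group_encoding_example(void)
    {
      const int shift = 12;                 /* TD_MIRROR_EDGE_AXIS_SHIFT. */
      const int td_mirror_edge_x = 1 << 12; /* TD_MIRROR_EDGE_X. */
      const int td_mirror_x = 1 << 12;      /* TD_MIRROR_X (aliases the same bit). */

      const int group_tagged = 1;                                     /* Plainly tagged. */
      const int group_plane = 1 | ((td_mirror_edge_x >> shift) << 1); /* On the X plane: 3. */
      const int group_mirror = (td_mirror_x >> shift) << 4;           /* Mirrored copy: 16. */

      /* The three regions get distinct groups, so faces bridging the mirror
       * plane or the mirror seam are recalculated, while faces entirely
       * inside one region are skipped. */
      assert(group_tagged != group_plane && group_plane != group_mirror);
    }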
diff --git a/source/blender/editors/transform/transform_data.h b/source/blender/editors/transform/transform_data.h
index 5b01433c96b..59b76c2eec5 100644
--- a/source/blender/editors/transform/transform_data.h
+++ b/source/blender/editors/transform/transform_data.h
@@ -173,10 +173,12 @@ enum {
/** For Graph Editor - curves that can only have int-values
* need their keyframes tagged with this. */
TD_INTVALUES = 1 << 11,
+#define TD_MIRROR_AXIS_SHIFT 12
/** For editmode mirror. */
TD_MIRROR_X = 1 << 12,
TD_MIRROR_Y = 1 << 13,
TD_MIRROR_Z = 1 << 14,
+#define TD_MIRROR_EDGE_AXIS_SHIFT 12
/** For editmode mirror, clamp axis to 0 */
TD_MIRROR_EDGE_X = 1 << 12,
TD_MIRROR_EDGE_Y = 1 << 13,
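Since TD_MIRROR_X and TD_MIRROR_EDGE_X alias the same bit range (the flags are
disambiguated by which array the element lives in: `tc->data_mirror` versus
`tc->data`), the two shift macros are necessarily equal. A hedged compile-time
check one could add (not in the patch):

    BLI_STATIC_ASSERT(TD_MIRROR_AXIS_SHIFT == TD_MIRROR_EDGE_AXIS_SHIFT,
                      "mirror flag ranges must alias");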