diff options
author | Campbell Barton <ideasman42@gmail.com> | 2021-06-25 10:03:14 +0300 |
---|---|---|
committer | Campbell Barton <ideasman42@gmail.com> | 2021-06-26 10:07:05 +0300 |
commit | b5542c1ea4c29c56338706158578c41f6e65df5c (patch) | |
tree | 2d6c0a62a7f9b91a5172ef96ef1076904566545f /source/blender/editors | |
parent | c1fe58244646c7ecc58fba1bdbf7c511750b14c9 (diff) |
Edit Mesh: optimize common use-cases for partial updates
Skip updating normals & tessellation for contiguous geometry regions
for operations such as translate & uniform scale.
This means when all geometry is selected, no updates are needed
as the relative locations of vertices aren't being modified.
Performance:
As this is skipping a multi-threaded operation,
larger improvements are noticeable on systems with fewer cores.
- ~1.15x to ~1.3x overall gain for 32 cores.
- ~1.7x to ~2.2x overall gain for 1 core (limited using `-t 1` argument).
Details:
- Rotate & non-uniform scale only skip tessellation.
- Proportional editing and axis-mirror have special handling
to ensure geometry is properly grouped before considering
a face part of a single group that can be skipped.
- Loose vertices always need their normals to be recalculated
since they're calculated based on the location.
- Non-affine transform operations such as shrink-fatten & bend,
don't take advantage of this optimization.
- Snap projection also disables the optimization.
Diffstat (limited to 'source/blender/editors')
-rw-r--r-- | source/blender/editors/transform/transform_convert_mesh.c | 362 | ||||
-rw-r--r-- | source/blender/editors/transform/transform_data.h | 2 |
2 files changed, 292 insertions, 72 deletions
diff --git a/source/blender/editors/transform/transform_convert_mesh.c b/source/blender/editors/transform/transform_convert_mesh.c index 4c674136b6a..58cf82c72d3 100644 --- a/source/blender/editors/transform/transform_convert_mesh.c +++ b/source/blender/editors/transform/transform_convert_mesh.c @@ -27,6 +27,7 @@ #include "MEM_guardedalloc.h" #include "BLI_alloca.h" +#include "BLI_bitmap.h" #include "BLI_linklist_stack.h" #include "BLI_math.h" #include "BLI_memarena.h" @@ -59,16 +60,37 @@ static void tc_mesh_customdata_free_fn(struct TransInfo *t, struct TransCustomDataLayer; static void tc_mesh_customdatacorrect_free(struct TransCustomDataLayer *tcld); +struct TransCustomData_PartialUpdate { + struct BMPartialUpdate *cache; + + /** The size of proportional editing used for #BMPartialUpdate. */ + float prop_size; + /** The size of proportional editing for the last update. */ + float prop_size_prev; +}; + +enum ePartialType { + PARTIAL_NONE = -1, + /** + * Update for all tagged vertices (any kind of deformation). + * Use as a default since it can be used with any kind of deformation. + */ + PARTIAL_TYPE_ALL = 0, + /** + * Update only faces between tagged and non-tagged faces (affine transformations). + * Use when transforming is guaranteed not to change the relative locations of vertices. + * + * This has the advantage that selecting the entire mesh or only isolated elements, + * can skip normal/tessellation updates entirely, so it's worth using when possible. + */ + PARTIAL_TYPE_GROUP = 1, + +}; +#define PARTIAL_TYPE_MAX 2 + struct TransCustomDataMesh { struct TransCustomDataLayer *cd_layer_correct; - struct { - struct BMPartialUpdate *cache; - - /** The size of proportional editing used for `partial_update_cache`. */ - float prop_size; - /** The size of proportional editing for the last update. 
*/ - float prop_size_prev; - } partial_update; + struct TransCustomData_PartialUpdate partial_update[PARTIAL_TYPE_MAX]; }; static struct TransCustomDataMesh *tc_mesh_customdata_ensure(TransDataContainer *tc) @@ -90,8 +112,10 @@ static void tc_mesh_customdata_free(struct TransCustomDataMesh *tcmd) tc_mesh_customdatacorrect_free(tcmd->cd_layer_correct); } - if (tcmd->partial_update.cache != NULL) { - BM_mesh_partial_destroy(tcmd->partial_update.cache); + for (int i = 0; i < ARRAY_SIZE(tcmd->partial_update); i++) { + if (tcmd->partial_update[i].cache != NULL) { + BM_mesh_partial_destroy(tcmd->partial_update[i].cache); + } } MEM_freeN(tcmd); @@ -1676,19 +1700,18 @@ void createTransEditVerts(TransInfo *t) /** \} */ /* -------------------------------------------------------------------- */ -/** \name Recalc Mesh Data +/** \name Recalc Mesh Data (Partial Update) * \{ */ -static bool bm_vert_tag_filter_fn(BMVert *v, void *UNUSED(user_data)) -{ - return BM_elem_flag_test(v, BM_ELEM_TAG); -} - -static BMPartialUpdate *tc_mesh_ensure_partial_update(TransInfo *t, TransDataContainer *tc) +static BMPartialUpdate *tc_mesh_partial_ensure(TransInfo *t, + TransDataContainer *tc, + enum ePartialType partial_type) { struct TransCustomDataMesh *tcmd = tc_mesh_customdata_ensure(tc); - if (tcmd->partial_update.cache) { + struct TransCustomData_PartialUpdate *pupdate = &tcmd->partial_update[partial_type]; + + if (pupdate->cache) { /* Recalculate partial update data when the proportional editing size changes. * @@ -1699,80 +1722,285 @@ static BMPartialUpdate *tc_mesh_ensure_partial_update(TransInfo *t, TransDataCon * Extra logic is needed to account for this situation. */ bool recalc; - if (tcmd->partial_update.prop_size_prev < t->prop_size) { + if (pupdate->prop_size_prev < t->prop_size) { /* Size increase, simply recalculate. 
*/ recalc = true; } - else if (tcmd->partial_update.prop_size_prev > t->prop_size) { + else if (pupdate->prop_size_prev > t->prop_size) { /* Size decreased, first use this partial data since reducing the size will transform * geometry which needs recalculating. */ - tcmd->partial_update.prop_size_prev = t->prop_size; + pupdate->prop_size_prev = t->prop_size; recalc = false; } - else if (tcmd->partial_update.prop_size != t->prop_size) { - BLI_assert(tcmd->partial_update.prop_size > tcmd->partial_update.prop_size_prev); + else if (pupdate->prop_size != t->prop_size) { + BLI_assert(pupdate->prop_size > pupdate->prop_size_prev); recalc = true; } else { - BLI_assert(t->prop_size == tcmd->partial_update.prop_size_prev); + BLI_assert(t->prop_size == pupdate->prop_size_prev); recalc = false; } if (!recalc) { - return tcmd->partial_update.cache; + return pupdate->cache; } - BM_mesh_partial_destroy(tcmd->partial_update.cache); - tcmd->partial_update.cache = NULL; + BM_mesh_partial_destroy(pupdate->cache); + pupdate->cache = NULL; } BMEditMesh *em = BKE_editmesh_from_object(tc->obedit); - BM_mesh_elem_hflag_disable_all(em->bm, BM_VERT, BM_ELEM_TAG, false); + BM_mesh_elem_index_ensure(em->bm, BM_VERT); - int verts_len = 0; - int i; - TransData *td; - for (i = 0, td = tc->data; i < tc->data_len; i++, td++) { - if (td->factor != 0.0f) { - BMVert *v = (BMVert *)td->extra; - BM_elem_flag_enable(v, BM_ELEM_TAG); - verts_len += 1; + /* Only use `verts_group` or `verts_mask`. */ + int *verts_group = NULL; + int verts_group_count = 0; /* Number of non-zero elements in `verts_group`. */ + + BLI_bitmap *verts_mask = NULL; + int verts_mask_count = 0; /* Number of elements enabled in `verts_mask`. 
*/ + + if ((partial_type == PARTIAL_TYPE_GROUP) && + ((t->flag & T_PROP_EDIT) || tc->use_mirror_axis_any)) { + verts_group = MEM_callocN(sizeof(*verts_group) * em->bm->totvert, __func__); + int i; + TransData *td; + for (i = 0, td = tc->data; i < tc->data_len; i++, td++) { + if (td->factor == 0.0f) { + continue; + } + const BMVert *v = (BMVert *)td->extra; + const int v_index = BM_elem_index_get(v); + BLI_assert(verts_group[v_index] == 0); + if (td->factor < 1.0f) { + /* Don't use grouping logic with the factor is under 1.0. */ + verts_group[v_index] = -1; + } + else { + BLI_assert(td->factor == 1.0f); + verts_group[v_index] = 1; + if (tc->use_mirror_axis_any) { + /* Use bits 2-4 for central alignment (don't overlap the first bit). */ + const int flag = td->flag & (TD_MIRROR_EDGE_X | TD_MIRROR_EDGE_Y | TD_MIRROR_EDGE_Z); + verts_group[v_index] |= (flag >> TD_MIRROR_EDGE_AXIS_SHIFT) << 1; + } + } + verts_mask_count += 1; } - } - TransDataMirror *td_mirror = tc->data_mirror; - for (i = 0; i < tc->data_mirror_len; i++, td_mirror++) { - BMVert *v_mirr = (BMVert *)POINTER_OFFSET(td_mirror->loc_src, -offsetof(BMVert, co)); + TransDataMirror *td_mirror = tc->data_mirror; + for (i = 0; i < tc->data_mirror_len; i++, td_mirror++) { + BMVert *v_mirr = (BMVert *)POINTER_OFFSET(td_mirror->loc_src, -offsetof(BMVert, co)); + /* The equality check is to account for the case when topology mirror moves + * the vertex from it's original location to match it's symmetrical position, + * with proportional editing enabled. */ + const int v_mirr_index = BM_elem_index_get(v_mirr); + if (verts_group[v_mirr_index] == 0 && equals_v3v3(td_mirror->loc, td_mirror->iloc)) { + continue; + } - /* The equality check is to account for the case when topology mirror moves - * the vertex from it's original location to match it's symmetrical position, - * with proportional editing enabled. 
*/ - if (BM_elem_flag_test(v_mirr, BM_ELEM_TAG) || !equals_v3v3(td_mirror->loc, td_mirror->iloc)) { BMVert *v_mirr_other = (BMVert *)td_mirror->extra; /* This assert should never fail since there is no overlap * between mirrored vertices and non-mirrored. */ - BLI_assert(!BM_elem_flag_test(v_mirr_other, BM_ELEM_TAG)); - BM_elem_flag_enable(v_mirr_other, BM_ELEM_TAG); - verts_len += 1; + BLI_assert(verts_group[BM_elem_index_get(v_mirr_other)] == 0); + const int v_mirr_other_index = BM_elem_index_get(v_mirr_other); + + if (verts_group[v_mirr_index] == -1) { + verts_group[v_mirr_other_index] = -1; + } + else { + /* Use bits 5-8 for mirror (don't overlap previous bits). */ + const int flag = td_mirror->flag & (TD_MIRROR_X | TD_MIRROR_Y | TD_MIRROR_Z); + verts_group[v_mirr_other_index] |= (flag >> TD_MIRROR_EDGE_AXIS_SHIFT) << 4; + } + verts_mask_count += 1; + } + } + else { + /* See the body of the comments in the previous block for details. */ + verts_mask = BLI_BITMAP_NEW(em->bm->totvert, __func__); + int i; + TransData *td; + for (i = 0, td = tc->data; i < tc->data_len; i++, td++) { + if (td->factor == 0.0f) { + continue; + } + const BMVert *v = (BMVert *)td->extra; + const int v_index = BM_elem_index_get(v); + BLI_assert(!BLI_BITMAP_TEST(verts_mask, v_index)); + BLI_BITMAP_ENABLE(verts_mask, v_index); + verts_mask_count += 1; + } + + TransDataMirror *td_mirror = tc->data_mirror; + for (i = 0; i < tc->data_mirror_len; i++, td_mirror++) { + BMVert *v_mirr = (BMVert *)POINTER_OFFSET(td_mirror->loc_src, -offsetof(BMVert, co)); + if (!BLI_BITMAP_TEST(verts_mask, BM_elem_index_get(v_mirr)) && + equals_v3v3(td_mirror->loc, td_mirror->iloc)) { + continue; + } + + BMVert *v_mirr_other = (BMVert *)td_mirror->extra; + BLI_assert(!BLI_BITMAP_TEST(verts_mask, BM_elem_index_get(v_mirr_other))); + const int v_mirr_other_index = BM_elem_index_get(v_mirr_other); + BLI_BITMAP_ENABLE(verts_mask, v_mirr_other_index); + verts_mask_count += 1; + } + } + + switch (partial_type) { + case 
PARTIAL_TYPE_ALL: { + pupdate->cache = BM_mesh_partial_create_from_verts(em->bm, + &(BMPartialUpdate_Params){ + .do_tessellate = true, + .do_normals = true, + }, + verts_mask, + verts_mask_count); + break; + } + case PARTIAL_TYPE_GROUP: { + pupdate->cache = + (verts_group ? BM_mesh_partial_create_from_verts_group_multi(em->bm, + &(BMPartialUpdate_Params){ + .do_tessellate = true, + .do_normals = true, + }, + verts_group, + verts_group_count) : + BM_mesh_partial_create_from_verts_group_single(em->bm, + &(BMPartialUpdate_Params){ + .do_tessellate = true, + .do_normals = true, + }, + verts_mask, + verts_mask_count)); + break; + } + case PARTIAL_NONE: { + BLI_assert_unreachable(); } } - tcmd->partial_update.cache = BM_mesh_partial_create_from_verts(em->bm, - &(BMPartialUpdate_Params){ - .do_tessellate = true, - .do_normals = true, - }, - verts_len, - bm_vert_tag_filter_fn, - NULL); + if (verts_group) { + MEM_freeN(verts_group); + } + else { + MEM_freeN(verts_mask); + } - tcmd->partial_update.prop_size_prev = t->prop_size; - tcmd->partial_update.prop_size = t->prop_size; + pupdate->prop_size_prev = t->prop_size; + pupdate->prop_size = t->prop_size; - return tcmd->partial_update.cache; + return pupdate->cache; } +static void tc_mesh_partial_types_calc(TransInfo *t, + enum ePartialType *r_partial_for_looptri, + enum ePartialType *r_partial_for_normals) +{ + /* Calculate the kind of partial updates which can be performed. */ + enum ePartialType partial_for_normals = PARTIAL_NONE; + enum ePartialType partial_for_looptri = PARTIAL_NONE; + + /* Note that operations such as #TFM_CREASE are not handled here + * (if they were, leaving as #PARTIAL_NONE would be appropriate). 
*/ + switch (t->mode) { + case TFM_TRANSLATION: { + partial_for_looptri = PARTIAL_TYPE_GROUP; + partial_for_normals = PARTIAL_TYPE_GROUP; + break; + } + case TFM_ROTATION: { + partial_for_looptri = PARTIAL_TYPE_GROUP; + partial_for_normals = PARTIAL_TYPE_ALL; + break; + } + case TFM_RESIZE: { + partial_for_looptri = PARTIAL_TYPE_GROUP; + partial_for_normals = PARTIAL_TYPE_GROUP; + /* Non-uniform scale needs to recalculate all normals + * since their relative locations change. + * Uniform negative scale can keep normals as-is since the faces are flipped, + * normals remain unchanged. */ + if ((t->con.mode & CON_APPLY) || + (t->values_final[0] != t->values_final[1] || t->values_final[0] != t->values_final[2])) { + partial_for_normals = PARTIAL_TYPE_ALL; + } + break; + } + default: { + partial_for_looptri = PARTIAL_TYPE_ALL; + partial_for_normals = PARTIAL_TYPE_ALL; + break; + } + } + + /* With projection, transform isn't affine. */ + if (activeSnap_with_project(t)) { + if (partial_for_looptri == PARTIAL_TYPE_GROUP) { + partial_for_looptri = PARTIAL_TYPE_ALL; + } + if (partial_for_normals == PARTIAL_TYPE_GROUP) { + partial_for_normals = PARTIAL_TYPE_ALL; + } + } + + *r_partial_for_looptri = partial_for_looptri; + *r_partial_for_normals = partial_for_normals; +} + +static void tc_mesh_partial_update(TransInfo *t, + TransDataContainer *tc, + enum ePartialType partial_for_looptri, + enum ePartialType partial_for_normals) +{ + BMEditMesh *em = BKE_editmesh_from_object(tc->obedit); + + /* Matching. */ + if ((partial_for_looptri == PARTIAL_TYPE_ALL) && (partial_for_normals == PARTIAL_TYPE_ALL)) { + /* The additional cost of generating the partial connectivity data isn't justified + * when all data needs to be updated. + * + * While proportional editing can cause all geometry to need updating with a partial + * selection. It's impractical to calculate this ahead of time. Further, the down side of + * using partial updates when their not needed is negligible. 
*/ + if (em->bm->totvert == em->bm->totvertsel) { + BKE_editmesh_looptri_and_normals_calc(em); + return; + } + } + + /* Not matching. */ + if (partial_for_looptri != PARTIAL_NONE) { + BMPartialUpdate *bmpinfo = tc_mesh_partial_ensure(t, tc, partial_for_looptri); + BKE_editmesh_looptri_calc_with_partial_ex(em, + bmpinfo, + &(const struct BMeshCalcTessellation_Params){ + .face_normals = true, + }); + } + + if (partial_for_normals != PARTIAL_NONE) { + BMPartialUpdate *bmpinfo = tc_mesh_partial_ensure(t, tc, partial_for_normals); + /* While not a large difference, take advantage of existing normals where possible. */ + const bool face_normals = !((partial_for_looptri == PARTIAL_TYPE_ALL) || + ((partial_for_looptri == PARTIAL_TYPE_GROUP) && + (partial_for_normals == PARTIAL_TYPE_GROUP))); + BM_mesh_normals_update_with_partial_ex(em->bm, + bmpinfo, + &(const struct BMeshNormalsUpdate_Params){ + .face_normals = face_normals, + }); + } +} + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name Recalc Mesh Data + * \{ */ + static void tc_mesh_transdata_mirror_apply(TransDataContainer *tc) { if (tc->use_mirror_axis_any) { @@ -1831,23 +2059,13 @@ void recalcData_mesh(TransInfo *t) tc_mesh_customdatacorrect_restore(t); } + enum ePartialType partial_for_looptri, partial_for_normals; + tc_mesh_partial_types_calc(t, &partial_for_looptri, &partial_for_normals); + FOREACH_TRANS_DATA_CONTAINER (t, tc) { DEG_id_tag_update(tc->obedit->data, ID_RECALC_GEOMETRY); - BMEditMesh *em = BKE_editmesh_from_object(tc->obedit); - /* The additional cost of generating the partial connectivity data isn't justified - * when all data needs to be updated. - * - * While proportional editing can cause all geometry to need updating with a partial selection. - * It's impractical to calculate this ahead of time. - * Further, the down side of using partial updates when their not needed is negligible. 
*/ - if (em->bm->totvert == em->bm->totvertsel) { - BKE_editmesh_looptri_and_normals_calc(em); - } - else { - BMPartialUpdate *partial_update_cache = tc_mesh_ensure_partial_update(t, tc); - BKE_editmesh_looptri_and_normals_calc_with_partial(em, partial_update_cache); - } + tc_mesh_partial_update(t, tc, partial_for_looptri, partial_for_normals); } } /** \} */ diff --git a/source/blender/editors/transform/transform_data.h b/source/blender/editors/transform/transform_data.h index 5b01433c96b..59b76c2eec5 100644 --- a/source/blender/editors/transform/transform_data.h +++ b/source/blender/editors/transform/transform_data.h @@ -173,10 +173,12 @@ enum { /** For Graph Editor - curves that can only have int-values * need their keyframes tagged with this. */ TD_INTVALUES = 1 << 11, +#define TD_MIRROR_AXIS_SHIFT 12 /** For editmode mirror. */ TD_MIRROR_X = 1 << 12, TD_MIRROR_Y = 1 << 13, TD_MIRROR_Z = 1 << 14, +#define TD_MIRROR_EDGE_AXIS_SHIFT 12 /** For editmode mirror, clamp axis to 0 */ TD_MIRROR_EDGE_X = 1 << 12, TD_MIRROR_EDGE_Y = 1 << 13, |