git.blender.org/blender.git (mirror)
author    Campbell Barton <ideasman42@gmail.com>  2016-05-30 08:31:31 +0300
committer Campbell Barton <ideasman42@gmail.com>  2016-05-30 09:27:15 +0300
commit    91bfdacde262c4df18ade2e5266e5c969bb5f3af (patch)
tree      d294f0e2a19e7f7e230ad7b911722e720fb382de /source/blender/editors/mesh
parent    8a7d1f3b3cadfbd3a62fee66480d568d5e024d2e (diff)
Editmesh undo memory optimization
Previously a whole mesh was stored for each undo step. This commit uses BLI_array_store to de-duplicate memory between undo steps. The memory saved depends entirely on the kinds of edits performed; in my own tests, 5x-15x less memory use is common. Compacting the memory does add some overhead, however it is done in a background thread, so it is not blocking in most cases. The new behavior and threading can be ifdef'd out to check for regressions. See D2026 for details.
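
To make the approach concrete before the diff: BLI_array_store splits arrays into fixed-stride chunks and shares identical chunks between stored states by reference. The following minimal sketch is not part of the patch — the vertex arrays, sizes, and function name are illustrative assumptions — but every BLI_array_store call in it appears in the diff below.

#include "BLI_array_store.h"
#include "MEM_guardedalloc.h"

/* Hedged sketch of the de-duplication round-trip, assuming two
 * successive vertex-coordinate snapshots 'verts_a' and 'verts_b'. */
static void array_store_roundtrip_example(
        const float (*verts_a)[3], const float (*verts_b)[3], const size_t verts_len)
{
	const int stride = sizeof(float[3]);

	/* One store handles every array of a given stride. */
	BArrayStore *bs = BLI_array_store_create(stride, 256);

	/* First snapshot: no reference state, so all chunks are newly stored. */
	BArrayState *state_a = BLI_array_store_state_add(
	        bs, verts_a, verts_len * stride, NULL);

	/* Second snapshot: passing 'state_a' as the reference lets
	 * unchanged chunks be shared instead of copied. */
	BArrayState *state_b = BLI_array_store_state_add(
	        bs, verts_b, verts_len * stride, state_a);

	/* Undo: expand a state back into a freshly allocated array
	 * ('state_len' comes back as verts_len * stride). */
	size_t state_len;
	float (*verts_restored)[3] = BLI_array_store_state_data_get_alloc(state_b, &state_len);
	/* ... use 'verts_restored', then free it ... */
	MEM_freeN(verts_restored);

	/* Removing a state frees only chunks no other state references. */
	BLI_array_store_state_remove(bs, state_a);
	BLI_array_store_state_remove(bs, state_b);
	BLI_array_store_destroy(bs);
}

This reference-passing is exactly what um_arraystore_cd_compact() below does per custom-data layer, using the matching layer of the previous undo step as the reference state.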
Diffstat (limited to 'source/blender/editors/mesh')
-rw-r--r--  source/blender/editors/mesh/editmesh_undo.c  581
1 file changed, 567 insertions(+), 14 deletions(-)
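
One detail worth calling out before the diff is how array_store_at_size_ensure() derives the chunk count from the element stride, so each chunk's backing allocation lands on an allocator-friendly size. A worked sketch of that calculation — the stride value is an illustrative assumption; the two helpers are the ones the patch itself calls:

#include "BLI_math_base.h"     /* power_of_2_max_u */
#include "MEM_guardedalloc.h"  /* MEM_SIZE_OPTIMAL */

#define ARRAY_CHUNK_SIZE 256

static unsigned int chunk_count_for_stride(const int stride)
{
	unsigned int chunk_count = ARRAY_CHUNK_SIZE;
	unsigned int size = chunk_count * stride;
	/* round the chunk's byte size up to a power of two,
	 * then trim it so the allocation (header included) stays optimal */
	size = power_of_2_max_u(size);
	size = MEM_SIZE_OPTIMAL(size);
	return size / stride;
}

/* e.g. a float[3] vertex-coordinate layer: stride == 12,
 * 256 * 12 = 3072 bytes, rounded up to 4096, then trimmed by the
 * allocator header, giving ~340 elements per chunk instead of 256. */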
diff --git a/source/blender/editors/mesh/editmesh_undo.c b/source/blender/editors/mesh/editmesh_undo.c
index 287ee97e9a0..b9d3fd6c8be 100644
--- a/source/blender/editors/mesh/editmesh_undo.c
+++ b/source/blender/editors/mesh/editmesh_undo.c
@@ -39,18 +39,39 @@
#include "ED_mesh.h"
#include "ED_util.h"
+#define USE_ARRAY_STORE
-/* for callbacks */
+#ifdef USE_ARRAY_STORE
+// # define DEBUG_PRINT
+// # define DEBUG_TIME
+# ifdef DEBUG_TIME
+# include "PIL_time_utildefines.h"
+# endif
-static void *getEditMesh(bContext *C)
-{
- Object *obedit = CTX_data_edit_object(C);
- if (obedit && obedit->type == OB_MESH) {
- Mesh *me = obedit->data;
- return me->edit_btmesh;
- }
- return NULL;
-}
+# include "BLI_array_store.h"
+# include "BLI_math_base.h"
+ /* check on best size later... */
+# define ARRAY_CHUNK_SIZE 256
+
+# define USE_ARRAY_STORE_THREAD
+#endif
+
+#ifdef USE_ARRAY_STORE_THREAD
+# include "BLI_task.h"
+#endif
+
+
+#ifdef USE_ARRAY_STORE
+
+/* Single linked list of layers stored per type */
+typedef struct BArrayCustomData {
+ struct BArrayCustomData *next;
+ CustomDataType type;
+ int states_len; /* number of layers for each type */
+ BArrayState *states[0];
+} BArrayCustomData;
+
+#endif
typedef struct UndoMesh {
Mesh me;
@@ -65,11 +86,463 @@ typedef struct UndoMesh {
* There are a few ways this could be made to work but for now it's a known limitation with mixing
* object and editmode operations - Campbell */
int shapenr;
+
+#ifdef USE_ARRAY_STORE
+ /* NULL arrays are considered empty */
+ struct {
+ /* most data is stored as 'custom' data */
+ BArrayCustomData *vdata, *edata, *ldata, *pdata;
+ BArrayState **keyblocks;
+ BArrayState *mselect;
+ } store;
+#endif /* USE_ARRAY_STORE */
} UndoMesh;
+
+#ifdef USE_ARRAY_STORE
+
+/** \name Array Store
+ * \{ */
+
+static struct {
+ BArrayStore **bs_all;
+ int bs_all_len;
+ int users;
+
+ /* We could have the undo API pass in the previous state; for now, store a local list */
+ ListBase local_links;
+
+#ifdef USE_ARRAY_STORE_THREAD
+ TaskPool *task_pool;
+#endif
+
+} um_arraystore = {NULL};
+
+static BArrayStore *array_store_at_size_ensure(const int stride)
+{
+ if (um_arraystore.bs_all_len < stride) {
+ um_arraystore.bs_all_len = stride;
+ um_arraystore.bs_all = MEM_recallocN(um_arraystore.bs_all, sizeof(*um_arraystore.bs_all) * stride);
+ }
+ BArrayStore **bs_p = &um_arraystore.bs_all[stride - 1];
+
+ if ((*bs_p) == NULL) {
+#if 0
+ unsigned int chunk_count = ARRAY_CHUNK_SIZE;
+#else
+ /* calculate best chunk-count to fit a power of two */
+ unsigned int chunk_count = ARRAY_CHUNK_SIZE;
+ {
+ unsigned int size = chunk_count * stride;
+ size = power_of_2_max_u(size);
+ size = MEM_SIZE_OPTIMAL(size);
+ chunk_count = size / stride;
+ }
+#endif
+
+ (*bs_p) = BLI_array_store_create(stride, chunk_count);
+ }
+ return *bs_p;
+}
+
+static BArrayStore *array_store_at_size_get(const int stride)
+{
+ BLI_assert(stride > 0 && stride <= um_arraystore.bs_all_len);
+ return um_arraystore.bs_all[stride - 1];
+}
+
+#ifdef DEBUG_PRINT
+static void um_arraystore_memory_usage(size_t *r_size_expanded, size_t *r_size_compacted)
+{
+ size_t size_compacted = 0;
+ size_t size_expanded = 0;
+ for (int i = 0; i < um_arraystore.bs_all_len; i++) {
+ BArrayStore *bs = um_arraystore.bs_all[i];
+ if (bs) {
+ size_compacted += BLI_array_store_calc_size_compacted_get(bs);
+ size_expanded += BLI_array_store_calc_size_expanded_get(bs);
+ }
+ }
+
+ *r_size_expanded = size_expanded;
+ *r_size_compacted = size_compacted;
+}
+#endif
+
+static void um_arraystore_cd_compact(
+ struct CustomData *cdata, const size_t data_len,
+ bool create,
+ const BArrayCustomData *bcd_reference,
+ BArrayCustomData **r_bcd_first)
+{
+ if (data_len == 0) {
+ if (create) {
+ *r_bcd_first = NULL;
+ }
+ }
+
+ const BArrayCustomData *bcd_reference_current = bcd_reference;
+ BArrayCustomData *bcd = NULL, *bcd_first = NULL, *bcd_prev = NULL;
+ for (int layer_start = 0, layer_end; layer_start < cdata->totlayer; layer_start = layer_end) {
+ const CustomDataType type = cdata->layers[layer_start].type;
+
+ layer_end = layer_start + 1;
+ while ((layer_end < cdata->totlayer) &&
+ (type == cdata->layers[layer_end].type))
+ {
+ layer_end++;
+ }
+
+ const int stride = CustomData_sizeof(type);
+ BArrayStore *bs = create ? array_store_at_size_ensure(stride) : NULL;
+ const int layer_len = layer_end - layer_start;
+
+ if (create) {
+ if (bcd_reference_current && (bcd_reference_current->type == type)) {
+ /* common case, the reference is aligned */
+ }
+ else {
+ bcd_reference_current = NULL;
+
+ /* do a full lookup when un-aligned */
+ if (bcd_reference) {
+ const BArrayCustomData *bcd_iter = bcd_reference;
+ while (bcd_iter) {
+ if (bcd_iter->type == type) {
+ bcd_reference_current = bcd_iter;
+ break;
+ }
+ bcd_iter = bcd_iter->next;
+ }
+ }
+ }
+ }
+
+ if (create) {
+ bcd = MEM_callocN(sizeof(BArrayCustomData) + (layer_len * sizeof(BArrayState *)), __func__);
+ bcd->next = NULL;
+ bcd->type = type;
+ bcd->states_len = layer_end - layer_start;
+
+ if (bcd_prev) {
+ bcd_prev->next = bcd;
+ bcd_prev = bcd;
+ }
+ else {
+ bcd_first = bcd;
+ bcd_prev = bcd;
+ }
+ }
+
+ CustomDataLayer *layer = &cdata->layers[layer_start];
+ for (int i = 0; i < layer_len; i++, layer++) {
+ if (create) {
+ if (layer->data) {
+ BArrayState *state_reference =
+ (bcd_reference_current && i < bcd_reference_current->states_len) ?
+ bcd_reference_current->states[i] : NULL;
+ bcd->states[i] = BLI_array_store_state_add(
+ bs, layer->data, (size_t)data_len * stride, state_reference);
+ }
+ else {
+ bcd->states[i] = NULL;
+ }
+ }
+
+ if (layer->data) {
+ MEM_freeN(layer->data);
+ layer->data = NULL;
+ }
+ }
+
+ if (create) {
+ if (bcd_reference_current) {
+ bcd_reference_current = bcd_reference_current->next;
+ }
+ }
+ }
+
+ if (create) {
+ *r_bcd_first = bcd_first;
+ }
+}
+
+/**
+ * \note There is no room for data going out of sync here.
+ * The layers and the states are stored together so this can be kept working.
+ */
+static void um_arraystore_cd_expand(
+ const BArrayCustomData *bcd, struct CustomData *cdata, const size_t data_len)
+{
+ CustomDataLayer *layer = cdata->layers;
+ while (bcd) {
+ const int stride = CustomData_sizeof(bcd->type);
+ for (int i = 0; i < bcd->states_len; i++) {
+ BLI_assert(bcd->type == layer->type);
+ if (bcd->states[i]) {
+ size_t state_len;
+ layer->data = BLI_array_store_state_data_get_alloc(bcd->states[i], &state_len);
+ BLI_assert(stride * data_len == state_len);
+ UNUSED_VARS_NDEBUG(stride, data_len);
+ }
+ else {
+ layer->data = NULL;
+ }
+ layer++;
+ }
+ bcd = bcd->next;
+ }
+}
+
+static void um_arraystore_cd_free(BArrayCustomData *bcd)
+{
+ while (bcd) {
+ BArrayCustomData *bcd_next = bcd->next;
+ const int stride = CustomData_sizeof(bcd->type);
+ BArrayStore *bs = array_store_at_size_get(stride);
+ for (int i = 0; i < bcd->states_len; i++) {
+ if (bcd->states[i]) {
+ BLI_array_store_state_remove(bs, bcd->states[i]);
+ }
+ }
+ MEM_freeN(bcd);
+ bcd = bcd_next;
+ }
+}
+
+/**
+ * \param create: When false, only free the arrays.
+ * This is needed because reading from an undo state temporarily expands the arrays,
+ * which are discarded again afterwards; having this argument avoids two near-duplicate code paths.
+ */
+static void um_arraystore_compact_ex(
+ UndoMesh *um, const UndoMesh *um_ref,
+ bool create)
+{
+ Mesh *me = &um->me;
+
+ um_arraystore_cd_compact(&me->vdata, me->totvert, create, um_ref ? um_ref->store.vdata : NULL, &um->store.vdata);
+ um_arraystore_cd_compact(&me->edata, me->totedge, create, um_ref ? um_ref->store.edata : NULL, &um->store.edata);
+ um_arraystore_cd_compact(&me->ldata, me->totloop, create, um_ref ? um_ref->store.ldata : NULL, &um->store.ldata);
+ um_arraystore_cd_compact(&me->pdata, me->totpoly, create, um_ref ? um_ref->store.pdata : NULL, &um->store.pdata);
+
+ if (me->key && me->key->totkey) {
+ const size_t stride = me->key->elemsize;
+ BArrayStore *bs = create ? array_store_at_size_ensure(stride) : NULL;
+ if (create) {
+ um->store.keyblocks = MEM_mallocN(me->key->totkey * sizeof(*um->store.keyblocks), __func__);
+ }
+ KeyBlock *keyblock = me->key->block.first;
+ for (int i = 0; i < me->key->totkey; i++, keyblock = keyblock->next) {
+ if (create) {
+ BArrayState *state_reference =
+ (um_ref && um_ref->me.key && (i < um_ref->me.key->totkey)) ?
+ um_ref->store.keyblocks[i] : NULL;
+ um->store.keyblocks[i] = BLI_array_store_state_add(
+ bs, keyblock->data, (size_t)keyblock->totelem * stride,
+ state_reference);
+ }
+
+ if (keyblock->data) {
+ MEM_freeN(keyblock->data);
+ keyblock->data = NULL;
+ }
+ }
+ }
+
+ if (me->mselect && me->totselect) {
+ BLI_assert(create == (um->store.mselect == NULL));
+ if (create) {
+ BArrayState *state_reference = um_ref ? um_ref->store.mselect : NULL;
+ const size_t stride = sizeof(*me->mselect);
+ BArrayStore *bs = array_store_at_size_ensure(stride);
+ um->store.mselect = BLI_array_store_state_add(
+ bs, me->mselect, (size_t)me->totselect * stride, state_reference);
+ }
+
+ /* keep me->totselect for validation */
+ MEM_freeN(me->mselect);
+ me->mselect = NULL;
+ }
+
+ if (create) {
+ um_arraystore.users += 1;
+ }
+
+ BKE_mesh_update_customdata_pointers(me, false);
+}
+
+/**
+ * Move data from allocated arrays to de-duplicated states and clear arrays.
+ */
+static void um_arraystore_compact(UndoMesh *um, const UndoMesh *um_ref)
+{
+ um_arraystore_compact_ex(um, um_ref, true);
+}
+
+static void um_arraystore_compact_with_info(UndoMesh *um, const UndoMesh *um_ref)
+{
+#ifdef DEBUG_PRINT
+ size_t size_expanded_prev, size_compacted_prev;
+ um_arraystore_memory_usage(&size_expanded_prev, &size_compacted_prev);
+#endif
+
+#ifdef DEBUG_TIME
+ TIMEIT_START(mesh_undo_compact);
+#endif
+
+ um_arraystore_compact(um, um_ref);
+
+#ifdef DEBUG_TIME
+ TIMEIT_END(mesh_undo_compact);
+#endif
+
+#ifdef DEBUG_PRINT
+ {
+ size_t size_expanded, size_compacted;
+ um_arraystore_memory_usage(&size_expanded, &size_compacted);
+
+ const double percent_total = size_expanded ?
+ (((double)size_compacted / (double)size_expanded) * 100.0) : -1.0;
+
+ size_t size_expanded_step = size_expanded - size_expanded_prev;
+ size_t size_compacted_step = size_compacted - size_compacted_prev;
+ const double percent_step = size_expanded_step ?
+ (((double)size_compacted_step / (double)size_expanded_step) * 100.0) : -1.0;
+
+ printf("overall memory use: %.8f%% of expanded size\n", percent_total);
+ printf("step memory use: %.8f%% of expanded size\n", percent_step);
+ }
+#endif
+}
+
+#ifdef USE_ARRAY_STORE_THREAD
+
+struct UMArrayData {
+ UndoMesh *um;
+ const UndoMesh *um_ref; /* can be NULL */
+};
+static void um_arraystore_compact_cb(TaskPool *UNUSED(pool), void *taskdata, int UNUSED(threadid))
+{
+ struct UMArrayData *um_data = taskdata;
+ um_arraystore_compact_with_info(um_data->um, um_data->um_ref);
+}
+
+#endif /* USE_ARRAY_STORE_THREAD */
+
+/**
+ * Remove data we only expanded for temporary use.
+ */
+static void um_arraystore_expand_clear(UndoMesh *um)
+{
+ um_arraystore_compact_ex(um, NULL, false);
+}
+
+static void um_arraystore_expand(UndoMesh *um)
+{
+ Mesh *me = &um->me;
+
+ um_arraystore_cd_expand(um->store.vdata, &me->vdata, me->totvert);
+ um_arraystore_cd_expand(um->store.edata, &me->edata, me->totedge);
+ um_arraystore_cd_expand(um->store.ldata, &me->ldata, me->totloop);
+ um_arraystore_cd_expand(um->store.pdata, &me->pdata, me->totpoly);
+
+ if (um->store.keyblocks) {
+ const size_t stride = me->key->elemsize;
+ KeyBlock *keyblock = me->key->block.first;
+ for (int i = 0; i < me->key->totkey; i++, keyblock = keyblock->next) {
+ BArrayState *state = um->store.keyblocks[i];
+ size_t state_len;
+ keyblock->data = BLI_array_store_state_data_get_alloc(state, &state_len);
+ BLI_assert(keyblock->totelem == (state_len / stride));
+ UNUSED_VARS_NDEBUG(stride);
+ }
+ }
+
+ if (um->store.mselect) {
+ const size_t stride = sizeof(*me->mselect);
+ BArrayState *state = um->store.mselect;
+ size_t state_len;
+ me->mselect = BLI_array_store_state_data_get_alloc(state, &state_len);
+ BLI_assert(me->totselect == (state_len / stride));
+ UNUSED_VARS_NDEBUG(stride);
+ }
+
+ /* not essential, but prevents accidental dangling pointer access */
+ BKE_mesh_update_customdata_pointers(me, false);
+}
+
+static void um_arraystore_free(UndoMesh *um)
+{
+ Mesh *me = &um->me;
+
+ um_arraystore_cd_free(um->store.vdata);
+ um_arraystore_cd_free(um->store.edata);
+ um_arraystore_cd_free(um->store.ldata);
+ um_arraystore_cd_free(um->store.pdata);
+
+ if (um->store.keyblocks) {
+ const size_t stride = me->key->elemsize;
+ BArrayStore *bs = array_store_at_size_get(stride);
+ for (int i = 0; i < me->key->totkey; i++) {
+ BArrayState *state = um->store.keyblocks[i];
+ BLI_array_store_state_remove(bs, state);
+ }
+ MEM_freeN(um->store.keyblocks);
+ um->store.keyblocks = NULL;
+ }
+
+ if (um->store.mselect) {
+ const size_t stride = sizeof(*me->mselect);
+ BArrayStore *bs = array_store_at_size_get(stride);
+ BArrayState *state = um->store.mselect;
+ BLI_array_store_state_remove(bs, state);
+ um->store.mselect = NULL;
+ }
+
+ um_arraystore.users -= 1;
+
+ BLI_assert(um_arraystore.users >= 0);
+
+ if (um_arraystore.users == 0) {
+#ifdef DEBUG_PRINT
+ printf("mesh undo store: freeing all data!\n");
+#endif
+ for (int i = 0; i < um_arraystore.bs_all_len; i += 1) {
+ if (um_arraystore.bs_all[i]) {
+ BLI_array_store_destroy(um_arraystore.bs_all[i]);
+ }
+ }
+
+ MEM_freeN(um_arraystore.bs_all);
+ um_arraystore.bs_all = NULL;
+ um_arraystore.bs_all_len = 0;
+
+#ifdef USE_ARRAY_STORE_THREAD
+ BLI_task_pool_free(um_arraystore.task_pool);
+ um_arraystore.task_pool = NULL;
+#endif
+ }
+
+}
+
+/** \} */
+
+#endif /* USE_ARRAY_STORE */
+
+
+/* for callbacks */
/* undo simply makes copies of a bmesh */
static void *editbtMesh_to_undoMesh(void *emv, void *obdata)
{
+
+#ifdef USE_ARRAY_STORE_THREAD
+ /* the chance this has to wait is low, but the task must have finished */
+ if (um_arraystore.task_pool) {
+ BLI_task_pool_work_and_wait(um_arraystore.task_pool);
+ }
+#endif
+
BMEditMesh *em = emv;
Mesh *obme = obdata;
@@ -88,17 +561,63 @@ static void *editbtMesh_to_undoMesh(void *emv, void *obdata)
um->selectmode = em->selectmode;
um->shapenr = em->bm->shapenr;
+#ifdef USE_ARRAY_STORE
+ {
+ /* We could be more clever here;
+ * the previous undo state may be from a separate mesh. */
+ const UndoMesh *um_ref = um_arraystore.local_links.last ?
+ ((LinkData *)um_arraystore.local_links.last)->data : NULL;
+
+ /* add ourselves */
+ BLI_addtail(&um_arraystore.local_links, BLI_genericNodeN(um));
+
+#ifdef USE_ARRAY_STORE_THREAD
+ if (um_arraystore.task_pool == NULL) {
+ TaskScheduler *scheduler = BLI_task_scheduler_get();
+ um_arraystore.task_pool = BLI_task_pool_create_background(scheduler, NULL);
+ }
+
+ struct UMArrayData *um_data = MEM_mallocN(sizeof(*um_data), __func__);
+ um_data->um = um;
+ um_data->um_ref = um_ref;
+
+ BLI_task_pool_push(
+ um_arraystore.task_pool,
+ um_arraystore_compact_cb, um_data, true, TASK_PRIORITY_LOW);
+#else
+ um_arraystore_compact_with_info(um, um_ref);
+#endif
+ }
+#endif
+
return um;
}
-static void undoMesh_to_editbtMesh(void *umv, void *em_v, void *obdata)
+static void undoMesh_to_editbtMesh(void *um_v, void *em_v, void *obdata)
{
BMEditMesh *em = em_v, *em_tmp;
Object *ob = em->ob;
- UndoMesh *um = umv;
+ UndoMesh *um = um_v;
BMesh *bm;
Key *key = ((Mesh *) obdata)->key;
+#ifdef USE_ARRAY_STORE
+#ifdef USE_ARRAY_STORE_THREAD
+ /* the chance this has to wait is low, but the task must have finished */
+ BLI_task_pool_work_and_wait(um_arraystore.task_pool);
+#endif
+
+#ifdef DEBUG_TIME
+ TIMEIT_START(mesh_undo_expand);
+#endif
+
+ um_arraystore_expand(um);
+
+#ifdef DEBUG_TIME
+ TIMEIT_END(mesh_undo_expand);
+#endif
+#endif /* USE_ARRAY_STORE */
+
const BMAllocTemplate allocsize = BMALLOC_TEMPLATE_FROM_ME(&um->me);
em->bm->shapenr = um->shapenr;
@@ -145,11 +664,35 @@ static void undoMesh_to_editbtMesh(void *umv, void *em_v, void *obdata)
ob->shapenr = um->shapenr;
MEM_freeN(em_tmp);
+
+#ifdef USE_ARRAY_STORE
+ um_arraystore_expand_clear(um);
+#endif
}
-static void free_undo(void *me_v)
+static void free_undo(void *um_v)
{
- Mesh *me = me_v;
+ UndoMesh *um = um_v;
+ Mesh *me = &um->me;
+
+#ifdef USE_ARRAY_STORE
+
+#ifdef USE_ARRAY_STORE_THREAD
+ /* the chance this has to wait is low, but the task must have finished */
+ BLI_task_pool_work_and_wait(um_arraystore.task_pool);
+#endif
+
+ /* we need to expand so any allocations in custom-data are freed with the mesh */
+ um_arraystore_expand(um);
+
+ {
+ LinkData *link = BLI_findptr(&um_arraystore.local_links, um, offsetof(LinkData, data));
+ BLI_remlink(&um_arraystore.local_links, link);
+ MEM_freeN(link);
+ }
+ um_arraystore_free(um);
+#endif
+
if (me->key) {
BKE_key_free(me->key);
MEM_freeN(me->key);
@@ -159,6 +702,16 @@ static void free_undo(void *me_v)
MEM_freeN(me);
}
+static void *getEditMesh(bContext *C)
+{
+ Object *obedit = CTX_data_edit_object(C);
+ if (obedit && obedit->type == OB_MESH) {
+ Mesh *me = obedit->data;
+ return me->edit_btmesh;
+ }
+ return NULL;
+}
+
/* and this is all the undo system needs to know */
void undo_push_mesh(bContext *C, const char *name)
{