
git.blender.org/blender.git
author     Campbell Barton <ideasman42@gmail.com>    2019-04-17 07:17:24 +0300
committer  Campbell Barton <ideasman42@gmail.com>    2019-04-17 07:21:24 +0300
commit     e12c08e8d170b7ca40f204a5b0423c23a9fbc2c1 (patch)
tree       8cf3453d12edb177a218ef8009357518ec6cab6a /source/blender/blenkernel/intern/undo_system.c
parent     b3dabc200a4b0399ec6b81f2ff2730d07b44fcaa (diff)
ClangFormat: apply to source, most of intern
Apply clang format as proposed in T53211. For details on usage and instructions for migrating branches without conflicts, see: https://wiki.blender.org/wiki/Tools/ClangFormat
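As a rough illustration only (not the exact migration workflow described on the wiki page above), a single file can typically be reformatted in place with clang-format's -i option, assuming a .clang-format configuration exists at the repository root so that --style=file picks it up:

  clang-format -i --style=file source/blender/blenkernel/intern/undo_system.c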
Diffstat (limited to 'source/blender/blenkernel/intern/undo_system.c')
-rw-r--r--  source/blender/blenkernel/intern/undo_system.c  1248
1 file changed, 631 insertions, 617 deletions
diff --git a/source/blender/blenkernel/intern/undo_system.c b/source/blender/blenkernel/intern/undo_system.c
index 448fe6cb167..d32bc7c6054 100644
--- a/source/blender/blenkernel/intern/undo_system.c
+++ b/source/blender/blenkernel/intern/undo_system.c
@@ -40,7 +40,7 @@
#include "MEM_guardedalloc.h"
-#define undo_stack _wm_undo_stack_disallow /* pass in as a variable always. */
+#define undo_stack _wm_undo_stack_disallow /* pass in as a variable always. */
/** Odd requirement of Blender that we always keep a memfile undo in the stack. */
#define WITH_GLOBAL_UNDO_KEEP_ONE
@@ -68,18 +68,22 @@ static CLG_LogRef LOG = {"bke.undosys"};
#ifdef WITH_NESTED_UNDO_CHECK
static bool g_undo_callback_running = false;
# define UNDO_NESTED_ASSERT(state) BLI_assert(g_undo_callback_running == state)
-# define UNDO_NESTED_CHECK_BEGIN { \
- UNDO_NESTED_ASSERT(false); \
- g_undo_callback_running = true; \
-} ((void)0)
-# define UNDO_NESTED_CHECK_END { \
- UNDO_NESTED_ASSERT(true); \
- g_undo_callback_running = false; \
-} ((void)0)
+# define UNDO_NESTED_CHECK_BEGIN \
+ { \
+ UNDO_NESTED_ASSERT(false); \
+ g_undo_callback_running = true; \
+ } \
+ ((void)0)
+# define UNDO_NESTED_CHECK_END \
+ { \
+ UNDO_NESTED_ASSERT(true); \
+ g_undo_callback_running = false; \
+ } \
+ ((void)0)
#else
# define UNDO_NESTED_ASSERT(state) ((void)0)
# define UNDO_NESTED_CHECK_BEGIN ((void)0)
-# define UNDO_NESTED_CHECK_END ((void)0)
+# define UNDO_NESTED_CHECK_END ((void)0)
#endif
/** \} */
@@ -102,13 +106,13 @@ static ListBase g_undo_types = {NULL, NULL};
static const UndoType *BKE_undosys_type_from_context(bContext *C)
{
- for (const UndoType *ut = g_undo_types.first; ut; ut = ut->next) {
- /* No poll means we don't check context. */
- if (ut->poll && ut->poll(C)) {
- return ut;
- }
- }
- return NULL;
+ for (const UndoType *ut = g_undo_types.first; ut; ut = ut->next) {
+ /* No poll means we don't check context. */
+ if (ut->poll && ut->poll(C)) {
+ return ut;
+ }
+ }
+ return NULL;
}
/* -------------------------------------------------------------------- */
@@ -121,103 +125,103 @@ static const UndoType *BKE_undosys_type_from_context(bContext *C)
static void undosys_id_ref_store(void *UNUSED(user_data), UndoRefID *id_ref)
{
- BLI_assert(id_ref->name[0] == '\0');
- if (id_ref->ptr) {
- BLI_strncpy(id_ref->name, id_ref->ptr->name, sizeof(id_ref->name));
- /* Not needed, just prevents stale data access. */
- id_ref->ptr = NULL;
- }
+ BLI_assert(id_ref->name[0] == '\0');
+ if (id_ref->ptr) {
+ BLI_strncpy(id_ref->name, id_ref->ptr->name, sizeof(id_ref->name));
+ /* Not needed, just prevents stale data access. */
+ id_ref->ptr = NULL;
+ }
}
static void undosys_id_ref_resolve(void *user_data, UndoRefID *id_ref)
{
- /* Note: we could optimize this, for now it's not too bad since it only runs when we access undo! */
- Main *bmain = user_data;
- ListBase *lb = which_libbase(bmain, GS(id_ref->name));
- for (ID *id = lb->first; id; id = id->next) {
- if (STREQ(id_ref->name, id->name) && (id->lib == NULL)) {
- id_ref->ptr = id;
- break;
- }
- }
+ /* Note: we could optimize this, for now it's not too bad since it only runs when we access undo! */
+ Main *bmain = user_data;
+ ListBase *lb = which_libbase(bmain, GS(id_ref->name));
+ for (ID *id = lb->first; id; id = id->next) {
+ if (STREQ(id_ref->name, id->name) && (id->lib == NULL)) {
+ id_ref->ptr = id;
+ break;
+ }
+ }
}
static bool undosys_step_encode(bContext *C, Main *bmain, UndoStack *ustack, UndoStep *us)
{
- CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
- UNDO_NESTED_CHECK_BEGIN;
- bool ok = us->type->step_encode(C, bmain, us);
- UNDO_NESTED_CHECK_END;
- if (ok) {
- if (us->type->step_foreach_ID_ref != NULL) {
- /* Don't use from context yet because sometimes context is fake and not all members are filled in. */
- us->type->step_foreach_ID_ref(us, undosys_id_ref_store, bmain);
- }
+ CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
+ UNDO_NESTED_CHECK_BEGIN;
+ bool ok = us->type->step_encode(C, bmain, us);
+ UNDO_NESTED_CHECK_END;
+ if (ok) {
+ if (us->type->step_foreach_ID_ref != NULL) {
+ /* Don't use from context yet because sometimes context is fake and not all members are filled in. */
+ us->type->step_foreach_ID_ref(us, undosys_id_ref_store, bmain);
+ }
#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
- if (us->type == BKE_UNDOSYS_TYPE_MEMFILE) {
- ustack->step_active_memfile = us;
- }
+ if (us->type == BKE_UNDOSYS_TYPE_MEMFILE) {
+ ustack->step_active_memfile = us;
+ }
#endif
- }
- if (ok == false) {
- CLOG_INFO(&LOG, 2, "encode callback didn't create undo step");
- }
- return ok;
+ }
+ if (ok == false) {
+ CLOG_INFO(&LOG, 2, "encode callback didn't create undo step");
+ }
+ return ok;
}
static void undosys_step_decode(bContext *C, Main *bmain, UndoStack *ustack, UndoStep *us, int dir)
{
- CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
+ CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
- if (us->type->step_foreach_ID_ref) {
+ if (us->type->step_foreach_ID_ref) {
#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
- if (us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
- for (UndoStep *us_iter = us->prev; us_iter; us_iter = us_iter->prev) {
- if (us_iter->type == BKE_UNDOSYS_TYPE_MEMFILE) {
- if (us_iter == ustack->step_active_memfile) {
- /* Common case, we're already using the last memfile state. */
- }
- else {
- /* Load the previous memfile state so any ID's referenced in this
- * undo step will be correctly resolved, see: T56163. */
- undosys_step_decode(C, bmain, ustack, us_iter, dir);
- /* May have been freed on memfile read. */
- bmain = G.main;
- }
- break;
- }
- }
- }
+ if (us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
+ for (UndoStep *us_iter = us->prev; us_iter; us_iter = us_iter->prev) {
+ if (us_iter->type == BKE_UNDOSYS_TYPE_MEMFILE) {
+ if (us_iter == ustack->step_active_memfile) {
+ /* Common case, we're already using the last memfile state. */
+ }
+ else {
+ /* Load the previous memfile state so any ID's referenced in this
+ * undo step will be correctly resolved, see: T56163. */
+ undosys_step_decode(C, bmain, ustack, us_iter, dir);
+ /* May have been freed on memfile read. */
+ bmain = G.main;
+ }
+ break;
+ }
+ }
+ }
#endif
- /* Don't use from context yet because sometimes context is fake and not all members are filled in. */
- us->type->step_foreach_ID_ref(us, undosys_id_ref_resolve, bmain);
- }
+ /* Don't use from context yet because sometimes context is fake and not all members are filled in. */
+ us->type->step_foreach_ID_ref(us, undosys_id_ref_resolve, bmain);
+ }
- UNDO_NESTED_CHECK_BEGIN;
- us->type->step_decode(C, bmain, us, dir);
- UNDO_NESTED_CHECK_END;
+ UNDO_NESTED_CHECK_BEGIN;
+ us->type->step_decode(C, bmain, us, dir);
+ UNDO_NESTED_CHECK_END;
#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
- if (us->type == BKE_UNDOSYS_TYPE_MEMFILE) {
- ustack->step_active_memfile = us;
- }
+ if (us->type == BKE_UNDOSYS_TYPE_MEMFILE) {
+ ustack->step_active_memfile = us;
+ }
#endif
}
static void undosys_step_free_and_unlink(UndoStack *ustack, UndoStep *us)
{
- CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
- UNDO_NESTED_CHECK_BEGIN;
- us->type->step_free(us);
- UNDO_NESTED_CHECK_END;
+ CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
+ UNDO_NESTED_CHECK_BEGIN;
+ us->type->step_free(us);
+ UNDO_NESTED_CHECK_END;
- BLI_remlink(&ustack->steps, us);
- MEM_freeN(us);
+ BLI_remlink(&ustack->steps, us);
+ MEM_freeN(us);
#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
- if (ustack->step_active_memfile == us) {
- ustack->step_active_memfile = NULL;
- }
+ if (ustack->step_active_memfile == us) {
+ ustack->step_active_memfile = NULL;
+ }
#endif
}
@@ -230,146 +234,146 @@ static void undosys_step_free_and_unlink(UndoStack *ustack, UndoStep *us)
#ifndef NDEBUG
static void undosys_stack_validate(UndoStack *ustack, bool expect_non_empty)
{
- if (ustack->step_active != NULL) {
- BLI_assert(!BLI_listbase_is_empty(&ustack->steps));
- BLI_assert(BLI_findindex(&ustack->steps, ustack->step_active) != -1);
- }
- if (expect_non_empty) {
- BLI_assert(!BLI_listbase_is_empty(&ustack->steps));
- }
+ if (ustack->step_active != NULL) {
+ BLI_assert(!BLI_listbase_is_empty(&ustack->steps));
+ BLI_assert(BLI_findindex(&ustack->steps, ustack->step_active) != -1);
+ }
+ if (expect_non_empty) {
+ BLI_assert(!BLI_listbase_is_empty(&ustack->steps));
+ }
}
#else
static void undosys_stack_validate(UndoStack *ustack, bool expect_non_empty)
{
- UNUSED_VARS(ustack, expect_non_empty);
+ UNUSED_VARS(ustack, expect_non_empty);
}
#endif
UndoStack *BKE_undosys_stack_create(void)
{
- UndoStack *ustack = MEM_callocN(sizeof(UndoStack), __func__);
- return ustack;
+ UndoStack *ustack = MEM_callocN(sizeof(UndoStack), __func__);
+ return ustack;
}
void BKE_undosys_stack_destroy(UndoStack *ustack)
{
- BKE_undosys_stack_clear(ustack);
- MEM_freeN(ustack);
+ BKE_undosys_stack_clear(ustack);
+ MEM_freeN(ustack);
}
void BKE_undosys_stack_clear(UndoStack *ustack)
{
- UNDO_NESTED_ASSERT(false);
- CLOG_INFO(&LOG, 1, "steps=%d", BLI_listbase_count(&ustack->steps));
- for (UndoStep *us = ustack->steps.last, *us_prev; us; us = us_prev) {
- us_prev = us->prev;
- undosys_step_free_and_unlink(ustack, us);
- }
- BLI_listbase_clear(&ustack->steps);
- ustack->step_active = NULL;
+ UNDO_NESTED_ASSERT(false);
+ CLOG_INFO(&LOG, 1, "steps=%d", BLI_listbase_count(&ustack->steps));
+ for (UndoStep *us = ustack->steps.last, *us_prev; us; us = us_prev) {
+ us_prev = us->prev;
+ undosys_step_free_and_unlink(ustack, us);
+ }
+ BLI_listbase_clear(&ustack->steps);
+ ustack->step_active = NULL;
}
void BKE_undosys_stack_clear_active(UndoStack *ustack)
{
- /* Remove active and all following undos. */
- UndoStep *us = ustack->step_active;
-
- if (us) {
- ustack->step_active = us->prev;
- bool is_not_empty = ustack->step_active != NULL;
-
- while (ustack->steps.last != ustack->step_active) {
- UndoStep *us_iter = ustack->steps.last;
- undosys_step_free_and_unlink(ustack, us_iter);
- undosys_stack_validate(ustack, is_not_empty);
- }
- }
+ /* Remove active and all following undos. */
+ UndoStep *us = ustack->step_active;
+
+ if (us) {
+ ustack->step_active = us->prev;
+ bool is_not_empty = ustack->step_active != NULL;
+
+ while (ustack->steps.last != ustack->step_active) {
+ UndoStep *us_iter = ustack->steps.last;
+ undosys_step_free_and_unlink(ustack, us_iter);
+ undosys_stack_validate(ustack, is_not_empty);
+ }
+ }
}
/* Caller is responsible for handling active. */
static void undosys_stack_clear_all_last(UndoStack *ustack, UndoStep *us)
{
- if (us) {
- bool is_not_empty = true;
- UndoStep *us_iter;
- do {
- us_iter = ustack->steps.last;
- BLI_assert(us_iter != ustack->step_active);
- undosys_step_free_and_unlink(ustack, us_iter);
- undosys_stack_validate(ustack, is_not_empty);
- } while ((us != us_iter));
- }
+ if (us) {
+ bool is_not_empty = true;
+ UndoStep *us_iter;
+ do {
+ us_iter = ustack->steps.last;
+ BLI_assert(us_iter != ustack->step_active);
+ undosys_step_free_and_unlink(ustack, us_iter);
+ undosys_stack_validate(ustack, is_not_empty);
+ } while ((us != us_iter));
+ }
}
static void undosys_stack_clear_all_first(UndoStack *ustack, UndoStep *us)
{
- if (us) {
- bool is_not_empty = true;
- UndoStep *us_iter;
- do {
- us_iter = ustack->steps.first;
- BLI_assert(us_iter != ustack->step_active);
- undosys_step_free_and_unlink(ustack, us_iter);
- undosys_stack_validate(ustack, is_not_empty);
- } while ((us != us_iter));
- }
+ if (us) {
+ bool is_not_empty = true;
+ UndoStep *us_iter;
+ do {
+ us_iter = ustack->steps.first;
+ BLI_assert(us_iter != ustack->step_active);
+ undosys_step_free_and_unlink(ustack, us_iter);
+ undosys_stack_validate(ustack, is_not_empty);
+ } while ((us != us_iter));
+ }
}
static bool undosys_stack_push_main(UndoStack *ustack, const char *name, struct Main *bmain)
{
- UNDO_NESTED_ASSERT(false);
- BLI_assert(ustack->step_init == NULL);
- CLOG_INFO(&LOG, 1, "'%s'", name);
- bContext *C_temp = CTX_create();
- CTX_data_main_set(C_temp, bmain);
- bool ok = BKE_undosys_step_push_with_type(ustack, C_temp, name, BKE_UNDOSYS_TYPE_MEMFILE);
- CTX_free(C_temp);
- return ok;
+ UNDO_NESTED_ASSERT(false);
+ BLI_assert(ustack->step_init == NULL);
+ CLOG_INFO(&LOG, 1, "'%s'", name);
+ bContext *C_temp = CTX_create();
+ CTX_data_main_set(C_temp, bmain);
+ bool ok = BKE_undosys_step_push_with_type(ustack, C_temp, name, BKE_UNDOSYS_TYPE_MEMFILE);
+ CTX_free(C_temp);
+ return ok;
}
void BKE_undosys_stack_init_from_main(UndoStack *ustack, struct Main *bmain)
{
- UNDO_NESTED_ASSERT(false);
- undosys_stack_push_main(ustack, "original", bmain);
+ UNDO_NESTED_ASSERT(false);
+ undosys_stack_push_main(ustack, "original", bmain);
}
/* called after 'BKE_undosys_stack_init_from_main' */
void BKE_undosys_stack_init_from_context(UndoStack *ustack, bContext *C)
{
- const UndoType *ut = BKE_undosys_type_from_context(C);
- if ((ut != NULL) && (ut != BKE_UNDOSYS_TYPE_MEMFILE)) {
- BKE_undosys_step_push_with_type(ustack, C, "original mode", ut);
- }
+ const UndoType *ut = BKE_undosys_type_from_context(C);
+ if ((ut != NULL) && (ut != BKE_UNDOSYS_TYPE_MEMFILE)) {
+ BKE_undosys_step_push_with_type(ustack, C, "original mode", ut);
+ }
}
/* name optional */
bool BKE_undosys_stack_has_undo(UndoStack *ustack, const char *name)
{
- if (name) {
- UndoStep *us = BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name));
- return us && us->prev;
- }
+ if (name) {
+ UndoStep *us = BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name));
+ return us && us->prev;
+ }
- return !BLI_listbase_is_empty(&ustack->steps);
+ return !BLI_listbase_is_empty(&ustack->steps);
}
UndoStep *BKE_undosys_stack_active_with_type(UndoStack *ustack, const UndoType *ut)
{
- UndoStep *us = ustack->step_active;
- while (us && (us->type != ut)) {
- us = us->prev;
- }
- return us;
+ UndoStep *us = ustack->step_active;
+ while (us && (us->type != ut)) {
+ us = us->prev;
+ }
+ return us;
}
UndoStep *BKE_undosys_stack_init_or_active_with_type(UndoStack *ustack, const UndoType *ut)
{
- UNDO_NESTED_ASSERT(false);
- CLOG_INFO(&LOG, 1, "type='%s'", ut->name);
- if (ustack->step_init && (ustack->step_init->type == ut)) {
- return ustack->step_init;
- }
- return BKE_undosys_stack_active_with_type(ustack, ut);
+ UNDO_NESTED_ASSERT(false);
+ CLOG_INFO(&LOG, 1, "type='%s'", ut->name);
+ if (ustack->step_init && (ustack->step_init->type == ut)) {
+ return ustack->step_init;
+ }
+ return BKE_undosys_stack_active_with_type(ustack, ut);
}
/**
@@ -378,218 +382,223 @@ UndoStep *BKE_undosys_stack_init_or_active_with_type(UndoStack *ustack, const Un
*/
void BKE_undosys_stack_limit_steps_and_memory(UndoStack *ustack, int steps, size_t memory_limit)
{
- UNDO_NESTED_ASSERT(false);
- if (!(steps || memory_limit)) {
- return;
- }
+ UNDO_NESTED_ASSERT(false);
+ if (!(steps || memory_limit)) {
+ return;
+ }
- CLOG_INFO(&LOG, 1, "steps=%d, memory_limit=%zu", steps, memory_limit);
- UndoStep *us;
+ CLOG_INFO(&LOG, 1, "steps=%d, memory_limit=%zu", steps, memory_limit);
+ UndoStep *us;
#ifdef WITH_GLOBAL_UNDO_KEEP_ONE
- UndoStep *us_exclude = NULL;
+ UndoStep *us_exclude = NULL;
#endif
- /* keep at least two (original + other) */
- size_t data_size_all = 0;
- size_t us_count = 0;
- for (us = ustack->steps.last; us && us->prev; us = us->prev) {
- if (memory_limit) {
- data_size_all += us->data_size;
- if (data_size_all > memory_limit) {
- break;
- }
- }
- if (steps) {
- if (us_count == steps) {
- break;
- }
- if (us->skip == false) {
- us_count += 1;
- }
- }
- }
-
- if (us) {
- if (us->prev && us->prev->prev) {
- us = us->prev;
- }
+ /* keep at least two (original + other) */
+ size_t data_size_all = 0;
+ size_t us_count = 0;
+ for (us = ustack->steps.last; us && us->prev; us = us->prev) {
+ if (memory_limit) {
+ data_size_all += us->data_size;
+ if (data_size_all > memory_limit) {
+ break;
+ }
+ }
+ if (steps) {
+ if (us_count == steps) {
+ break;
+ }
+ if (us->skip == false) {
+ us_count += 1;
+ }
+ }
+ }
+
+ if (us) {
+ if (us->prev && us->prev->prev) {
+ us = us->prev;
+ }
#ifdef WITH_GLOBAL_UNDO_KEEP_ONE
- /* Hack, we need to keep at least one BKE_UNDOSYS_TYPE_MEMFILE. */
- if (us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
- us_exclude = us->prev;
- while (us_exclude && us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
- us_exclude = us_exclude->prev;
- }
- if (us_exclude) {
- BLI_remlink(&ustack->steps, us_exclude);
- }
- }
+ /* Hack, we need to keep at least one BKE_UNDOSYS_TYPE_MEMFILE. */
+ if (us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
+ us_exclude = us->prev;
+ while (us_exclude && us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
+ us_exclude = us_exclude->prev;
+ }
+ if (us_exclude) {
+ BLI_remlink(&ustack->steps, us_exclude);
+ }
+ }
#endif
- /* Free from first to last, free functions may update de-duplication info (see #MemFileUndoStep). */
- undosys_stack_clear_all_first(ustack, us->prev);
+ /* Free from first to last, free functions may update de-duplication info (see #MemFileUndoStep). */
+ undosys_stack_clear_all_first(ustack, us->prev);
#ifdef WITH_GLOBAL_UNDO_KEEP_ONE
- if (us_exclude) {
- BLI_addhead(&ustack->steps, us_exclude);
- }
+ if (us_exclude) {
+ BLI_addhead(&ustack->steps, us_exclude);
+ }
#endif
- }
+ }
}
/** \} */
-UndoStep *BKE_undosys_step_push_init_with_type(UndoStack *ustack, bContext *C, const char *name, const UndoType *ut)
-{
- UNDO_NESTED_ASSERT(false);
- /* We could detect and clean this up (but it should never happen!). */
- BLI_assert(ustack->step_init == NULL);
- if (ut->step_encode_init) {
- undosys_stack_validate(ustack, false);
-
- if (ustack->step_active) {
- undosys_stack_clear_all_last(ustack, ustack->step_active->next);
- }
-
- UndoStep *us = MEM_callocN(ut->step_size, __func__);
- CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, name, ut->name);
- if (name != NULL) {
- BLI_strncpy(us->name, name, sizeof(us->name));
- }
- us->type = ut;
- ustack->step_init = us;
- ut->step_encode_init(C, us);
- undosys_stack_validate(ustack, false);
- return us;
- }
- else {
- return NULL;
- }
+UndoStep *BKE_undosys_step_push_init_with_type(UndoStack *ustack,
+ bContext *C,
+ const char *name,
+ const UndoType *ut)
+{
+ UNDO_NESTED_ASSERT(false);
+ /* We could detect and clean this up (but it should never happen!). */
+ BLI_assert(ustack->step_init == NULL);
+ if (ut->step_encode_init) {
+ undosys_stack_validate(ustack, false);
+
+ if (ustack->step_active) {
+ undosys_stack_clear_all_last(ustack, ustack->step_active->next);
+ }
+
+ UndoStep *us = MEM_callocN(ut->step_size, __func__);
+ CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, name, ut->name);
+ if (name != NULL) {
+ BLI_strncpy(us->name, name, sizeof(us->name));
+ }
+ us->type = ut;
+ ustack->step_init = us;
+ ut->step_encode_init(C, us);
+ undosys_stack_validate(ustack, false);
+ return us;
+ }
+ else {
+ return NULL;
+ }
}
UndoStep *BKE_undosys_step_push_init(UndoStack *ustack, bContext *C, const char *name)
{
- UNDO_NESTED_ASSERT(false);
- /* We could detect and clean this up (but it should never happen!). */
- BLI_assert(ustack->step_init == NULL);
- const UndoType *ut = BKE_undosys_type_from_context(C);
- if (ut == NULL) {
- return NULL;
- }
- return BKE_undosys_step_push_init_with_type(ustack, C, name, ut);
+ UNDO_NESTED_ASSERT(false);
+ /* We could detect and clean this up (but it should never happen!). */
+ BLI_assert(ustack->step_init == NULL);
+ const UndoType *ut = BKE_undosys_type_from_context(C);
+ if (ut == NULL) {
+ return NULL;
+ }
+ return BKE_undosys_step_push_init_with_type(ustack, C, name, ut);
}
/**
* \param C: Can be NULL from some callers if their encoding function doesn't need it
*/
-bool BKE_undosys_step_push_with_type(UndoStack *ustack, bContext *C, const char *name, const UndoType *ut)
+bool BKE_undosys_step_push_with_type(UndoStack *ustack,
+ bContext *C,
+ const char *name,
+ const UndoType *ut)
{
- UNDO_NESTED_ASSERT(false);
- undosys_stack_validate(ustack, false);
- bool is_not_empty = ustack->step_active != NULL;
-
- /* Might not be final place for this to be called - probably only want to call it from some
- * undo handlers, not all of them? */
- if (BKE_override_static_is_enabled()) {
- BKE_main_override_static_operations_create(G.main, false);
- }
-
- /* Remove all undos after (also when 'ustack->step_active == NULL'). */
- while (ustack->steps.last != ustack->step_active) {
- UndoStep *us_iter = ustack->steps.last;
- undosys_step_free_and_unlink(ustack, us_iter);
- undosys_stack_validate(ustack, is_not_empty);
- }
-
- if (ustack->step_active) {
- BLI_assert(BLI_findindex(&ustack->steps, ustack->step_active) != -1);
- }
+ UNDO_NESTED_ASSERT(false);
+ undosys_stack_validate(ustack, false);
+ bool is_not_empty = ustack->step_active != NULL;
+
+ /* Might not be final place for this to be called - probably only want to call it from some
+ * undo handlers, not all of them? */
+ if (BKE_override_static_is_enabled()) {
+ BKE_main_override_static_operations_create(G.main, false);
+ }
+
+ /* Remove all undos after (also when 'ustack->step_active == NULL'). */
+ while (ustack->steps.last != ustack->step_active) {
+ UndoStep *us_iter = ustack->steps.last;
+ undosys_step_free_and_unlink(ustack, us_iter);
+ undosys_stack_validate(ustack, is_not_empty);
+ }
+
+ if (ustack->step_active) {
+ BLI_assert(BLI_findindex(&ustack->steps, ustack->step_active) != -1);
+ }
#ifdef WITH_GLOBAL_UNDO_ENSURE_UPDATED
- if (ut->step_foreach_ID_ref != NULL) {
- if (G_MAIN->is_memfile_undo_written == false) {
- const char *name_internal = "MemFile Internal (pre)";
- /* Don't let 'step_init' cause issues when adding memfile undo step. */
- void *step_init = ustack->step_init;
- ustack->step_init = NULL;
- const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN);
- /* Restore 'step_init'. */
- ustack->step_init = step_init;
- if (ok) {
- UndoStep *us = ustack->steps.last;
- BLI_assert(STREQ(us->name, name_internal));
- us->skip = true;
-#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
- ustack->step_active_memfile = us;
-#endif
- }
- }
- }
+ if (ut->step_foreach_ID_ref != NULL) {
+ if (G_MAIN->is_memfile_undo_written == false) {
+ const char *name_internal = "MemFile Internal (pre)";
+ /* Don't let 'step_init' cause issues when adding memfile undo step. */
+ void *step_init = ustack->step_init;
+ ustack->step_init = NULL;
+ const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN);
+ /* Restore 'step_init'. */
+ ustack->step_init = step_init;
+ if (ok) {
+ UndoStep *us = ustack->steps.last;
+ BLI_assert(STREQ(us->name, name_internal));
+ us->skip = true;
+# ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
+ ustack->step_active_memfile = us;
+# endif
+ }
+ }
+ }
#endif
- bool use_memfile_step = false;
- {
- UndoStep *us = ustack->step_init ? ustack->step_init : MEM_callocN(ut->step_size, __func__);
- ustack->step_init = NULL;
- if (us->name[0] == '\0') {
- BLI_strncpy(us->name, name, sizeof(us->name));
- }
- us->type = ut;
- /* initialized, not added yet. */
-
- if (!undosys_step_encode(C, G_MAIN, ustack, us)) {
- MEM_freeN(us);
- undosys_stack_validate(ustack, true);
- return false;
- }
- ustack->step_active = us;
- BLI_addtail(&ustack->steps, us);
- use_memfile_step = us->use_memfile_step;
- }
-
- if (use_memfile_step) {
- const char *name_internal = "MemFile Internal (post)";
- const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN);
- if (ok) {
- UndoStep *us = ustack->steps.last;
- BLI_assert(STREQ(us->name, name_internal));
- us->skip = true;
+ bool use_memfile_step = false;
+ {
+ UndoStep *us = ustack->step_init ? ustack->step_init : MEM_callocN(ut->step_size, __func__);
+ ustack->step_init = NULL;
+ if (us->name[0] == '\0') {
+ BLI_strncpy(us->name, name, sizeof(us->name));
+ }
+ us->type = ut;
+ /* initialized, not added yet. */
+
+ if (!undosys_step_encode(C, G_MAIN, ustack, us)) {
+ MEM_freeN(us);
+ undosys_stack_validate(ustack, true);
+ return false;
+ }
+ ustack->step_active = us;
+ BLI_addtail(&ustack->steps, us);
+ use_memfile_step = us->use_memfile_step;
+ }
+
+ if (use_memfile_step) {
+ const char *name_internal = "MemFile Internal (post)";
+ const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN);
+ if (ok) {
+ UndoStep *us = ustack->steps.last;
+ BLI_assert(STREQ(us->name, name_internal));
+ us->skip = true;
#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
- ustack->step_active_memfile = us;
+ ustack->step_active_memfile = us;
#endif
- ustack->step_active = us;
- }
- }
+ ustack->step_active = us;
+ }
+ }
- undosys_stack_validate(ustack, true);
- return true;
+ undosys_stack_validate(ustack, true);
+ return true;
}
bool BKE_undosys_step_push(UndoStack *ustack, bContext *C, const char *name)
{
- UNDO_NESTED_ASSERT(false);
- const UndoType *ut = ustack->step_init ? ustack->step_init->type : BKE_undosys_type_from_context(C);
- if (ut == NULL) {
- return false;
- }
- return BKE_undosys_step_push_with_type(ustack, C, name, ut);
+ UNDO_NESTED_ASSERT(false);
+ const UndoType *ut = ustack->step_init ? ustack->step_init->type :
+ BKE_undosys_type_from_context(C);
+ if (ut == NULL) {
+ return false;
+ }
+ return BKE_undosys_step_push_with_type(ustack, C, name, ut);
}
-
/**
* Useful when we want to diff against previous undo data but can't be sure the types match.
*/
UndoStep *BKE_undosys_step_same_type_next(UndoStep *us)
{
- if (us) {
- const UndoType *ut = us->type;
- while ((us = us->next)) {
- if (us->type == ut) {
- return us;
- }
- }
-
- }
- return us;
+ if (us) {
+ const UndoType *ut = us->type;
+ while ((us = us->next)) {
+ if (us->type == ut) {
+ return us;
+ }
+ }
+ }
+ return us;
}
/**
@@ -597,175 +606,180 @@ UndoStep *BKE_undosys_step_same_type_next(UndoStep *us)
*/
UndoStep *BKE_undosys_step_same_type_prev(UndoStep *us)
{
- if (us) {
- const UndoType *ut = us->type;
- while ((us = us->prev)) {
- if (us->type == ut) {
- return us;
- }
- }
-
- }
- return us;
+ if (us) {
+ const UndoType *ut = us->type;
+ while ((us = us->prev)) {
+ if (us->type == ut) {
+ return us;
+ }
+ }
+ }
+ return us;
}
-UndoStep *BKE_undosys_step_find_by_name_with_type(UndoStack *ustack, const char *name, const UndoType *ut)
+UndoStep *BKE_undosys_step_find_by_name_with_type(UndoStack *ustack,
+ const char *name,
+ const UndoType *ut)
{
- for (UndoStep *us = ustack->steps.last; us; us = us->prev) {
- if (us->type == ut) {
- if (STREQ(name, us->name)) {
- return us;
- }
- }
- }
- return NULL;
+ for (UndoStep *us = ustack->steps.last; us; us = us->prev) {
+ if (us->type == ut) {
+ if (STREQ(name, us->name)) {
+ return us;
+ }
+ }
+ }
+ return NULL;
}
UndoStep *BKE_undosys_step_find_by_name(UndoStack *ustack, const char *name)
{
- return BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name));
+ return BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name));
}
UndoStep *BKE_undosys_step_find_by_type(UndoStack *ustack, const UndoType *ut)
{
- for (UndoStep *us = ustack->steps.last; us; us = us->prev) {
- if (us->type == ut) {
- return us;
- }
- }
- return NULL;
-}
-
-bool BKE_undosys_step_undo_with_data_ex(
- UndoStack *ustack, bContext *C, UndoStep *us,
- bool use_skip)
-{
- UNDO_NESTED_ASSERT(false);
- if (us) {
- undosys_stack_validate(ustack, true);
- }
- UndoStep *us_prev = us ? us->prev : NULL;
- if (us) {
- /* The current state is a copy, we need to load the previous state. */
- us = us_prev;
- }
-
- if (us != NULL) {
- CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
-
- /* Handle accumulate steps. */
- if (ustack->step_active) {
- UndoStep *us_iter = ustack->step_active;
- while (us_iter != us) {
- /* TODO:
- * - skip successive steps that store the same data, eg: memfile steps.
- * - or steps that include another steps data, eg: a memfile step includes text undo data.
- */
- undosys_step_decode(C, G_MAIN, ustack, us_iter, -1);
- us_iter = us_iter->prev;
- }
- }
-
- undosys_step_decode(C, G_MAIN, ustack, us, -1);
-
- ustack->step_active = us_prev;
- undosys_stack_validate(ustack, true);
- if (use_skip) {
- if (ustack->step_active && ustack->step_active->skip) {
- CLOG_INFO(&LOG, 2, "undo continue with skip %p '%s', type='%s'", us, us->name, us->type->name);
- BKE_undosys_step_undo_with_data(ustack, C, ustack->step_active);
- }
- }
- return true;
- }
- return false;
+ for (UndoStep *us = ustack->steps.last; us; us = us->prev) {
+ if (us->type == ut) {
+ return us;
+ }
+ }
+ return NULL;
+}
+
+bool BKE_undosys_step_undo_with_data_ex(UndoStack *ustack,
+ bContext *C,
+ UndoStep *us,
+ bool use_skip)
+{
+ UNDO_NESTED_ASSERT(false);
+ if (us) {
+ undosys_stack_validate(ustack, true);
+ }
+ UndoStep *us_prev = us ? us->prev : NULL;
+ if (us) {
+ /* The current state is a copy, we need to load the previous state. */
+ us = us_prev;
+ }
+
+ if (us != NULL) {
+ CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
+
+ /* Handle accumulate steps. */
+ if (ustack->step_active) {
+ UndoStep *us_iter = ustack->step_active;
+ while (us_iter != us) {
+ /* TODO:
+ * - skip successive steps that store the same data, eg: memfile steps.
+ * - or steps that include another steps data, eg: a memfile step includes text undo data.
+ */
+ undosys_step_decode(C, G_MAIN, ustack, us_iter, -1);
+ us_iter = us_iter->prev;
+ }
+ }
+
+ undosys_step_decode(C, G_MAIN, ustack, us, -1);
+
+ ustack->step_active = us_prev;
+ undosys_stack_validate(ustack, true);
+ if (use_skip) {
+ if (ustack->step_active && ustack->step_active->skip) {
+ CLOG_INFO(
+ &LOG, 2, "undo continue with skip %p '%s', type='%s'", us, us->name, us->type->name);
+ BKE_undosys_step_undo_with_data(ustack, C, ustack->step_active);
+ }
+ }
+ return true;
+ }
+ return false;
}
bool BKE_undosys_step_undo_with_data(UndoStack *ustack, bContext *C, UndoStep *us)
{
- return BKE_undosys_step_undo_with_data_ex(ustack, C, us, true);
+ return BKE_undosys_step_undo_with_data_ex(ustack, C, us, true);
}
bool BKE_undosys_step_undo(UndoStack *ustack, bContext *C)
{
- return BKE_undosys_step_undo_with_data(ustack, C, ustack->step_active);
+ return BKE_undosys_step_undo_with_data(ustack, C, ustack->step_active);
}
void BKE_undosys_step_undo_from_index(UndoStack *ustack, bContext *C, int index)
{
- UndoStep *us = BLI_findlink(&ustack->steps, index);
- BLI_assert(us->skip == false);
- BKE_undosys_step_load_data(ustack, C, us);
-}
-
-bool BKE_undosys_step_redo_with_data_ex(
- UndoStack *ustack, bContext *C, UndoStep *us,
- bool use_skip)
-{
- UNDO_NESTED_ASSERT(false);
- UndoStep *us_next = us ? us->next : NULL;
- /* Unlike undo accumulate, we always use the next. */
- us = us_next;
-
- if (us != NULL) {
- CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
-
- /* Handle accumulate steps. */
- if (ustack->step_active && ustack->step_active->next) {
- UndoStep *us_iter = ustack->step_active->next;
- while (us_iter != us) {
- undosys_step_decode(C, G_MAIN, ustack, us_iter, 1);
- us_iter = us_iter->next;
- }
- }
-
- undosys_step_decode(C, G_MAIN, ustack, us, 1);
- ustack->step_active = us_next;
- if (use_skip) {
- if (ustack->step_active && ustack->step_active->skip) {
- CLOG_INFO(&LOG, 2, "redo continue with skip %p '%s', type='%s'", us, us->name, us->type->name);
- BKE_undosys_step_redo_with_data(ustack, C, ustack->step_active);
- }
- }
- return true;
- }
- return false;
+ UndoStep *us = BLI_findlink(&ustack->steps, index);
+ BLI_assert(us->skip == false);
+ BKE_undosys_step_load_data(ustack, C, us);
+}
+
+bool BKE_undosys_step_redo_with_data_ex(UndoStack *ustack,
+ bContext *C,
+ UndoStep *us,
+ bool use_skip)
+{
+ UNDO_NESTED_ASSERT(false);
+ UndoStep *us_next = us ? us->next : NULL;
+ /* Unlike undo accumulate, we always use the next. */
+ us = us_next;
+
+ if (us != NULL) {
+ CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
+
+ /* Handle accumulate steps. */
+ if (ustack->step_active && ustack->step_active->next) {
+ UndoStep *us_iter = ustack->step_active->next;
+ while (us_iter != us) {
+ undosys_step_decode(C, G_MAIN, ustack, us_iter, 1);
+ us_iter = us_iter->next;
+ }
+ }
+
+ undosys_step_decode(C, G_MAIN, ustack, us, 1);
+ ustack->step_active = us_next;
+ if (use_skip) {
+ if (ustack->step_active && ustack->step_active->skip) {
+ CLOG_INFO(
+ &LOG, 2, "redo continue with skip %p '%s', type='%s'", us, us->name, us->type->name);
+ BKE_undosys_step_redo_with_data(ustack, C, ustack->step_active);
+ }
+ }
+ return true;
+ }
+ return false;
}
bool BKE_undosys_step_redo_with_data(UndoStack *ustack, bContext *C, UndoStep *us)
{
- return BKE_undosys_step_redo_with_data_ex(ustack, C, us, true);
+ return BKE_undosys_step_redo_with_data_ex(ustack, C, us, true);
}
bool BKE_undosys_step_redo(UndoStack *ustack, bContext *C)
{
- return BKE_undosys_step_redo_with_data(ustack, C, ustack->step_active);
+ return BKE_undosys_step_redo_with_data(ustack, C, ustack->step_active);
}
bool BKE_undosys_step_load_data(UndoStack *ustack, bContext *C, UndoStep *us)
{
- UNDO_NESTED_ASSERT(false);
- const int index_active = BLI_findindex(&ustack->steps, ustack->step_active);
- const int index_target = BLI_findindex(&ustack->steps, us);
- BLI_assert(!ELEM(-1, index_active, index_target));
- bool ok = true;
-
- if (index_target < index_active) {
- uint i = index_active - index_target;
- while (i-- && ok) {
- ok = BKE_undosys_step_undo_with_data_ex(ustack, C, ustack->step_active, false);
- }
- }
- else if (index_target > index_active) {
- uint i = index_target - index_active;
- while (i-- && ok) {
- ok = BKE_undosys_step_redo_with_data_ex(ustack, C, ustack->step_active, false);
- }
- }
-
- if (ok) {
- BLI_assert(ustack->step_active == us);
- }
-
- return ok;
+ UNDO_NESTED_ASSERT(false);
+ const int index_active = BLI_findindex(&ustack->steps, ustack->step_active);
+ const int index_target = BLI_findindex(&ustack->steps, us);
+ BLI_assert(!ELEM(-1, index_active, index_target));
+ bool ok = true;
+
+ if (index_target < index_active) {
+ uint i = index_active - index_target;
+ while (i-- && ok) {
+ ok = BKE_undosys_step_undo_with_data_ex(ustack, C, ustack->step_active, false);
+ }
+ }
+ else if (index_target > index_active) {
+ uint i = index_target - index_active;
+ while (i-- && ok) {
+ ok = BKE_undosys_step_redo_with_data_ex(ustack, C, ustack->step_active, false);
+ }
+ }
+
+ if (ok) {
+ BLI_assert(ustack->step_active == us);
+ }
+
+ return ok;
}
/**
@@ -773,23 +787,23 @@ bool BKE_undosys_step_load_data(UndoStack *ustack, bContext *C, UndoStep *us)
*/
UndoType *BKE_undosys_type_append(void (*undosys_fn)(UndoType *))
{
- UndoType *ut;
+ UndoType *ut;
- ut = MEM_callocN(sizeof(UndoType), __func__);
+ ut = MEM_callocN(sizeof(UndoType), __func__);
- undosys_fn(ut);
+ undosys_fn(ut);
- BLI_addtail(&g_undo_types, ut);
+ BLI_addtail(&g_undo_types, ut);
- return ut;
+ return ut;
}
void BKE_undosys_type_free_all(void)
{
- UndoType *ut;
- while ((ut = BLI_pophead(&g_undo_types))) {
- MEM_freeN(ut);
- }
+ UndoType *ut;
+ while ((ut = BLI_pophead(&g_undo_types))) {
+ MEM_freeN(ut);
+ }
}
/** \} */
@@ -804,35 +818,36 @@ void BKE_undosys_type_free_all(void)
* The pointer can only be a key, we can't read it's contents. */
#define USE_LIB_SKIP
-static void UNUSED_FUNCTION(BKE_undosys_foreach_ID_ref(
- UndoStack *ustack, UndoTypeForEachIDRefFn foreach_ID_ref_fn, void *user_data))
+static void UNUSED_FUNCTION(BKE_undosys_foreach_ID_ref(UndoStack *ustack,
+ UndoTypeForEachIDRefFn foreach_ID_ref_fn,
+ void *user_data))
{
- for (UndoStep *us = ustack->steps.first; us; us = us->next) {
- const UndoType *ut = us->type;
- if (ut->step_foreach_ID_ref != NULL) {
- ut->step_foreach_ID_ref(us, foreach_ID_ref_fn, user_data);
- }
- }
+ for (UndoStep *us = ustack->steps.first; us; us = us->next) {
+ const UndoType *ut = us->type;
+ if (ut->step_foreach_ID_ref != NULL) {
+ ut->step_foreach_ID_ref(us, foreach_ID_ref_fn, user_data);
+ }
+ }
}
typedef struct UndoIDPtrMapItem {
- /** Never changes (matches undo data). Use as sort key for binary search. */
- const void *ptr;
- /** Write the new pointers here. */
- uint index;
+ /** Never changes (matches undo data). Use as sort key for binary search. */
+ const void *ptr;
+ /** Write the new pointers here. */
+ uint index;
} UndoIDPtrMapItem;
typedef struct UndoIDPtrMap {
- UndoRefID *refs;
- /**
- * Pointer map, update 'dst' members before use.
- * This is always sorted (adds some overhead when adding, in practice it's acceptable since).
- */
- UndoIDPtrMapItem *pmap;
-
- /** Length for both 'refs' & 'pmap' */
- uint len;
- uint len_alloc;
+ UndoRefID *refs;
+ /**
+ * Pointer map, update 'dst' members before use.
+ * This is always sorted (adds some overhead when adding, in practice it's acceptable since).
+ */
+ UndoIDPtrMapItem *pmap;
+
+ /** Length for both 'refs' & 'pmap' */
+ uint len;
+ uint len_alloc;
} UndoIDPtrMap;
#ifdef DEBUG
@@ -841,13 +856,13 @@ typedef struct UndoIDPtrMap {
# define PMAP_DEFAULT_ALLOC 32
#endif
-void BKE_undosys_ID_map_foreach_ID_ref(
- UndoIDPtrMap *map,
- UndoTypeForEachIDRefFn foreach_ID_ref_fn, void *user_data)
+void BKE_undosys_ID_map_foreach_ID_ref(UndoIDPtrMap *map,
+ UndoTypeForEachIDRefFn foreach_ID_ref_fn,
+ void *user_data)
{
- for (uint i = 0; i < map->len; i++) {
- foreach_ID_ref_fn(user_data, &map->refs[i]);
- }
+ for (uint i = 0; i < map->len; i++) {
+ foreach_ID_ref_fn(user_data, &map->refs[i]);
+ }
}
/**
@@ -855,34 +870,34 @@ void BKE_undosys_ID_map_foreach_ID_ref(
*/
static bool undosys_ID_map_lookup_index(const UndoIDPtrMap *map, const void *key, uint *r_index)
{
- const UndoIDPtrMapItem *pmap = map->pmap;
- const uint len = map->len;
- if (len == 0) {
- if (r_index) {
- *r_index = 0;
- }
- return false;
- }
- int min = 0, max = len - 1;
- while (min <= max) {
- const uint mid = (min + max) / 2;
- if (pmap[mid].ptr < key) {
- min = mid + 1;
- }
- else if (pmap[mid].ptr == key) {
- if (r_index) {
- *r_index = mid;
- }
- return true;
- }
- else if (pmap[mid].ptr > key) {
- max = mid - 1;
- }
- }
- if (r_index) {
- *r_index = min;
- }
- return false;
+ const UndoIDPtrMapItem *pmap = map->pmap;
+ const uint len = map->len;
+ if (len == 0) {
+ if (r_index) {
+ *r_index = 0;
+ }
+ return false;
+ }
+ int min = 0, max = len - 1;
+ while (min <= max) {
+ const uint mid = (min + max) / 2;
+ if (pmap[mid].ptr < key) {
+ min = mid + 1;
+ }
+ else if (pmap[mid].ptr == key) {
+ if (r_index) {
+ *r_index = mid;
+ }
+ return true;
+ }
+ else if (pmap[mid].ptr > key) {
+ max = mid - 1;
+ }
+ }
+ if (r_index) {
+ *r_index = min;
+ }
+ return false;
}
/**
@@ -893,121 +908,120 @@ static bool undosys_ID_map_lookup_index(const UndoIDPtrMap *map, const void *key
*/
UndoIDPtrMap *BKE_undosys_ID_map_create(void)
{
- UndoIDPtrMap *map = MEM_mallocN(sizeof(*map), __func__);
- map->len_alloc = PMAP_DEFAULT_ALLOC;
- map->refs = MEM_mallocN(sizeof(*map->refs) * map->len_alloc, __func__);
- map->pmap = MEM_mallocN(sizeof(*map->pmap) * map->len_alloc, __func__);
- map->len = 0;
- return map;
+ UndoIDPtrMap *map = MEM_mallocN(sizeof(*map), __func__);
+ map->len_alloc = PMAP_DEFAULT_ALLOC;
+ map->refs = MEM_mallocN(sizeof(*map->refs) * map->len_alloc, __func__);
+ map->pmap = MEM_mallocN(sizeof(*map->pmap) * map->len_alloc, __func__);
+ map->len = 0;
+ return map;
}
void BKE_undosys_ID_map_destroy(UndoIDPtrMap *idpmap)
{
- MEM_SAFE_FREE(idpmap->refs);
- MEM_SAFE_FREE(idpmap->pmap);
- MEM_freeN(idpmap);
+ MEM_SAFE_FREE(idpmap->refs);
+ MEM_SAFE_FREE(idpmap->pmap);
+ MEM_freeN(idpmap);
}
void BKE_undosys_ID_map_add(UndoIDPtrMap *map, ID *id)
{
- uint index;
+ uint index;
#ifdef USE_LIB_SKIP
- if (id->lib != NULL) {
- return;
- }
+ if (id->lib != NULL) {
+ return;
+ }
#endif
- if (undosys_ID_map_lookup_index(map, id, &index)) {
- return; /* exists. */
- }
-
- const uint len_src = map->len;
- const uint len_dst = map->len + 1;
- if (len_dst > map->len_alloc) {
- map->len_alloc *= 2;
- BLI_assert(map->len_alloc >= len_dst);
- map->pmap = MEM_reallocN(map->pmap, sizeof(*map->pmap) * map->len_alloc);
- map->refs = MEM_reallocN(map->refs, sizeof(*map->refs) * map->len_alloc);
- }
-
-#if 0 /* Will be done automatically in callback. */
- BLI_strncpy(map->refs[len_src].name, id->name, sizeof(id->name));
+ if (undosys_ID_map_lookup_index(map, id, &index)) {
+ return; /* exists. */
+ }
+
+ const uint len_src = map->len;
+ const uint len_dst = map->len + 1;
+ if (len_dst > map->len_alloc) {
+ map->len_alloc *= 2;
+ BLI_assert(map->len_alloc >= len_dst);
+ map->pmap = MEM_reallocN(map->pmap, sizeof(*map->pmap) * map->len_alloc);
+ map->refs = MEM_reallocN(map->refs, sizeof(*map->refs) * map->len_alloc);
+ }
+
+#if 0 /* Will be done automatically in callback. */
+ BLI_strncpy(map->refs[len_src].name, id->name, sizeof(id->name));
#else
- map->refs[len_src].name[0] = '\0';
+ map->refs[len_src].name[0] = '\0';
#endif
- map->refs[len_src].ptr = id;
+ map->refs[len_src].ptr = id;
- if (len_src != 0 && index != len_src) {
- memmove(&map->pmap[index + 1], &map->pmap[index], sizeof(*map->pmap) * (len_src - index));
- }
- map->pmap[index].ptr = id;
- map->pmap[index].index = len_src;
+ if (len_src != 0 && index != len_src) {
+ memmove(&map->pmap[index + 1], &map->pmap[index], sizeof(*map->pmap) * (len_src - index));
+ }
+ map->pmap[index].ptr = id;
+ map->pmap[index].index = len_src;
- map->len = len_dst;
+ map->len = len_dst;
}
ID *BKE_undosys_ID_map_lookup(const UndoIDPtrMap *map, const ID *id_src)
{
- /* We should only ever lookup indices which exist! */
- uint index;
- if (!undosys_ID_map_lookup_index(map, id_src, &index)) {
- BLI_assert(0);
- }
- index = map->pmap[index].index;
- ID *id_dst = map->refs[index].ptr;
- BLI_assert(id_dst != NULL);
- BLI_assert(STREQ(id_dst->name, map->refs[index].name));
- return id_dst;
+ /* We should only ever lookup indices which exist! */
+ uint index;
+ if (!undosys_ID_map_lookup_index(map, id_src, &index)) {
+ BLI_assert(0);
+ }
+ index = map->pmap[index].index;
+ ID *id_dst = map->refs[index].ptr;
+ BLI_assert(id_dst != NULL);
+ BLI_assert(STREQ(id_dst->name, map->refs[index].name));
+ return id_dst;
}
void BKE_undosys_ID_map_add_with_prev(UndoIDPtrMap *map, ID *id, ID **id_prev)
{
- if (id == *id_prev) {
- return;
- }
- *id_prev = id;
- BKE_undosys_ID_map_add(map, id);
+ if (id == *id_prev) {
+ return;
+ }
+ *id_prev = id;
+ BKE_undosys_ID_map_add(map, id);
}
ID *BKE_undosys_ID_map_lookup_with_prev(const UndoIDPtrMap *map, ID *id_src, ID *id_prev_match[2])
{
- if (id_src == id_prev_match[0]) {
- return id_prev_match[1];
- }
- else {
+ if (id_src == id_prev_match[0]) {
+ return id_prev_match[1];
+ }
+ else {
#ifdef USE_LIB_SKIP
- ID *id_dst = BKE_undosys_ID_map_lookup(map, id_src);
+ ID *id_dst = BKE_undosys_ID_map_lookup(map, id_src);
#else
- ID *id_dst = (id_src->lib == NULL) ? BKE_undosys_ID_map_lookup(map, id_src) : id_src;
+ ID *id_dst = (id_src->lib == NULL) ? BKE_undosys_ID_map_lookup(map, id_src) : id_src;
#endif
- id_prev_match[0] = id_src;
- id_prev_match[1] = id_dst;
- return id_dst;
- }
+ id_prev_match[0] = id_src;
+ id_prev_match[1] = id_dst;
+ return id_dst;
+ }
}
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Debug Helpers
* \{ */
void BKE_undosys_print(UndoStack *ustack)
{
- printf("Undo %d Steps (*: active, #=applied, M=memfile-active, S=skip)\n",
- BLI_listbase_count(&ustack->steps));
- int index = 0;
- for (UndoStep *us = ustack->steps.first; us; us = us->next) {
- printf("[%c%c%c%c] %3d type='%s', name='%s'\n",
- (us == ustack->step_active) ? '*' : ' ',
- us->is_applied ? '#' : ' ',
- (us == ustack->step_active_memfile) ? 'M' : ' ',
- us->skip ? 'S' : ' ',
- index,
- us->type->name,
- us->name);
- index++;
- }
+ printf("Undo %d Steps (*: active, #=applied, M=memfile-active, S=skip)\n",
+ BLI_listbase_count(&ustack->steps));
+ int index = 0;
+ for (UndoStep *us = ustack->steps.first; us; us = us->next) {
+ printf("[%c%c%c%c] %3d type='%s', name='%s'\n",
+ (us == ustack->step_active) ? '*' : ' ',
+ us->is_applied ? '#' : ' ',
+ (us == ustack->step_active_memfile) ? 'M' : ' ',
+ us->skip ? 'S' : ' ',
+ index,
+ us->type->name,
+ us->name);
+ index++;
+ }
}
/** \} */