Diffstat (limited to 'intern/guardedalloc/intern/mallocn.c')
-rw-r--r--  intern/guardedalloc/intern/mallocn.c | 52
1 file changed, 52 insertions(+), 0 deletions(-)
diff --git a/intern/guardedalloc/intern/mallocn.c b/intern/guardedalloc/intern/mallocn.c
index e85f8eb03ed..f0dd29a0b9e 100644
--- a/intern/guardedalloc/intern/mallocn.c
+++ b/intern/guardedalloc/intern/mallocn.c
@@ -36,6 +36,7 @@
const char *malloc_conf = "background_thread:true,dirty_decay_ms:4000";
#endif
+/* NOTE: Keep in sync with MEM_use_lockfree_allocator(). */
size_t (*MEM_allocN_len)(const void *vmemh) = MEM_lockfree_allocN_len;
void (*MEM_freeN)(void *vmemh) = MEM_lockfree_freeN;
void *(*MEM_dupallocN)(const void *vmemh) = MEM_lockfree_dupallocN;
@@ -95,8 +96,59 @@ void aligned_free(void *ptr)
#endif
}
+/* Perform assert checks on allocator type change.
+ *
+ * Helps catch issues (in debug builds) caused by an unintended allocator type change while
+ * allocations still exist. */
+static void assert_for_allocator_change(void)
+{
+ /* NOTE: Assume that there is no "sticky" internal state which would make switching the
+ * allocator type unsafe once all allocations are freed. In fact, it should be safe to change
+ * the allocator type after all blocks have been freed: some regression tests rely on this
+ * property of the allocators. */
+ assert(MEM_get_memory_blocks_in_use() == 0);
+}
+
+void MEM_use_lockfree_allocator(void)
+{
+ /* NOTE: Keep in sync with static initialization of the variables. */
+
+ /* TODO(sergey): Find a way to de-duplicate the logic. Perhaps require an explicit call
+ * to guarded allocator initialization at application startup. */
+
+ assert_for_allocator_change();
+
+ MEM_allocN_len = MEM_lockfree_allocN_len;
+ MEM_freeN = MEM_lockfree_freeN;
+ MEM_dupallocN = MEM_lockfree_dupallocN;
+ MEM_reallocN_id = MEM_lockfree_reallocN_id;
+ MEM_recallocN_id = MEM_lockfree_recallocN_id;
+ MEM_callocN = MEM_lockfree_callocN;
+ MEM_calloc_arrayN = MEM_lockfree_calloc_arrayN;
+ MEM_mallocN = MEM_lockfree_mallocN;
+ MEM_malloc_arrayN = MEM_lockfree_malloc_arrayN;
+ MEM_mallocN_aligned = MEM_lockfree_mallocN_aligned;
+ MEM_printmemlist_pydict = MEM_lockfree_printmemlist_pydict;
+ MEM_printmemlist = MEM_lockfree_printmemlist;
+ MEM_callbackmemlist = MEM_lockfree_callbackmemlist;
+ MEM_printmemlist_stats = MEM_lockfree_printmemlist_stats;
+ MEM_set_error_callback = MEM_lockfree_set_error_callback;
+ MEM_consistency_check = MEM_lockfree_consistency_check;
+ MEM_set_memory_debug = MEM_lockfree_set_memory_debug;
+ MEM_get_memory_in_use = MEM_lockfree_get_memory_in_use;
+ MEM_get_memory_blocks_in_use = MEM_lockfree_get_memory_blocks_in_use;
+ MEM_reset_peak_memory = MEM_lockfree_reset_peak_memory;
+ MEM_get_peak_memory = MEM_lockfree_get_peak_memory;
+
+#ifndef NDEBUG
+ MEM_name_ptr = MEM_lockfree_name_ptr;
+#endif
+}
+
void MEM_use_guarded_allocator(void)
{
+ assert_for_allocator_change();
+
MEM_allocN_len = MEM_guarded_allocN_len;
MEM_freeN = MEM_guarded_freeN;
MEM_dupallocN = MEM_guarded_dupallocN;
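The pattern this patch relies on is a set of public allocator entry points that are plain function pointers, re-bound at runtime to one backend or the other, with an assert guarding against switching while blocks are still allocated. Below is a minimal standalone sketch of that pattern; all names in it (impl_a_malloc, use_backend_b, blocks_in_use, and so on) are illustrative assumptions, not Blender's actual API.

/* Sketch: runtime-switchable allocator backends behind function pointers.
 * Illustrative only; names do not come from Blender. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Shared bookkeeping used by the switch-time safety check. */
static size_t blocks_in_use = 0;

/* Two interchangeable backends with identical signatures. */
static void *impl_a_malloc(size_t size)
{
  blocks_in_use++;
  return malloc(size);
}

static void impl_a_free(void *ptr)
{
  blocks_in_use--;
  free(ptr);
}

static void *impl_b_malloc(size_t size)
{
  blocks_in_use++;
  return calloc(1, size); /* E.g. a zero-initializing variant. */
}

static void impl_b_free(void *ptr)
{
  blocks_in_use--;
  free(ptr);
}

/* Public entry points, statically bound to backend A,
 * mirroring the static initialization at the top of the patch. */
static void *(*mem_malloc)(size_t) = impl_a_malloc;
static void (*mem_free)(void *) = impl_a_free;

/* Switching is only safe while no blocks are allocated,
 * which is what assert_for_allocator_change() checks above. */
static void use_backend_b(void)
{
  assert(blocks_in_use == 0);
  mem_malloc = impl_b_malloc;
  mem_free = impl_b_free;
}

int main(void)
{
  void *p = mem_malloc(16);
  mem_free(p);

  use_backend_b(); /* OK: all blocks have been freed. */

  p = mem_malloc(16);
  mem_free(p);
  printf("blocks in use: %zu\n", blocks_in_use);
  return 0;
}

As in the patch, callers never see the switch: they always go through the same entry points, and the assert turns an unintended mid-run backend change into an immediate debug-build failure instead of silently mixing bookkeeping between allocators.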