Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSergey Sharybin <sergey@blender.org>2020-11-17 13:33:47 +0300
committerSergey Sharybin <sergey@blender.org>2020-11-19 18:17:48 +0300
commit4a8cf9d182e86ae5366b5a8d4cbfedbef5c3e15e (patch)
tree3900401afe5c37d56a95052e1c3b71570ba1c7cd /intern/guardedalloc
parent42b2ae5f694d554b1f4c58acd0afd26516b9ed20 (diff)
Guarded allocator: Add explicit switch to the lockfree implementation
Previously the only way to use the lockfree implementation was to start the executable and never switch to the guarded allocator. Surely, it is not possible to switch the implementation once any allocation has happened, but some tests are desired to test the lock-free implementation of the allocator. Those tests did not operate properly because the main entry point of the tests forces the guarded allocator to help catch bugs. This change makes it possible for those tests to ensure they operate on the lock-free implementation. There are no functional changes here; this is preparing boilerplate for upcoming work on the allocator tests themselves.
Diffstat (limited to 'intern/guardedalloc')
-rw-r--r--intern/guardedalloc/MEM_guardedalloc.h17
-rw-r--r--intern/guardedalloc/intern/mallocn.c35
2 files changed, 51 insertions, 1 deletions
diff --git a/intern/guardedalloc/MEM_guardedalloc.h b/intern/guardedalloc/MEM_guardedalloc.h
index f12eb0ac340..0f30f7bd1a5 100644
--- a/intern/guardedalloc/MEM_guardedalloc.h
+++ b/intern/guardedalloc/MEM_guardedalloc.h
@@ -226,7 +226,22 @@ void MEM_use_memleak_detection(bool enabled);
* tests. */
void MEM_enable_fail_on_memleak(void);
-/* Switch allocator to slower but fully guarded mode. */
+/* Switch allocator to fast mode, with less tracking.
+ *
+ * Use in production code where performance is the priority and exact details about allocations
+ * are not. This allocator keeps track of the number of allocations and the amount of allocated
+ * bytes, but it does not keep track of the names of allocated blocks.
+ *
+ * NOTE: The switch between allocator types can only happen before any allocation did happen. */
+void MEM_use_lockfree_allocator(void);
+
+/* Switch allocator to slow fully guarded mode.
+ *
+ * Use for debug purposes. This allocator contains a lock section around every allocator call,
+ * which makes it slow. What is gained with this is the ability to have a list of allocated blocks
+ * (in addition to the tracking of the number of allocations and the amount of allocated bytes).
+ *
+ * NOTE: The switch between allocator types can only happen before any allocation did happen. */
void MEM_use_guarded_allocator(void);
#ifdef __cplusplus
diff --git a/intern/guardedalloc/intern/mallocn.c b/intern/guardedalloc/intern/mallocn.c
index e85f8eb03ed..bf03fec9b7b 100644
--- a/intern/guardedalloc/intern/mallocn.c
+++ b/intern/guardedalloc/intern/mallocn.c
@@ -36,6 +36,7 @@
const char *malloc_conf = "background_thread:true,dirty_decay_ms:4000";
#endif
+/* NOTE: Keep in sync with MEM_use_lockfree_allocator(). */
size_t (*MEM_allocN_len)(const void *vmemh) = MEM_lockfree_allocN_len;
void (*MEM_freeN)(void *vmemh) = MEM_lockfree_freeN;
void *(*MEM_dupallocN)(const void *vmemh) = MEM_lockfree_dupallocN;
@@ -95,6 +96,40 @@ void aligned_free(void *ptr)
#endif
}
+void MEM_use_lockfree_allocator(void)
+{
+ /* NOTE: Keep in sync with static initialization of the variables. */
+
+ /* TODO(sergey): Find a way to de-duplicate the logic. Maybe by requiring an explicit call
+ * to guarded allocator initialization at an application startup. */
+
+ MEM_allocN_len = MEM_lockfree_allocN_len;
+ MEM_freeN = MEM_lockfree_freeN;
+ MEM_dupallocN = MEM_lockfree_dupallocN;
+ MEM_reallocN_id = MEM_lockfree_reallocN_id;
+ MEM_recallocN_id = MEM_lockfree_recallocN_id;
+ MEM_callocN = MEM_lockfree_callocN;
+ MEM_calloc_arrayN = MEM_lockfree_calloc_arrayN;
+ MEM_mallocN = MEM_lockfree_mallocN;
+ MEM_malloc_arrayN = MEM_lockfree_malloc_arrayN;
+ MEM_mallocN_aligned = MEM_lockfree_mallocN_aligned;
+ MEM_printmemlist_pydict = MEM_lockfree_printmemlist_pydict;
+ MEM_printmemlist = MEM_lockfree_printmemlist;
+ MEM_callbackmemlist = MEM_lockfree_callbackmemlist;
+ MEM_printmemlist_stats = MEM_lockfree_printmemlist_stats;
+ MEM_set_error_callback = MEM_lockfree_set_error_callback;
+ MEM_consistency_check = MEM_lockfree_consistency_check;
+ MEM_set_memory_debug = MEM_lockfree_set_memory_debug;
+ MEM_get_memory_in_use = MEM_lockfree_get_memory_in_use;
+ MEM_get_memory_blocks_in_use = MEM_lockfree_get_memory_blocks_in_use;
+ MEM_reset_peak_memory = MEM_lockfree_reset_peak_memory;
+ MEM_get_peak_memory = MEM_lockfree_get_peak_memory;
+
+#ifndef NDEBUG
+ MEM_name_ptr = MEM_lockfree_name_ptr;
+#endif
+}
+
void MEM_use_guarded_allocator(void)
{
MEM_allocN_len = MEM_guarded_allocN_len;