/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor(s): Brecht Van Lommel
 *                 Campbell Barton
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file guardedalloc/intern/mallocn.c
 *  \ingroup MEM
 *
 * Guarded memory allocation, and boundary-write detection.
 */

#include "MEM_guardedalloc.h"

/* to ensure strict conversions */
#include "../../source/blender/blenlib/BLI_strict_flags.h"

#include <assert.h>

#include "mallocn_intern.h"

size_t (*MEM_allocN_len)(const void *vmemh) = MEM_lockfree_allocN_len;
void (*MEM_freeN)(void *vmemh) = MEM_lockfree_freeN;
void *(*MEM_dupallocN)(const void *vmemh) = MEM_lockfree_dupallocN;
void *(*MEM_reallocN_id)(void *vmemh, size_t len, const char *str) = MEM_lockfree_reallocN_id;
void *(*MEM_recallocN_id)(void *vmemh, size_t len, const char *str) = MEM_lockfree_recallocN_id;
void *(*MEM_callocN)(size_t len, const char *str) = MEM_lockfree_callocN;
void *(*MEM_mallocN)(size_t len, const char *str) = MEM_lockfree_mallocN;
void *(*MEM_mallocN_aligned)(size_t len, size_t alignment, const char *str) = MEM_lockfree_mallocN_aligned;
void *(*MEM_mapallocN)(size_t len, const char *str) = MEM_lockfree_mapallocN;
void (*MEM_printmemlist_pydict)(void) = MEM_lockfree_printmemlist_pydict;
void (*MEM_printmemlist)(void) = MEM_lockfree_printmemlist;
void (*MEM_callbackmemlist)(void (*func)(void *)) = MEM_lockfree_callbackmemlist;
void (*MEM_printmemlist_stats)(void) = MEM_lockfree_printmemlist_stats;
void (*MEM_set_error_callback)(void (*func)(const char *)) = MEM_lockfree_set_error_callback;
bool (*MEM_check_memory_integrity)(void) = MEM_lockfree_check_memory_integrity;
void (*MEM_set_lock_callback)(void (*lock)(void), void (*unlock)(void)) = MEM_lockfree_set_lock_callback;
void (*MEM_set_memory_debug)(void) = MEM_lockfree_set_memory_debug;
uintptr_t (*MEM_get_memory_in_use)(void) = MEM_lockfree_get_memory_in_use;
uintptr_t (*MEM_get_mapped_memory_in_use)(void) = MEM_lockfree_get_mapped_memory_in_use;
unsigned int (*MEM_get_memory_blocks_in_use)(void) = MEM_lockfree_get_memory_blocks_in_use;
void (*MEM_reset_peak_memory)(void) = MEM_lockfree_reset_peak_memory;
uintptr_t (*MEM_get_peak_memory)(void) = MEM_lockfree_get_peak_memory;

#ifndef NDEBUG
const char *(*MEM_name_ptr)(void *vmemh) = MEM_lockfree_name_ptr;
#endif
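
/* Illustrative usage sketch (not part of the original sources): the MEM_* symbols
 * above are plain function pointers, so callers use them like ordinary functions
 * from MEM_guardedalloc.h. The struct and block names below are hypothetical.
 *
 *     typedef struct MyVerts { float (*co)[3]; int totvert; } MyVerts;
 *
 *     MyVerts *mv = MEM_callocN(sizeof(*mv), "MyVerts");        zero-initialized
 *     mv->co = MEM_mallocN(sizeof(*mv->co) * 8, "MyVerts co");  uninitialized
 *     mv->totvert = 8;
 *
 *     mv->co = MEM_reallocN_id(mv->co, sizeof(*mv->co) * 16, "MyVerts co");
 *
 *     MEM_freeN(mv->co);
 *     MEM_freeN(mv);
 */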

void *aligned_malloc(size_t size, size_t alignment)
{
#ifdef _WIN32
	return _aligned_malloc(size, alignment);
#elif defined(__APPLE__)
	/* On Mac OS X both the heap and the stack are guaranteed to be 16-byte
	 * aligned, so allocations work natively with SSE types without further work.
	 */
	assert(alignment == 16);
	return malloc(size);
#elif defined(__FreeBSD__) || defined(__NetBSD__)
	void *result;

	if (posix_memalign(&result, alignment, size)) {
		/* A non-zero return value signals an allocation error:
		 * either the allocation failed or the alignment value is invalid.
		 */
		return NULL;
	}
	return result;
#else  /* This is for Linux. */
	return memalign(alignment, size);
#endif
}

void aligned_free(void *ptr)
{
#ifdef _WIN32
	_aligned_free(ptr);
#else
	free(ptr);
#endif
}
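
/* Illustrative usage sketch (not part of the original sources): aligned_malloc()
 * must be paired with aligned_free(), because on Windows memory obtained from
 * _aligned_malloc() cannot be released with plain free(). The 16-byte alignment
 * is an example value, matching the only alignment accepted on Apple above.
 *
 *     float *vec = aligned_malloc(sizeof(float[4]), 16);
 *     if (vec) {
 *         ... use vec with 16-byte aligned SIMD loads/stores ...
 *         aligned_free(vec);
 *     }
 */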

void MEM_use_guarded_allocator(void)
{
	MEM_allocN_len = MEM_guarded_allocN_len;
	MEM_freeN = MEM_guarded_freeN;
	MEM_dupallocN = MEM_guarded_dupallocN;
	MEM_reallocN_id = MEM_guarded_reallocN_id;
	MEM_recallocN_id = MEM_guarded_recallocN_id;
	MEM_callocN = MEM_guarded_callocN;
	MEM_mallocN = MEM_guarded_mallocN;
	MEM_mallocN_aligned = MEM_guarded_mallocN_aligned;
	MEM_mapallocN = MEM_guarded_mapallocN;
	MEM_printmemlist_pydict = MEM_guarded_printmemlist_pydict;
	MEM_printmemlist = MEM_guarded_printmemlist;
	MEM_callbackmemlist = MEM_guarded_callbackmemlist;
	MEM_printmemlist_stats = MEM_guarded_printmemlist_stats;
	MEM_set_error_callback = MEM_guarded_set_error_callback;
	MEM_check_memory_integrity = MEM_guarded_check_memory_integrity;
	MEM_set_lock_callback = MEM_guarded_set_lock_callback;
	MEM_set_memory_debug = MEM_guarded_set_memory_debug;
	MEM_get_memory_in_use = MEM_guarded_get_memory_in_use;
	MEM_get_mapped_memory_in_use = MEM_guarded_get_mapped_memory_in_use;
	MEM_get_memory_blocks_in_use = MEM_guarded_get_memory_blocks_in_use;
	MEM_reset_peak_memory = MEM_guarded_reset_peak_memory;
	MEM_get_peak_memory = MEM_guarded_get_peak_memory;

#ifndef NDEBUG
	MEM_name_ptr = MEM_guarded_name_ptr;
#endif
}
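
/* Illustrative usage sketch (not part of the original sources): switching the
 * allocator. MEM_use_guarded_allocator() rebinds every MEM_* pointer above, so
 * it is assumed to be called once at startup, before any block has been
 * allocated through the lockfree defaults (in Blender this is typically driven
 * by a debug command-line option such as --debug-memory).
 *
 *     int main(int argc, char **argv)
 *     {
 *         if (argc > 1 && strcmp(argv[1], "--debug-memory") == 0) {
 *             MEM_use_guarded_allocator();
 *         }
 *         ... initialize and run the application ...
 *         return 0;
 *     }
 */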