git.blender.org/blender.git
author    Sergey Sharybin <sergey.vfx@gmail.com>  2016-11-15 14:16:26 +0300
committer Sergey Sharybin <sergey.vfx@gmail.com>  2016-11-15 14:16:26 +0300
commit    4ee08e9533593b0e7cf7f50b3c4c61eb5598c13e (patch)
tree      ba0fca216507394e81ae60205c374455d79ef009 /intern
parent    46b5cdaa4d1b9d78bbbd078e98f64de1e6c288a8 (diff)
Atomics: Make naming more obvious about which value is being returned
Diffstat (limited to 'intern')
-rw-r--r--  intern/atomic/atomic_ops.h                           18
-rw-r--r--  intern/atomic/intern/atomic_ops_ext.h                26
-rw-r--r--  intern/atomic/intern/atomic_ops_msvc.h                8
-rw-r--r--  intern/atomic/intern/atomic_ops_unix.h               16
-rw-r--r--  intern/cycles/kernel/kernel_passes.h                 16
-rw-r--r--  intern/cycles/util/util_atomic.h                      2
-rw-r--r--  intern/cycles/util/util_stats.h                       4
-rw-r--r--  intern/guardedalloc/intern/mallocn_guarded_impl.c    12
-rw-r--r--  intern/guardedalloc/intern/mallocn_lockfree_impl.c   24
9 files changed, 63 insertions, 63 deletions
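
The rename follows the convention of the GCC/Clang __sync builtins: an *_and_fetch function returns the value after the operation, while a fetch_and_* function (such as atomic_fetch_and_add_uint32, which keeps its name below) returns the value before it. A minimal sketch of the two return conventions, using hypothetical example_ wrappers over the raw builtins rather than Blender's atomic headers:

#include <assert.h>
#include <stdint.h>

/* Returns the NEW value, i.e. what the renamed atomic_add_and_fetch_* report. */
static uint32_t example_add_and_fetch(uint32_t *p, uint32_t x)
{
	return __sync_add_and_fetch(p, x);
}

/* Returns the OLD value, i.e. what the atomic_fetch_and_add_* variants report. */
static uint32_t example_fetch_and_add(uint32_t *p, uint32_t x)
{
	return __sync_fetch_and_add(p, x);
}

int main(void)
{
	uint32_t v = 10;
	uint32_t after = example_add_and_fetch(&v, 5);   /* after == 15, v == 15 */
	uint32_t before = example_fetch_and_add(&v, 5);  /* before == 15, v == 20 */
	assert(after == 15 && before == 15 && v == 20);
	return 0;
}
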
diff --git a/intern/atomic/atomic_ops.h b/intern/atomic/atomic_ops.h
index f78eab7951f..c3926fdd68f 100644
--- a/intern/atomic/atomic_ops.h
+++ b/intern/atomic/atomic_ops.h
@@ -77,13 +77,13 @@
/* Function prototypes. */
#if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x);
ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new);
#endif
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x);
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x);
ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new);
ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x);
@@ -93,18 +93,18 @@ ATOMIC_INLINE uint32_t atomic_fetch_and_and_uint32(uint32_t *p, uint32_t x);
ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b);
ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b);
-ATOMIC_INLINE size_t atomic_add_z(size_t *p, size_t x);
-ATOMIC_INLINE size_t atomic_sub_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_add_and_fetch_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x);
ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new);
-ATOMIC_INLINE unsigned atomic_add_u(unsigned *p, unsigned x);
-ATOMIC_INLINE unsigned atomic_sub_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_add_and_fetch_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x);
ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new);
/* WARNING! Float 'atomics' are really faked ones, those are actually closer to some kind of spinlock-sync'ed operation,
* which means they are only efficient if collisions are highly unlikely (i.e. if probability of two threads
* working on the same pointer at the same time is very low). */
-ATOMIC_INLINE float atomic_add_fl(float *p, const float x);
+ATOMIC_INLINE float atomic_add_and_fetch_fl(float *p, const float x);
/******************************************************************************/
/* Include system-dependent implementations. */
diff --git a/intern/atomic/intern/atomic_ops_ext.h b/intern/atomic/intern/atomic_ops_ext.h
index 4065299d2ea..74ed327c1b7 100644
--- a/intern/atomic/intern/atomic_ops_ext.h
+++ b/intern/atomic/intern/atomic_ops_ext.h
@@ -56,25 +56,25 @@
/******************************************************************************/
/* size_t operations. */
-ATOMIC_INLINE size_t atomic_add_z(size_t *p, size_t x)
+ATOMIC_INLINE size_t atomic_add_and_fetch_z(size_t *p, size_t x)
{
assert(sizeof(size_t) == LG_SIZEOF_PTR);
#if (LG_SIZEOF_PTR == 8)
- return (size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x);
+ return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 4)
- return (size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x);
+ return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
-ATOMIC_INLINE size_t atomic_sub_z(size_t *p, size_t x)
+ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x)
{
assert(sizeof(size_t) == LG_SIZEOF_PTR);
#if (LG_SIZEOF_PTR == 8)
- return (size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+ return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
#elif (LG_SIZEOF_PTR == 4)
- return (size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+ return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
#endif
}
@@ -91,25 +91,25 @@ ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new)
/******************************************************************************/
/* unsigned operations. */
-ATOMIC_INLINE unsigned atomic_add_u(unsigned *p, unsigned x)
+ATOMIC_INLINE unsigned atomic_add_and_fetch_u(unsigned *p, unsigned x)
{
assert(sizeof(unsigned) == LG_SIZEOF_INT);
#if (LG_SIZEOF_INT == 8)
- return (unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x);
+ return (unsigned)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 4)
- return (unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x);
+ return (unsigned)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
-ATOMIC_INLINE unsigned atomic_sub_u(unsigned *p, unsigned x)
+ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x)
{
assert(sizeof(unsigned) == LG_SIZEOF_INT);
#if (LG_SIZEOF_INT == 8)
- return (unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+ return (unsigned)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
#elif (LG_SIZEOF_INT == 4)
- return (unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+ return (unsigned)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
#endif
}
@@ -127,7 +127,7 @@ ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new)
/******************************************************************************/
/* float operations. */
-ATOMIC_INLINE float atomic_add_fl(float *p, const float x)
+ATOMIC_INLINE float atomic_add_and_fetch_fl(float *p, const float x)
{
assert(sizeof(float) == sizeof(uint32_t));
diff --git a/intern/atomic/intern/atomic_ops_msvc.h b/intern/atomic/intern/atomic_ops_msvc.h
index 3461719a4e7..e7aae4a74a0 100644
--- a/intern/atomic/intern/atomic_ops_msvc.h
+++ b/intern/atomic/intern/atomic_ops_msvc.h
@@ -43,12 +43,12 @@
/******************************************************************************/
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
{
return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x) + x;
}
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
{
return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x)) - x;
}
@@ -61,12 +61,12 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
/******************************************************************************/
/* 32-bit operations. */
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
{
return InterlockedExchangeAdd(p, x) + x;
}
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
{
return InterlockedExchangeAdd(p, -((int32_t)x)) - x;
}
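
On Windows, InterlockedExchangeAdd and InterlockedExchangeAdd64 return the value held before the addition, which is why the wrappers above add (or subtract) x once more to report the post-operation value promised by the new *_and_fetch names. A Windows-only sketch of that adjustment, with a hypothetical example_ name rather than the header's ATOMIC_INLINE functions:

#include <stdint.h>
#include <windows.h>

static uint64_t example_add_and_fetch_u64(uint64_t *p, uint64_t x)
{
	/* The intrinsic hands back the old value; old + x is the value this
	 * thread's addition produced, matching the _and_fetch contract. */
	return (uint64_t)InterlockedExchangeAdd64((volatile int64_t *)p, (int64_t)x) + x;
}
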
diff --git a/intern/atomic/intern/atomic_ops_unix.h b/intern/atomic/intern/atomic_ops_unix.h
index e63f09c76c5..3d00f91be25 100644
--- a/intern/atomic/intern/atomic_ops_unix.h
+++ b/intern/atomic/intern/atomic_ops_unix.h
@@ -58,12 +58,12 @@
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
# if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
{
return __sync_add_and_fetch(p, x);
}
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
{
return __sync_sub_and_fetch(p, x);
}
@@ -73,7 +73,7 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
return __sync_val_compare_and_swap(v, old, _new);
}
# elif (defined(__amd64__) || defined(__x86_64__))
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
{
asm volatile (
"lock; xaddq %0, %1;"
@@ -83,7 +83,7 @@ ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
return x;
}
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
{
x = (uint64_t)(-(int64_t)x);
asm volatile (
@@ -112,12 +112,12 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
/******************************************************************************/
/* 32-bit operations. */
#if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
{
return __sync_add_and_fetch(p, x);
}
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
{
return __sync_sub_and_fetch(p, x);
}
@@ -127,7 +127,7 @@ ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _ne
return __sync_val_compare_and_swap(v, old, _new);
}
#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
{
uint32_t ret = x;
asm volatile (
@@ -138,7 +138,7 @@ ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
return ret+x;
}
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
{
uint32_t ret = (uint32_t)(-(int32_t)x);
asm volatile (
diff --git a/intern/cycles/kernel/kernel_passes.h b/intern/cycles/kernel/kernel_passes.h
index 20cf3fa931b..7aec47e4957 100644
--- a/intern/cycles/kernel/kernel_passes.h
+++ b/intern/cycles/kernel/kernel_passes.h
@@ -20,7 +20,7 @@ ccl_device_inline void kernel_write_pass_float(ccl_global float *buffer, int sam
{
ccl_global float *buf = buffer;
#if defined(__SPLIT_KERNEL__) && defined(__WORK_STEALING__)
- atomic_add_float(buf, value);
+ atomic_add_and_fetch_float(buf, value);
#else
*buf = (sample == 0)? value: *buf + value;
#endif // __SPLIT_KERNEL__ && __WORK_STEALING__
@@ -33,9 +33,9 @@ ccl_device_inline void kernel_write_pass_float3(ccl_global float *buffer, int sa
ccl_global float *buf_y = buffer + 1;
ccl_global float *buf_z = buffer + 2;
- atomic_add_float(buf_x, value.x);
- atomic_add_float(buf_y, value.y);
- atomic_add_float(buf_z, value.z);
+ atomic_add_and_fetch_float(buf_x, value.x);
+ atomic_add_and_fetch_float(buf_y, value.y);
+ atomic_add_and_fetch_float(buf_z, value.z);
#else
ccl_global float3 *buf = (ccl_global float3*)buffer;
*buf = (sample == 0)? value: *buf + value;
@@ -50,10 +50,10 @@ ccl_device_inline void kernel_write_pass_float4(ccl_global float *buffer, int sa
ccl_global float *buf_z = buffer + 2;
ccl_global float *buf_w = buffer + 3;
- atomic_add_float(buf_x, value.x);
- atomic_add_float(buf_y, value.y);
- atomic_add_float(buf_z, value.z);
- atomic_add_float(buf_w, value.w);
+ atomic_add_and_fetch_float(buf_x, value.x);
+ atomic_add_and_fetch_float(buf_y, value.y);
+ atomic_add_and_fetch_float(buf_z, value.z);
+ atomic_add_and_fetch_float(buf_w, value.w);
#else
ccl_global float4 *buf = (ccl_global float4*)buffer;
*buf = (sample == 0)? value: *buf + value;
diff --git a/intern/cycles/util/util_atomic.h b/intern/cycles/util/util_atomic.h
index 1d1e2963348..433e41fbbb6 100644
--- a/intern/cycles/util/util_atomic.h
+++ b/intern/cycles/util/util_atomic.h
@@ -39,7 +39,7 @@ ATOMIC_INLINE void atomic_update_max_z(size_t *maximum_value, size_t value)
/* Float atomics implementation credits:
* http://suhorukov.blogspot.in/2011/12/opencl-11-atomic-operations-on-floating.html
*/
-ccl_device_inline void atomic_add_float(volatile ccl_global float *source,
+ccl_device_inline void atomic_add_and_fetch_float(volatile ccl_global float *source,
const float operand)
{
union {
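
The warning in atomic_ops.h ("float 'atomics' are really faked ones") and the blog post credited above both point at the same pattern: reinterpret the float's bits as a 32-bit integer and retry a compare-and-swap until no other thread has raced in between. A minimal sketch of that pattern, assuming a plain __sync CAS and a hypothetical example_ name rather than Cycles' ccl_device_inline wrapper:

#include <stdint.h>
#include <string.h>

static float example_atomic_add_and_fetch_fl(float *p, const float x)
{
	union { uint32_t u; float f; } oldval, newval;
	do {
		memcpy(&oldval.u, p, sizeof(uint32_t));  /* snapshot the current bits */
		newval.f = oldval.f + x;
		/* Install the new bits only if *p still holds the snapshot;
		 * otherwise another thread won the race and we retry. */
	} while (__sync_val_compare_and_swap((uint32_t *)p, oldval.u, newval.u) != oldval.u);
	return newval.f;
}
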
diff --git a/intern/cycles/util/util_stats.h b/intern/cycles/util/util_stats.h
index b970b017270..c21a8488c81 100644
--- a/intern/cycles/util/util_stats.h
+++ b/intern/cycles/util/util_stats.h
@@ -29,13 +29,13 @@ public:
explicit Stats(static_init_t) {}
void mem_alloc(size_t size) {
- atomic_add_z(&mem_used, size);
+ atomic_add_and_fetch_z(&mem_used, size);
atomic_update_max_z(&mem_peak, mem_used);
}
void mem_free(size_t size) {
assert(mem_used >= size);
- atomic_sub_z(&mem_used, size);
+ atomic_sub_and_fetch_z(&mem_used, size);
}
size_t mem_used;
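
Stats::mem_alloc pairs the add with atomic_update_max_z so the peak is tracked lock-free; that helper (like update_maximum in the guardedalloc files below) is usually a small CAS loop that only publishes the new value when it actually exceeds the stored peak. A hedged sketch of that pattern, using a hypothetical example_ name and the raw __sync CAS builtin:

#include <stddef.h>

static void example_update_max_z(size_t *maximum_value, size_t value)
{
	size_t prev = *maximum_value;
	while (prev < value) {
		/* Try to install the larger value; on failure, reload and retest,
		 * since another thread may already have raised the peak past us. */
		size_t seen = __sync_val_compare_and_swap(maximum_value, prev, value);
		if (seen == prev) {
			break;
		}
		prev = seen;
	}
}
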
diff --git a/intern/guardedalloc/intern/mallocn_guarded_impl.c b/intern/guardedalloc/intern/mallocn_guarded_impl.c
index 1933e9d3ee3..76b7e072321 100644
--- a/intern/guardedalloc/intern/mallocn_guarded_impl.c
+++ b/intern/guardedalloc/intern/mallocn_guarded_impl.c
@@ -505,8 +505,8 @@ static void make_memhead_header(MemHead *memh, size_t len, const char *str)
memt = (MemTail *)(((char *) memh) + sizeof(MemHead) + len);
memt->tag3 = MEMTAG3;
- atomic_add_u(&totblock, 1);
- atomic_add_z(&mem_in_use, len);
+ atomic_add_and_fetch_u(&totblock, 1);
+ atomic_add_and_fetch_z(&mem_in_use, len);
mem_lock_thread();
addtail(membase, &memh->next);
@@ -638,7 +638,7 @@ void *MEM_guarded_mapallocN(size_t len, const char *str)
if (memh != (MemHead *)-1) {
make_memhead_header(memh, len, str);
memh->mmap = 1;
- atomic_add_z(&mmap_in_use, len);
+ atomic_add_and_fetch_z(&mmap_in_use, len);
mem_lock_thread();
peak_mem = mmap_in_use > peak_mem ? mmap_in_use : peak_mem;
mem_unlock_thread();
@@ -1007,8 +1007,8 @@ static void rem_memblock(MemHead *memh)
}
mem_unlock_thread();
- atomic_sub_u(&totblock, 1);
- atomic_sub_z(&mem_in_use, memh->len);
+ atomic_sub_and_fetch_u(&totblock, 1);
+ atomic_sub_and_fetch_z(&mem_in_use, memh->len);
#ifdef DEBUG_MEMDUPLINAME
if (memh->need_free_name)
@@ -1016,7 +1016,7 @@ static void rem_memblock(MemHead *memh)
#endif
if (memh->mmap) {
- atomic_sub_z(&mmap_in_use, memh->len);
+ atomic_sub_and_fetch_z(&mmap_in_use, memh->len);
#if defined(WIN32)
/* our windows mmap implementation is not thread safe */
mem_lock_thread();
diff --git a/intern/guardedalloc/intern/mallocn_lockfree_impl.c b/intern/guardedalloc/intern/mallocn_lockfree_impl.c
index a80d67c3e80..ce8a5b29ece 100644
--- a/intern/guardedalloc/intern/mallocn_lockfree_impl.c
+++ b/intern/guardedalloc/intern/mallocn_lockfree_impl.c
@@ -142,11 +142,11 @@ void MEM_lockfree_freeN(void *vmemh)
return;
}
- atomic_sub_u(&totblock, 1);
- atomic_sub_z(&mem_in_use, len);
+ atomic_sub_and_fetch_u(&totblock, 1);
+ atomic_sub_and_fetch_z(&mem_in_use, len);
if (MEMHEAD_IS_MMAP(memh)) {
- atomic_sub_z(&mmap_in_use, len);
+ atomic_sub_and_fetch_z(&mmap_in_use, len);
#if defined(WIN32)
/* our windows mmap implementation is not thread safe */
mem_lock_thread();
@@ -287,8 +287,8 @@ void *MEM_lockfree_callocN(size_t len, const char *str)
if (LIKELY(memh)) {
memh->len = len;
- atomic_add_u(&totblock, 1);
- atomic_add_z(&mem_in_use, len);
+ atomic_add_and_fetch_u(&totblock, 1);
+ atomic_add_and_fetch_z(&mem_in_use, len);
update_maximum(&peak_mem, mem_in_use);
return PTR_FROM_MEMHEAD(memh);
@@ -312,8 +312,8 @@ void *MEM_lockfree_mallocN(size_t len, const char *str)
}
memh->len = len;
- atomic_add_u(&totblock, 1);
- atomic_add_z(&mem_in_use, len);
+ atomic_add_and_fetch_u(&totblock, 1);
+ atomic_add_and_fetch_z(&mem_in_use, len);
update_maximum(&peak_mem, mem_in_use);
return PTR_FROM_MEMHEAD(memh);
@@ -361,8 +361,8 @@ void *MEM_lockfree_mallocN_aligned(size_t len, size_t alignment, const char *str
memh->len = len | (size_t) MEMHEAD_ALIGN_FLAG;
memh->alignment = (short) alignment;
- atomic_add_u(&totblock, 1);
- atomic_add_z(&mem_in_use, len);
+ atomic_add_and_fetch_u(&totblock, 1);
+ atomic_add_and_fetch_z(&mem_in_use, len);
update_maximum(&peak_mem, mem_in_use);
return PTR_FROM_MEMHEAD(memh);
@@ -396,9 +396,9 @@ void *MEM_lockfree_mapallocN(size_t len, const char *str)
if (memh != (MemHead *)-1) {
memh->len = len | (size_t) MEMHEAD_MMAP_FLAG;
- atomic_add_u(&totblock, 1);
- atomic_add_z(&mem_in_use, len);
- atomic_add_z(&mmap_in_use, len);
+ atomic_add_and_fetch_u(&totblock, 1);
+ atomic_add_and_fetch_z(&mem_in_use, len);
+ atomic_add_and_fetch_z(&mmap_in_use, len);
update_maximum(&peak_mem, mem_in_use);
update_maximum(&peak_mem, mmap_in_use);