From a284d040939408a07def751db7d255e3a26e756b Mon Sep 17 00:00:00 2001
From: Sergey Sharybin
Date: Tue, 15 Nov 2016 13:41:08 +0100
Subject: Atomics: Add some extra utility functions

Also fixed the semantics of fetch-and-add in the assembler
implementation, which did not seem to match the naming.
---
 intern/atomic/atomic_ops.h             |  6 +++++
 intern/atomic/intern/atomic_ops_ext.h  | 44 ++++++++++++++++++++++++++++++++++
 intern/atomic/intern/atomic_ops_msvc.h | 10 ++++++++
 intern/atomic/intern/atomic_ops_unix.h | 24 +++++++++++++++++--
 4 files changed, 82 insertions(+), 2 deletions(-)

diff --git a/intern/atomic/atomic_ops.h b/intern/atomic/atomic_ops.h
index c3926fdd68f..1107deddf94 100644
--- a/intern/atomic/atomic_ops.h
+++ b/intern/atomic/atomic_ops.h
@@ -79,6 +79,8 @@
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
 ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x);
 ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x);
 ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new);
 #endif
 
@@ -95,10 +97,14 @@ ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b);
 ATOMIC_INLINE size_t atomic_add_and_fetch_z(size_t *p, size_t x);
 ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_fetch_and_add_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_fetch_and_sub_z(size_t *p, size_t x);
 ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new);
 
 ATOMIC_INLINE unsigned atomic_add_and_fetch_u(unsigned *p, unsigned x);
 ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_fetch_and_add_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_fetch_and_sub_u(unsigned *p, unsigned x);
 ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new);
 
 /* WARNING! Float 'atomics' are really faked ones, those are actually closer to some kind of
 * spinlock-sync'ed operation,
diff --git a/intern/atomic/intern/atomic_ops_ext.h b/intern/atomic/intern/atomic_ops_ext.h
index 74ed327c1b7..8421aa72192 100644
--- a/intern/atomic/intern/atomic_ops_ext.h
+++ b/intern/atomic/intern/atomic_ops_ext.h
@@ -78,6 +78,28 @@ ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x)
 #endif
 }
 
+ATOMIC_INLINE size_t atomic_fetch_and_add_z(size_t *p, size_t x)
+{
+	assert(sizeof(size_t) == LG_SIZEOF_PTR);
+
+#if (LG_SIZEOF_PTR == 8)
+	return (size_t)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_PTR == 4)
+	return (size_t)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
+ATOMIC_INLINE size_t atomic_fetch_and_sub_z(size_t *p, size_t x)
+{
+	assert(sizeof(size_t) == LG_SIZEOF_PTR);
+
+#if (LG_SIZEOF_PTR == 8)
+	return (size_t)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+#elif (LG_SIZEOF_PTR == 4)
+	return (size_t)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+#endif
+}
+
 ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new)
 {
 	assert(sizeof(size_t) == LG_SIZEOF_PTR);
@@ -113,6 +135,28 @@ ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x)
 #endif
 }
 
+ATOMIC_INLINE unsigned atomic_fetch_and_add_u(unsigned *p, unsigned x)
+{
+	assert(sizeof(unsigned) == LG_SIZEOF_INT);
+
+#if (LG_SIZEOF_INT == 8)
+	return (unsigned)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_INT == 4)
+	return (unsigned)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
+ATOMIC_INLINE unsigned atomic_fetch_and_sub_u(unsigned *p, unsigned x)
+{
+	assert(sizeof(unsigned) == LG_SIZEOF_INT);
+
+#if (LG_SIZEOF_INT == 8)
+	return (unsigned)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+#elif (LG_SIZEOF_INT == 4)
+	return (unsigned)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+#endif
+}
+
 ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new)
 {
 	assert(sizeof(unsigned) == LG_SIZEOF_INT);
diff --git a/intern/atomic/intern/atomic_ops_msvc.h b/intern/atomic/intern/atomic_ops_msvc.h
index e7aae4a74a0..034ac1e3e53 100644
--- a/intern/atomic/intern/atomic_ops_msvc.h
+++ b/intern/atomic/intern/atomic_ops_msvc.h
@@ -57,6 +57,16 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
 {
 	return InterlockedCompareExchange64((int64_t *)v, _new, old);
 }
+
+ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
+{
+	return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x);
+}
+
+ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
+{
+	return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x));
+}
 #endif
 
 /******************************************************************************/
diff --git a/intern/atomic/intern/atomic_ops_unix.h b/intern/atomic/intern/atomic_ops_unix.h
index 3d00f91be25..0a3322ad2b1 100644
--- a/intern/atomic/intern/atomic_ops_unix.h
+++ b/intern/atomic/intern/atomic_ops_unix.h
@@ -68,12 +68,22 @@ ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 	return __sync_sub_and_fetch(p, x);
 }
 
+ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
+{
+	return __sync_fetch_and_add(p, x);
+}
+
+ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
+{
+	return __sync_fetch_and_sub(p, x);
+}
+
 ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
 {
 	return __sync_val_compare_and_swap(v, old, _new);
 }
 # elif (defined(__amd64__) || defined(__x86_64__))
-ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
 {
 	asm volatile (
 	        "lock; xaddq %0, %1;"
@@ -83,7 +93,7 @@ ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
 	return x;
 }
 
-ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
 {
 	x = (uint64_t)(-(int64_t)x);
 	asm volatile (
@@ -94,6 +104,16 @@ ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 	return x;
 }
 
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
+{
+	return atomic_fetch_and_add_uint64(p, x) + x;
+}
+
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
+{
+	return atomic_fetch_and_sub_uint64(p, x) - x;
+}
+
 ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
 {
 	uint64_t ret;
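
A quick illustration of the semantics the rename above hinges on:
atomic_fetch_and_add_*() returns the value the variable held *before* the
addition (which is what x86's xaddq produces), while atomic_add_and_fetch_*()
returns the value *after* it. The sketch below is a hypothetical
single-threaded driver, assuming the header is reachable as "atomic_ops.h";
it is not part of the patch.

#include <stdint.h>
#include <stdio.h>

#include "atomic_ops.h"  /* assumed include path; adjust to your build setup */

static uint64_t next_index = 0;

int main(void)
{
	/* fetch-and-add returns the OLD value, so successive callers see
	 * 0, 1, 2, ... and can each claim a unique slot. */
	for (int i = 0; i < 3; i++) {
		uint64_t slot = atomic_fetch_and_add_uint64(&next_index, 1);
		printf("claimed slot %llu\n", (unsigned long long)slot);
	}

	/* add-and-fetch returns the NEW value; adding 0 just reads the
	 * current total through the same atomic interface. */
	uint64_t total = atomic_add_and_fetch_uint64(&next_index, 0);
	printf("slots handed out: %llu\n", (unsigned long long)total);
	return 0;
}

Note the design choice visible throughout the patch: every fetch_and_sub
variant is implemented as an add of the two's-complement negation, e.g.
(uint64_t)-((int64_t)x), so the assembler path needs only xaddq and no
separate subtract primitive.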