git.blender.org/blender.git
author    Sergey Sharybin <sergey.vfx@gmail.com>  2016-11-15 15:41:08 +0300
committer Sergey Sharybin <sergey.vfx@gmail.com>  2016-11-15 15:41:08 +0300
commit    a284d040939408a07def751db7d255e3a26e756b (patch)
tree      e1ca52819953a54212082a0e98a7880fd0dc33be /intern/atomic
parent    4ee08e9533593b0e7cf7f50b3c4c61eb5598c13e (diff)
Atomics: Add some extra utility functions
Also fixed the semantics of fetch-and-add in the assembler implementation; it did not seem to match the naming.
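
The distinction this commit relies on: fetch-and-add returns the value a variable held *before* the addition, while add-and-fetch returns the value *after* it. A minimal standalone sketch (illustration only, not part of the patch) of the two contracts, using C11 atomics for portability:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        _Atomic uint64_t v = 10;

        /* fetch-and-add returns the value held before the addition. */
        uint64_t before = atomic_fetch_add(&v, 5);      /* before == 10, v == 15 */

        /* add-and-fetch returns the value after the addition; C11 has no
           direct primitive for it, so it is derived from fetch_add. */
        uint64_t after = atomic_fetch_add(&v, 5) + 5;   /* after == 20, v == 20 */

        printf("before=%llu after=%llu\n",
               (unsigned long long)before, (unsigned long long)after);
        return 0;
    }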
Diffstat (limited to 'intern/atomic')
-rw-r--r--  intern/atomic/atomic_ops.h             |  6
-rw-r--r--  intern/atomic/intern/atomic_ops_ext.h  | 44
-rw-r--r--  intern/atomic/intern/atomic_ops_msvc.h | 10
-rw-r--r--  intern/atomic/intern/atomic_ops_unix.h | 24
4 files changed, 82 insertions, 2 deletions
diff --git a/intern/atomic/atomic_ops.h b/intern/atomic/atomic_ops.h
index c3926fdd68f..1107deddf94 100644
--- a/intern/atomic/atomic_ops.h
+++ b/intern/atomic/atomic_ops.h
@@ -79,6 +79,8 @@
#if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x);
ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x);
ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new);
#endif
@@ -95,10 +97,14 @@ ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b);
ATOMIC_INLINE size_t atomic_add_and_fetch_z(size_t *p, size_t x);
ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_fetch_and_add_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_fetch_and_sub_z(size_t *p, size_t x);
ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new);
ATOMIC_INLINE unsigned atomic_add_and_fetch_u(unsigned *p, unsigned x);
ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_fetch_and_add_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_fetch_and_sub_u(unsigned *p, unsigned x);
ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new);
/* WARNING! Float 'atomics' are really faked ones, those are actually closer to some kind of spinlock-sync'ed operation,
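
A typical reason to want the returns-old-value flavour declared above is handing out unique indices. A hypothetical usage sketch (reserve_slot and next_slot are illustrative names, not part of the patch):

    /* Each caller receives a distinct index, because
       atomic_fetch_and_add_z returns the pre-increment value. */
    #include "atomic_ops.h"

    static size_t next_slot = 0;

    size_t reserve_slot(void)
    {
        return atomic_fetch_and_add_z(&next_slot, 1);
    }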
diff --git a/intern/atomic/intern/atomic_ops_ext.h b/intern/atomic/intern/atomic_ops_ext.h
index 74ed327c1b7..8421aa72192 100644
--- a/intern/atomic/intern/atomic_ops_ext.h
+++ b/intern/atomic/intern/atomic_ops_ext.h
@@ -78,6 +78,28 @@ ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x)
#endif
}
+ATOMIC_INLINE size_t atomic_fetch_and_add_z(size_t *p, size_t x)
+{
+ assert(sizeof(size_t) == LG_SIZEOF_PTR);
+
+#if (LG_SIZEOF_PTR == 8)
+ return (size_t)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_PTR == 4)
+ return (size_t)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
+ATOMIC_INLINE size_t atomic_fetch_and_sub_z(size_t *p, size_t x)
+{
+ assert(sizeof(size_t) == LG_SIZEOF_PTR);
+
+#if (LG_SIZEOF_PTR == 8)
+ return (size_t)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+#elif (LG_SIZEOF_PTR == 4)
+ return (size_t)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+#endif
+}
+
ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new)
{
assert(sizeof(size_t) == LG_SIZEOF_PTR);
@@ -113,6 +135,28 @@ ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x)
#endif
}
+ATOMIC_INLINE unsigned atomic_fetch_and_add_u(unsigned *p, unsigned x)
+{
+ assert(sizeof(unsigned) == LG_SIZEOF_INT);
+
+#if (LG_SIZEOF_INT == 8)
+ return (unsigned)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)x);
+#elif (LG_SIZEOF_INT == 4)
+ return (unsigned)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)x);
+#endif
+}
+
+ATOMIC_INLINE unsigned atomic_fetch_and_sub_u(unsigned *p, unsigned x)
+{
+ assert(sizeof(unsigned) == LG_SIZEOF_INT);
+
+#if (LG_SIZEOF_INT == 8)
+ return (unsigned)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+#elif (LG_SIZEOF_INT == 4)
+ return (unsigned)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+#endif
+}
+
ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new)
{
assert(sizeof(unsigned) == LG_SIZEOF_INT);
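
Both new fetch-and-sub wrappers above route through fetch-and-add with a negated operand. A standalone demonstration (not part of the patch) that adding the two's-complement of x is the same as subtracting x in unsigned arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t v = 100, x = 7;
        /* (uint32_t)-((int32_t)x) wraps to UINT32_MAX - x + 1, so the
           addition below is subtraction modulo 2^32. */
        assert(v + (uint32_t)-((int32_t)x) == v - x);  /* 93 */
        return 0;
    }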
diff --git a/intern/atomic/intern/atomic_ops_msvc.h b/intern/atomic/intern/atomic_ops_msvc.h
index e7aae4a74a0..034ac1e3e53 100644
--- a/intern/atomic/intern/atomic_ops_msvc.h
+++ b/intern/atomic/intern/atomic_ops_msvc.h
@@ -57,6 +57,16 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
{
return InterlockedCompareExchange64((int64_t *)v, _new, old);
}
+
+ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
+{
+ return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x);
+}
+
+ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
+{
+ return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x));
+}
#endif
/******************************************************************************/
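
On the MSVC side, InterlockedExchangeAdd64 already returns the initial value of the addend, so it maps directly onto the fetch-and-add contract, and subtraction reuses it with a negated operand. A minimal standalone check (not part of the patch):

    #include <assert.h>
    #include <windows.h>

    int main(void)
    {
        volatile LONG64 v = 10;
        LONG64 before = InterlockedExchangeAdd64(&v, 5);
        assert(before == 10 && v == 15);
        return 0;
    }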
diff --git a/intern/atomic/intern/atomic_ops_unix.h b/intern/atomic/intern/atomic_ops_unix.h
index 3d00f91be25..0a3322ad2b1 100644
--- a/intern/atomic/intern/atomic_ops_unix.h
+++ b/intern/atomic/intern/atomic_ops_unix.h
@@ -68,12 +68,22 @@ ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
return __sync_sub_and_fetch(p, x);
}
+ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
+{
+ return __sync_fetch_and_add(p, x);
+}
+
+ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
+{
+ return __sync_fetch_and_sub(p, x);
+}
+
ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
{
return __sync_val_compare_and_swap(v, old, _new);
}
# elif (defined(__amd64__) || defined(__x86_64__))
-ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
{
asm volatile (
"lock; xaddq %0, %1;"
@@ -83,7 +93,7 @@ ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
return x;
}
-ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
{
x = (uint64_t)(-(int64_t)x);
asm volatile (
@@ -94,6 +104,16 @@ ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
return x;
}
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
+{
+ return atomic_fetch_and_add_uint64(p, x) + x;
+}
+
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
+{
+ return atomic_fetch_and_sub_uint64(p, x) - x;
+}
+
ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
{
uint64_t ret;
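
For reference, the x86-64 rewrite above works because lock xadd exchanges the source register with the memory operand while adding, so the previous contents of *p end up in x: the natural primitive is fetch-and-add, and the and-fetch variants are derived by re-applying the operand. The same old-versus-new contract holds for the GCC builtins used in the non-assembler path, as this standalone check (not part of the patch) shows:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t v = 10;
        assert(__sync_fetch_and_add(&v, 5) == 10);  /* old value; v is now 15 */
        assert(__sync_add_and_fetch(&v, 5) == 20);  /* new value; v is now 20 */
        return 0;
    }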