git.blender.org/blender.git
Diffstat (limited to 'intern/atomic/intern/atomic_ops_unix.h')
-rw-r--r--  intern/atomic/intern/atomic_ops_unix.h | 161
1 file changed, 158 insertions(+), 3 deletions(-)
diff --git a/intern/atomic/intern/atomic_ops_unix.h b/intern/atomic/intern/atomic_ops_unix.h
index 0a3322ad2b1..783a30f743b 100644
--- a/intern/atomic/intern/atomic_ops_unix.h
+++ b/intern/atomic/intern/atomic_ops_unix.h
@@ -58,6 +58,7 @@
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
# if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
+/* Unsigned */
ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
{
return __sync_add_and_fetch(p, x);
@@ -82,7 +83,35 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
{
return __sync_val_compare_and_swap(v, old, _new);
}
+
+/* Signed */
+ATOMIC_INLINE int64_t atomic_add_and_fetch_int64(int64_t *p, int64_t x)
+{
+ return __sync_add_and_fetch(p, x);
+}
+
+ATOMIC_INLINE int64_t atomic_sub_and_fetch_int64(int64_t *p, int64_t x)
+{
+ return __sync_sub_and_fetch(p, x);
+}
+
+ATOMIC_INLINE int64_t atomic_fetch_and_add_int64(int64_t *p, int64_t x)
+{
+ return __sync_fetch_and_add(p, x);
+}
+
+ATOMIC_INLINE int64_t atomic_fetch_and_sub_int64(int64_t *p, int64_t x)
+{
+ return __sync_fetch_and_sub(p, x);
+}
+
+ATOMIC_INLINE int64_t atomic_cas_int64(int64_t *v, int64_t old, int64_t _new)
+{
+ return __sync_val_compare_and_swap(v, old, _new);
+}
+
# elif (defined(__amd64__) || defined(__x86_64__))
+/* Unsigned */
ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
{
asm volatile (
@@ -124,6 +153,49 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
: "memory");
return ret;
}
+
+/* Signed */
+ATOMIC_INLINE int64_t atomic_fetch_and_add_int64(int64_t *p, int64_t x)
+{
+ asm volatile (
+ "lock; xaddq %0, %1;"
+ : "+r" (x), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
+ return x;
+}
+
+ATOMIC_INLINE int64_t atomic_fetch_and_sub_int64(int64_t *p, int64_t x)
+{
+ x = -x;
+ asm volatile (
+ "lock; xaddq %0, %1;"
+ : "+r" (x), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
+ return x;
+}
+
+ATOMIC_INLINE int64_t atomic_add_and_fetch_int64(int64_t *p, int64_t x)
+{
+ return atomic_fetch_and_add_int64(p, x) + x;
+}
+
+ATOMIC_INLINE int64_t atomic_sub_and_fetch_int64(int64_t *p, int64_t x)
+{
+ return atomic_fetch_and_sub_int64(p, x) - x;
+}
+
+ATOMIC_INLINE int64_t atomic_cas_int64(int64_t *v, int64_t old, int64_t _new)
+{
+ int64_t ret;
+ asm volatile (
+ "lock; cmpxchgq %2,%1"
+ : "=a" (ret), "+m" (*v)
+ : "r" (_new), "0" (old)
+ : "memory");
+ return ret;
+}
# else
# error "Missing implementation for 64-bit atomic operations"
# endif
@@ -132,6 +204,7 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
/******************************************************************************/
/* 32-bit operations. */
#if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
+/* Unsigned */
ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
{
return __sync_add_and_fetch(p, x);
@@ -146,7 +219,25 @@ ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _ne
{
return __sync_val_compare_and_swap(v, old, _new);
}
+
+/* Signed */
+ATOMIC_INLINE int32_t atomic_add_and_fetch_int32(int32_t *p, int32_t x)
+{
+ return __sync_add_and_fetch(p, x);
+}
+
+ATOMIC_INLINE int32_t atomic_sub_and_fetch_int32(int32_t *p, int32_t x)
+{
+ return __sync_sub_and_fetch(p, x);
+}
+
+ATOMIC_INLINE int32_t atomic_cas_int32(int32_t *v, int32_t old, int32_t _new)
+{
+ return __sync_val_compare_and_swap(v, old, _new);
+}
+
#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
+/* Unsigned */
ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
{
uint32_t ret = x;
@@ -155,18 +246,18 @@ ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
: "+r" (ret), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
- return ret+x;
+ return ret + x;
}

ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
{
- ret = (uint32_t)(-(int32_t)x);
+ uint32_t ret = (uint32_t)(-(int32_t)x);
asm volatile (
"lock; xaddl %0, %1;"
: "+r" (ret), "=m" (*p) /* Outputs. */
: "m" (*p) /* Inputs. */
);
- return ret-x;
+ return ret - x;
}

ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
@@ -179,11 +270,47 @@ ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _ne
: "memory");
return ret;
}
+
+/* Signed */
+ATOMIC_INLINE int32_t atomic_add_and_fetch_int32(int32_t *p, int32_t x)
+{
+ int32_t ret = x;
+ asm volatile (
+ "lock; xaddl %0, %1;"
+ : "+r" (ret), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
+ return ret + x;
+}
+
+ATOMIC_INLINE int32_t atomic_sub_and_fetch_int32(int32_t *p, int32_t x)
+{
+ int32_t ret = -x;
+ asm volatile (
+ "lock; xaddl %0, %1;"
+ : "+r" (ret), "=m" (*p) /* Outputs. */
+ : "m" (*p) /* Inputs. */
+ );
+ return ret - x;
+}
+
+ATOMIC_INLINE int32_t atomic_cas_int32(int32_t *v, int32_t old, int32_t _new)
+{
+ int32_t ret;
+ asm volatile (
+ "lock; cmpxchgl %2,%1"
+ : "=a" (ret), "+m" (*v)
+ : "r" (_new), "0" (old)
+ : "memory");
+ return ret;
+}
+
#else
# error "Missing implementation for 32-bit atomic operations"
#endif

#if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
+/* Unsigned */
ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x)
{
return __sync_fetch_and_add(p, x);
@@ -199,6 +326,22 @@ ATOMIC_INLINE uint32_t atomic_fetch_and_and_uint32(uint32_t *p, uint32_t x)
return __sync_fetch_and_and(p, x);
}

+/* Signed */
+ATOMIC_INLINE int32_t atomic_fetch_and_add_int32(int32_t *p, int32_t x)
+{
+ return __sync_fetch_and_add(p, x);
+}
+
+ATOMIC_INLINE int32_t atomic_fetch_and_or_int32(int32_t *p, int32_t x)
+{
+ return __sync_fetch_and_or(p, x);
+}
+
+ATOMIC_INLINE int32_t atomic_fetch_and_and_int32(int32_t *p, int32_t x)
+{
+ return __sync_fetch_and_and(p, x);
+}
+
#else
# error "Missing implementation for 32-bit atomic operations"
#endif
@@ -206,6 +349,7 @@ ATOMIC_INLINE uint32_t atomic_fetch_and_and_uint32(uint32_t *p, uint32_t x)
/******************************************************************************/
/* 8-bit operations. */
#if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_1))
+/* Unsigned */
ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b)
{
return __sync_fetch_and_and(p, b);
@@ -214,6 +358,17 @@ ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b)
{
return __sync_fetch_and_or(p, b);
}
+
+/* Signed */
+ATOMIC_INLINE int8_t atomic_fetch_and_and_int8(int8_t *p, int8_t b)
+{
+ return __sync_fetch_and_and(p, b);
+}
+ATOMIC_INLINE int8_t atomic_fetch_and_or_int8(int8_t *p, int8_t b)
+{
+ return __sync_fetch_and_or(p, b);
+}
+
#else
# error "Missing implementation for 8-bit atomic operations"
#endif
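
As a usage illustration (not part of the patch), the sketch below exercises the signed helpers added above: atomic_add_and_fetch_int32, atomic_fetch_and_add_int64 and atomic_cas_int64. The function names come straight from the diff; the include path and the main driver are assumptions, on the premise that the header is reached through a public atomic_ops.h wrapper that selects atomic_ops_unix.h on POSIX builds.

/* Minimal usage sketch (illustrative only, not part of the patch). The
 * include path assumes a public wrapper header around atomic_ops_unix.h;
 * adjust it to the actual location in the source tree. */
#include <stdint.h>
#include <stdio.h>

#include "atomic_ops.h"  /* assumed wrapper that pulls in atomic_ops_unix.h */

int main(void)
{
  int32_t users = 0;
  int64_t total = 10;

  /* add_and_fetch returns the value *after* the addition. */
  int32_t now = atomic_add_and_fetch_int32(&users, 1);      /* now == 1 */

  /* fetch_and_add returns the value *before* the addition. */
  int64_t before = atomic_fetch_and_add_int64(&total, 5);   /* before == 10, total == 15 */

  /* cas swaps in _new only if *v still holds old, and returns the value
   * that was observed. */
  int64_t seen = atomic_cas_int64(&total, 15, -1);           /* seen == 15, total == -1 */

  printf("%d %lld %lld %lld\n",
         now, (long long)before, (long long)seen, (long long)total);
  return 0;
}

As the diff shows, the fetch_and_* variants return the value held before the operation, while the *_and_fetch variants return the result. The x86-64 branch derives the latter from the former by re-applying the operand (atomic_fetch_and_add_int64(p, x) + x), and implements fetch_and_sub by negating x and reusing lock; xaddq.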