git.blender.org/blender.git
author     Campbell Barton <ideasman42@gmail.com>  2019-04-17 07:17:24 +0300
committer  Campbell Barton <ideasman42@gmail.com>  2019-04-17 07:21:24 +0300
commit     e12c08e8d170b7ca40f204a5b0423c23a9fbc2c1 (patch)
tree       8cf3453d12edb177a218ef8009357518ec6cab6a /intern/atomic
parent     b3dabc200a4b0399ec6b81f2ff2730d07b44fcaa (diff)
ClangFormat: apply to source, most of intern
Apply clang format as proposed in T53211. For details on usage and instructions for migrating branches without conflicts, see: https://wiki.blender.org/wiki/Tools/ClangFormat
Diffstat (limited to 'intern/atomic')
-rw-r--r--  intern/atomic/atomic_ops.h                4
-rw-r--r--  intern/atomic/intern/atomic_ops_ext.h    85
-rw-r--r--  intern/atomic/intern/atomic_ops_msvc.h   65
-rw-r--r--  intern/atomic/intern/atomic_ops_unix.h  200
-rw-r--r--  intern/atomic/intern/atomic_ops_utils.h  29
5 files changed, 180 insertions, 203 deletions
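For orientation before the hunks: the reformat re-indents function bodies to two spaces, puts a space between `if`/`while` and the opening parenthesis, and wraps declarations that overflow the roughly 100-column limit onto a new line after the return type. A minimal sketch of the resulting body style, using a hypothetical helper built on the atomic_cas_z primitive declared in atomic_ops.h:

#include "atomic_ops.h" /* assumed include path */

/* Hypothetical "update minimum" counterpart of atomic_fetch_and_update_max_z,
 * written in the post-ClangFormat style: two-space indent, space after `while`/`if`. */
ATOMIC_INLINE size_t atomic_example_fetch_and_update_min_z(size_t *p, size_t x)
{
  size_t prev_value;
  while ((prev_value = *p) > x) {
    if (atomic_cas_z(p, prev_value, x) == prev_value) {
      break;
    }
  }
  return prev_value;
}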
diff --git a/intern/atomic/atomic_ops.h b/intern/atomic/atomic_ops.h
index 6cab507a8cb..07b7ce38b94 100644
--- a/intern/atomic/atomic_ops.h
+++ b/intern/atomic/atomic_ops.h
@@ -113,7 +113,8 @@ ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x);
ATOMIC_INLINE size_t atomic_fetch_and_add_z(size_t *p, size_t x);
ATOMIC_INLINE size_t atomic_fetch_and_sub_z(size_t *p, size_t x);
ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new);
-ATOMIC_INLINE size_t atomic_fetch_and_update_max_z(size_t *p, size_t x); /* Uses CAS loop, see warning below. */
+ATOMIC_INLINE size_t
+atomic_fetch_and_update_max_z(size_t *p, size_t x); /* Uses CAS loop, see warning below. */
ATOMIC_INLINE unsigned int atomic_add_and_fetch_u(unsigned int *p, unsigned int x);
ATOMIC_INLINE unsigned int atomic_sub_and_fetch_u(unsigned int *p, unsigned int x);
@@ -123,7 +124,6 @@ ATOMIC_INLINE unsigned int atomic_cas_u(unsigned int *v, unsigned int old, unsig
ATOMIC_INLINE void *atomic_cas_ptr(void **v, void *old, void *_new);
-
ATOMIC_INLINE float atomic_cas_float(float *v, float old, float _new);
/* WARNING! Float 'atomics' are really faked ones, those are actually closer to some kind of spinlock-sync'ed operation,
diff --git a/intern/atomic/intern/atomic_ops_ext.h b/intern/atomic/intern/atomic_ops_ext.h
index 56ef3c33fce..4dbc1153506 100644
--- a/intern/atomic/intern/atomic_ops_ext.h
+++ b/intern/atomic/intern/atomic_ops_ext.h
@@ -56,105 +56,106 @@ ATOMIC_STATIC_ASSERT(sizeof(size_t) == LG_SIZEOF_PTR, "sizeof(size_t) != LG_SIZE
ATOMIC_INLINE size_t atomic_add_and_fetch_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 8)
- return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
+ return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 4)
- return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
+ return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 8)
- return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+ return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t) - ((int64_t)x));
#elif (LG_SIZEOF_PTR == 4)
- return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+ return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t) - ((int32_t)x));
#endif
}
ATOMIC_INLINE size_t atomic_fetch_and_add_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 8)
- return (size_t)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)x);
+ return (size_t)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 4)
- return (size_t)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)x);
+ return (size_t)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
ATOMIC_INLINE size_t atomic_fetch_and_sub_z(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 8)
- return (size_t)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+ return (size_t)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t) - ((int64_t)x));
#elif (LG_SIZEOF_PTR == 4)
- return (size_t)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+ return (size_t)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t) - ((int32_t)x));
#endif
}
ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new)
{
#if (LG_SIZEOF_PTR == 8)
- return (size_t)atomic_cas_uint64((uint64_t *)v, (uint64_t)old, (uint64_t)_new);
+ return (size_t)atomic_cas_uint64((uint64_t *)v, (uint64_t)old, (uint64_t)_new);
#elif (LG_SIZEOF_PTR == 4)
- return (size_t)atomic_cas_uint32((uint32_t *)v, (uint32_t)old, (uint32_t)_new);
+ return (size_t)atomic_cas_uint32((uint32_t *)v, (uint32_t)old, (uint32_t)_new);
#endif
}
ATOMIC_INLINE size_t atomic_fetch_and_update_max_z(size_t *p, size_t x)
{
- size_t prev_value;
- while((prev_value = *p) < x) {
- if(atomic_cas_z(p, prev_value, x) == prev_value) {
- break;
- }
- }
- return prev_value;
+ size_t prev_value;
+ while ((prev_value = *p) < x) {
+ if (atomic_cas_z(p, prev_value, x) == prev_value) {
+ break;
+ }
+ }
+ return prev_value;
}
/******************************************************************************/
/* unsigned operations. */
-ATOMIC_STATIC_ASSERT(sizeof(unsigned int) == LG_SIZEOF_INT, "sizeof(unsigned int) != LG_SIZEOF_INT");
+ATOMIC_STATIC_ASSERT(sizeof(unsigned int) == LG_SIZEOF_INT,
+ "sizeof(unsigned int) != LG_SIZEOF_INT");
ATOMIC_INLINE unsigned int atomic_add_and_fetch_u(unsigned int *p, unsigned int x)
{
#if (LG_SIZEOF_INT == 8)
- return (unsigned int)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
+ return (unsigned int)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 4)
- return (unsigned int)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
+ return (unsigned int)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
ATOMIC_INLINE unsigned int atomic_sub_and_fetch_u(unsigned int *p, unsigned int x)
{
#if (LG_SIZEOF_INT == 8)
- return (unsigned int)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+ return (unsigned int)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t) - ((int64_t)x));
#elif (LG_SIZEOF_INT == 4)
- return (unsigned int)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+ return (unsigned int)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t) - ((int32_t)x));
#endif
}
ATOMIC_INLINE unsigned int atomic_fetch_and_add_u(unsigned int *p, unsigned int x)
{
#if (LG_SIZEOF_INT == 8)
- return (unsigned int)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)x);
+ return (unsigned int)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 4)
- return (unsigned int)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)x);
+ return (unsigned int)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)x);
#endif
}
ATOMIC_INLINE unsigned int atomic_fetch_and_sub_u(unsigned int *p, unsigned int x)
{
#if (LG_SIZEOF_INT == 8)
- return (unsigned int)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+ return (unsigned int)atomic_fetch_and_add_uint64((uint64_t *)p, (uint64_t) - ((int64_t)x));
#elif (LG_SIZEOF_INT == 4)
- return (unsigned int)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+ return (unsigned int)atomic_fetch_and_add_uint32((uint32_t *)p, (uint32_t) - ((int32_t)x));
#endif
}
ATOMIC_INLINE unsigned int atomic_cas_u(unsigned int *v, unsigned int old, unsigned int _new)
{
#if (LG_SIZEOF_INT == 8)
- return (unsigned int)atomic_cas_uint64((uint64_t *)v, (uint64_t)old, (uint64_t)_new);
+ return (unsigned int)atomic_cas_uint64((uint64_t *)v, (uint64_t)old, (uint64_t)_new);
#elif (LG_SIZEOF_INT == 4)
- return (unsigned int)atomic_cas_uint32((uint32_t *)v, (uint32_t)old, (uint32_t)_new);
+ return (unsigned int)atomic_cas_uint32((uint32_t *)v, (uint32_t)old, (uint32_t)_new);
#endif
}
@@ -162,12 +163,12 @@ ATOMIC_INLINE unsigned int atomic_cas_u(unsigned int *v, unsigned int old, unsig
/* Char operations. */
ATOMIC_INLINE char atomic_fetch_and_or_char(char *p, char b)
{
- return (char)atomic_fetch_and_or_uint8((uint8_t *)p, (uint8_t)b);
+ return (char)atomic_fetch_and_or_uint8((uint8_t *)p, (uint8_t)b);
}
ATOMIC_INLINE char atomic_fetch_and_and_char(char *p, char b)
{
- return (char)atomic_fetch_and_and_uint8((uint8_t *)p, (uint8_t)b);
+ return (char)atomic_fetch_and_and_uint8((uint8_t *)p, (uint8_t)b);
}
/******************************************************************************/
@@ -176,9 +177,9 @@ ATOMIC_INLINE char atomic_fetch_and_and_char(char *p, char b)
ATOMIC_INLINE void *atomic_cas_ptr(void **v, void *old, void *_new)
{
#if (LG_SIZEOF_PTR == 8)
- return (void *)atomic_cas_uint64((uint64_t *)v, *(uint64_t *)&old, *(uint64_t *)&_new);
+ return (void *)atomic_cas_uint64((uint64_t *)v, *(uint64_t *)&old, *(uint64_t *)&_new);
#elif (LG_SIZEOF_PTR == 4)
- return (void *)atomic_cas_uint32((uint32_t *)v, *(uint32_t *)&old, *(uint32_t *)&_new);
+ return (void *)atomic_cas_uint32((uint32_t *)v, *(uint32_t *)&old, *(uint32_t *)&_new);
#endif
}
@@ -188,22 +189,22 @@ ATOMIC_STATIC_ASSERT(sizeof(float) == sizeof(uint32_t), "sizeof(float) != sizeof
ATOMIC_INLINE float atomic_cas_float(float *v, float old, float _new)
{
- uint32_t ret = atomic_cas_uint32((uint32_t *)v, *(uint32_t *)&old, *(uint32_t *)&_new);
- return *(float *)&ret;
+ uint32_t ret = atomic_cas_uint32((uint32_t *)v, *(uint32_t *)&old, *(uint32_t *)&_new);
+ return *(float *)&ret;
}
ATOMIC_INLINE float atomic_add_and_fetch_fl(float *p, const float x)
{
- float oldval, newval;
- uint32_t prevval;
+ float oldval, newval;
+ uint32_t prevval;
- do { /* Note that since collisions are unlikely, loop will nearly always run once. */
- oldval = *p;
- newval = oldval + x;
- prevval = atomic_cas_uint32((uint32_t *)p, *(uint32_t *)(&oldval), *(uint32_t *)(&newval));
- } while (_ATOMIC_UNLIKELY(prevval != *(uint32_t *)(&oldval)));
+ do { /* Note that since collisions are unlikely, loop will nearly always run once. */
+ oldval = *p;
+ newval = oldval + x;
+ prevval = atomic_cas_uint32((uint32_t *)p, *(uint32_t *)(&oldval), *(uint32_t *)(&newval));
+ } while (_ATOMIC_UNLIKELY(prevval != *(uint32_t *)(&oldval)));
- return newval;
+ return newval;
}
#endif /* __ATOMIC_OPS_EXT_H__ */
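The size_t and float helpers in the file above are the ones most calling code uses directly; a minimal usage sketch (the shared variables and the worker function are illustrative, and the include path is assumed):

#include "atomic_ops.h" /* assumed include path; declares the helpers above */

/* Counters shared between threads. */
static size_t peak_bytes = 0;
static float total_weight = 0.0f;

static void record_sample(size_t bytes, float weight)
{
  /* Lock-free running maximum: stores `bytes` only if it is larger, retrying on contention. */
  atomic_fetch_and_update_max_z(&peak_bytes, bytes);

  /* Float "atomic" add: emulated with a 32-bit CAS loop, atomic but not a single instruction. */
  atomic_add_and_fetch_fl(&total_weight, weight);
}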
diff --git a/intern/atomic/intern/atomic_ops_msvc.h b/intern/atomic/intern/atomic_ops_msvc.h
index 39ed4efe3a8..d9655defa81 100644
--- a/intern/atomic/intern/atomic_ops_msvc.h
+++ b/intern/atomic/intern/atomic_ops_msvc.h
@@ -40,7 +40,7 @@
#include <windows.h>
#include <intrin.h>
-#if defined (__clang__)
+#if defined(__clang__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wincompatible-pointer-types"
#endif
@@ -50,53 +50,53 @@
/* Unsigned */
ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
{
- return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x) + x;
+ return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x) + x;
}
ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
{
- return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x)) - x;
+ return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x)) - x;
}
ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
{
- return InterlockedCompareExchange64((int64_t *)v, _new, old);
+ return InterlockedCompareExchange64((int64_t *)v, _new, old);
}
ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
{
- return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x);
+ return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x);
}
ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
{
- return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x));
+ return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x));
}
/* Signed */
ATOMIC_INLINE int64_t atomic_add_and_fetch_int64(int64_t *p, int64_t x)
{
- return InterlockedExchangeAdd64(p, x) + x;
+ return InterlockedExchangeAdd64(p, x) + x;
}
ATOMIC_INLINE int64_t atomic_sub_and_fetch_int64(int64_t *p, int64_t x)
{
- return InterlockedExchangeAdd64(p, -x) - x;
+ return InterlockedExchangeAdd64(p, -x) - x;
}
ATOMIC_INLINE int64_t atomic_cas_int64(int64_t *v, int64_t old, int64_t _new)
{
- return InterlockedCompareExchange64(v, _new, old);
+ return InterlockedCompareExchange64(v, _new, old);
}
ATOMIC_INLINE int64_t atomic_fetch_and_add_int64(int64_t *p, int64_t x)
{
- return InterlockedExchangeAdd64(p, x);
+ return InterlockedExchangeAdd64(p, x);
}
ATOMIC_INLINE int64_t atomic_fetch_and_sub_int64(int64_t *p, int64_t x)
{
- return InterlockedExchangeAdd64(p, -x);
+ return InterlockedExchangeAdd64(p, -x);
}
#endif
@@ -105,63 +105,63 @@ ATOMIC_INLINE int64_t atomic_fetch_and_sub_int64(int64_t *p, int64_t x)
/* Unsigned */
ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
{
- return InterlockedExchangeAdd(p, x) + x;
+ return InterlockedExchangeAdd(p, x) + x;
}
ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
{
- return InterlockedExchangeAdd(p, -((int32_t)x)) - x;
+ return InterlockedExchangeAdd(p, -((int32_t)x)) - x;
}
ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
{
- return InterlockedCompareExchange((long *)v, _new, old);
+ return InterlockedCompareExchange((long *)v, _new, old);
}
ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x)
{
- return InterlockedExchangeAdd(p, x);
+ return InterlockedExchangeAdd(p, x);
}
ATOMIC_INLINE uint32_t atomic_fetch_and_or_uint32(uint32_t *p, uint32_t x)
{
- return InterlockedOr((long *)p, x);
+ return InterlockedOr((long *)p, x);
}
ATOMIC_INLINE uint32_t atomic_fetch_and_and_uint32(uint32_t *p, uint32_t x)
{
- return InterlockedAnd((long *)p, x);
+ return InterlockedAnd((long *)p, x);
}
/* Signed */
ATOMIC_INLINE int32_t atomic_add_and_fetch_int32(int32_t *p, int32_t x)
{
- return InterlockedExchangeAdd((long *)p, x) + x;
+ return InterlockedExchangeAdd((long *)p, x) + x;
}
ATOMIC_INLINE int32_t atomic_sub_and_fetch_int32(int32_t *p, int32_t x)
{
- return InterlockedExchangeAdd((long *)p, -x) - x;
+ return InterlockedExchangeAdd((long *)p, -x) - x;
}
ATOMIC_INLINE int32_t atomic_cas_int32(int32_t *v, int32_t old, int32_t _new)
{
- return InterlockedCompareExchange((long *)v, _new, old);
+ return InterlockedCompareExchange((long *)v, _new, old);
}
ATOMIC_INLINE int32_t atomic_fetch_and_add_int32(int32_t *p, int32_t x)
{
- return InterlockedExchangeAdd((long *)p, x);
+ return InterlockedExchangeAdd((long *)p, x);
}
ATOMIC_INLINE int32_t atomic_fetch_and_or_int32(int32_t *p, int32_t x)
{
- return InterlockedOr((long *)p, x);
+ return InterlockedOr((long *)p, x);
}
ATOMIC_INLINE int32_t atomic_fetch_and_and_int32(int32_t *p, int32_t x)
{
- return InterlockedAnd((long *)p, x);
+ return InterlockedAnd((long *)p, x);
}
/******************************************************************************/
@@ -172,9 +172,9 @@ ATOMIC_INLINE int32_t atomic_fetch_and_and_int32(int32_t *p, int32_t x)
ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b)
{
#if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
- return InterlockedAnd8((char *)p, (char)b);
+ return InterlockedAnd8((char *)p, (char)b);
#else
- return _InterlockedAnd8((char *)p, (char)b);
+ return _InterlockedAnd8((char *)p, (char)b);
#endif
}
@@ -182,9 +182,9 @@ ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b)
ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b)
{
#if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
- return InterlockedOr8((char *)p, (char)b);
+ return InterlockedOr8((char *)p, (char)b);
#else
- return _InterlockedOr8((char *)p, (char)b);
+ return _InterlockedOr8((char *)p, (char)b);
#endif
}
@@ -193,9 +193,9 @@ ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b)
ATOMIC_INLINE int8_t atomic_fetch_and_and_int8(int8_t *p, int8_t b)
{
#if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
- return InterlockedAnd8((char *)p, (char)b);
+ return InterlockedAnd8((char *)p, (char)b);
#else
- return _InterlockedAnd8((char *)p, (char)b);
+ return _InterlockedAnd8((char *)p, (char)b);
#endif
}
@@ -203,14 +203,13 @@ ATOMIC_INLINE int8_t atomic_fetch_and_and_int8(int8_t *p, int8_t b)
ATOMIC_INLINE int8_t atomic_fetch_and_or_int8(int8_t *p, int8_t b)
{
#if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
- return InterlockedOr8((char *)p, (char)b);
+ return InterlockedOr8((char *)p, (char)b);
#else
- return _InterlockedOr8((char *)p, (char)b);
+ return _InterlockedOr8((char *)p, (char)b);
#endif
}
-
-#if defined (__clang__)
+#if defined(__clang__)
# pragma GCC diagnostic pop
#endif
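The MSVC wrappers above lean on the fact that InterlockedExchangeAdd64 returns the value held before the addition, so the *_add_and_fetch_* variants simply add x back to that result. A small single-threaded sketch of the distinction (values are illustrative, include path assumed):

#include <assert.h>
#include <stdint.h>
#include "atomic_ops.h" /* assumed include path */

int main(void)
{
  uint64_t v = 10;
  /* fetch-and-add returns the old value; the variable is still updated. */
  assert(atomic_fetch_and_add_uint64(&v, 5) == 10 && v == 15);
  /* add-and-fetch returns the new value. */
  assert(atomic_add_and_fetch_uint64(&v, 5) == 20 && v == 20);
  return 0;
}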
diff --git a/intern/atomic/intern/atomic_ops_unix.h b/intern/atomic/intern/atomic_ops_unix.h
index cda251f342b..e1126cab0c2 100644
--- a/intern/atomic/intern/atomic_ops_unix.h
+++ b/intern/atomic/intern/atomic_ops_unix.h
@@ -56,140 +56,128 @@
/* Unsigned */
ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
{
- return __sync_add_and_fetch(p, x);
+ return __sync_add_and_fetch(p, x);
}
ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
{
- return __sync_sub_and_fetch(p, x);
+ return __sync_sub_and_fetch(p, x);
}
ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
{
- return __sync_fetch_and_add(p, x);
+ return __sync_fetch_and_add(p, x);
}
ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
{
- return __sync_fetch_and_sub(p, x);
+ return __sync_fetch_and_sub(p, x);
}
ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
{
- return __sync_val_compare_and_swap(v, old, _new);
+ return __sync_val_compare_and_swap(v, old, _new);
}
/* Signed */
ATOMIC_INLINE int64_t atomic_add_and_fetch_int64(int64_t *p, int64_t x)
{
- return __sync_add_and_fetch(p, x);
+ return __sync_add_and_fetch(p, x);
}
ATOMIC_INLINE int64_t atomic_sub_and_fetch_int64(int64_t *p, int64_t x)
{
- return __sync_sub_and_fetch(p, x);
+ return __sync_sub_and_fetch(p, x);
}
ATOMIC_INLINE int64_t atomic_fetch_and_add_int64(int64_t *p, int64_t x)
{
- return __sync_fetch_and_add(p, x);
+ return __sync_fetch_and_add(p, x);
}
ATOMIC_INLINE int64_t atomic_fetch_and_sub_int64(int64_t *p, int64_t x)
{
- return __sync_fetch_and_sub(p, x);
+ return __sync_fetch_and_sub(p, x);
}
ATOMIC_INLINE int64_t atomic_cas_int64(int64_t *v, int64_t old, int64_t _new)
{
- return __sync_val_compare_and_swap(v, old, _new);
+ return __sync_val_compare_and_swap(v, old, _new);
}
# elif (defined(__amd64__) || defined(__x86_64__))
/* Unsigned */
ATOMIC_INLINE uint64_t atomic_fetch_and_add_uint64(uint64_t *p, uint64_t x)
{
- asm volatile (
- "lock; xaddq %0, %1;"
- : "+r" (x), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
- return x;
+ asm volatile("lock; xaddq %0, %1;"
+ : "+r"(x), "=m"(*p) /* Outputs. */
+ : "m"(*p) /* Inputs. */
+ );
+ return x;
}
ATOMIC_INLINE uint64_t atomic_fetch_and_sub_uint64(uint64_t *p, uint64_t x)
{
- x = (uint64_t)(-(int64_t)x);
- asm volatile (
- "lock; xaddq %0, %1;"
- : "+r" (x), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
- return x;
+ x = (uint64_t)(-(int64_t)x);
+ asm volatile("lock; xaddq %0, %1;"
+ : "+r"(x), "=m"(*p) /* Outputs. */
+ : "m"(*p) /* Inputs. */
+ );
+ return x;
}
ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
{
- return atomic_fetch_and_add_uint64(p, x) + x;
+ return atomic_fetch_and_add_uint64(p, x) + x;
}
ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
{
- return atomic_fetch_and_sub_uint64(p, x) - x;
+ return atomic_fetch_and_sub_uint64(p, x) - x;
}
ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new)
{
- uint64_t ret;
- asm volatile (
- "lock; cmpxchgq %2,%1"
- : "=a" (ret), "+m" (*v)
- : "r" (_new), "0" (old)
- : "memory");
- return ret;
+ uint64_t ret;
+ asm volatile("lock; cmpxchgq %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
+ return ret;
}
/* Signed */
ATOMIC_INLINE int64_t atomic_fetch_and_add_int64(int64_t *p, int64_t x)
{
- asm volatile (
- "lock; xaddq %0, %1;"
- : "+r" (x), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
- return x;
+ asm volatile("lock; xaddq %0, %1;"
+ : "+r"(x), "=m"(*p) /* Outputs. */
+ : "m"(*p) /* Inputs. */
+ );
+ return x;
}
ATOMIC_INLINE int64_t atomic_fetch_and_sub_int64(int64_t *p, int64_t x)
{
- x = -x;
- asm volatile (
- "lock; xaddq %0, %1;"
- : "+r" (x), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
- return x;
+ x = -x;
+ asm volatile("lock; xaddq %0, %1;"
+ : "+r"(x), "=m"(*p) /* Outputs. */
+ : "m"(*p) /* Inputs. */
+ );
+ return x;
}
ATOMIC_INLINE int64_t atomic_add_and_fetch_int64(int64_t *p, int64_t x)
{
- return atomic_fetch_and_add_int64(p, x) + x;
+ return atomic_fetch_and_add_int64(p, x) + x;
}
ATOMIC_INLINE int64_t atomic_sub_and_fetch_int64(int64_t *p, int64_t x)
{
- return atomic_fetch_and_sub_int64(p, x) - x;
+ return atomic_fetch_and_sub_int64(p, x) - x;
}
ATOMIC_INLINE int64_t atomic_cas_int64(int64_t *v, int64_t old, int64_t _new)
{
- int64_t ret;
- asm volatile (
- "lock; cmpxchgq %2,%1"
- : "=a" (ret), "+m" (*v)
- : "r" (_new), "0" (old)
- : "memory");
- return ret;
+ int64_t ret;
+ asm volatile("lock; cmpxchgq %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
+ return ret;
}
# else
# error "Missing implementation for 64-bit atomic operations"
@@ -202,102 +190,90 @@ ATOMIC_INLINE int64_t atomic_cas_int64(int64_t *v, int64_t old, int64_t _new)
/* Unsigned */
ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
{
- return __sync_add_and_fetch(p, x);
+ return __sync_add_and_fetch(p, x);
}
ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
{
- return __sync_sub_and_fetch(p, x);
+ return __sync_sub_and_fetch(p, x);
}
ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
{
- return __sync_val_compare_and_swap(v, old, _new);
+ return __sync_val_compare_and_swap(v, old, _new);
}
/* Signed */
ATOMIC_INLINE int32_t atomic_add_and_fetch_int32(int32_t *p, int32_t x)
{
- return __sync_add_and_fetch(p, x);
+ return __sync_add_and_fetch(p, x);
}
ATOMIC_INLINE int32_t atomic_sub_and_fetch_int32(int32_t *p, int32_t x)
{
- return __sync_sub_and_fetch(p, x);
+ return __sync_sub_and_fetch(p, x);
}
ATOMIC_INLINE int32_t atomic_cas_int32(int32_t *v, int32_t old, int32_t _new)
{
- return __sync_val_compare_and_swap(v, old, _new);
+ return __sync_val_compare_and_swap(v, old, _new);
}
#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
/* Unsigned */
ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
{
- uint32_t ret = x;
- asm volatile (
- "lock; xaddl %0, %1;"
- : "+r" (ret), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
- return ret + x;
+ uint32_t ret = x;
+ asm volatile("lock; xaddl %0, %1;"
+ : "+r"(ret), "=m"(*p) /* Outputs. */
+ : "m"(*p) /* Inputs. */
+ );
+ return ret + x;
}
ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
{
- uint32_t ret = (uint32_t)(-(int32_t)x);
- asm volatile (
- "lock; xaddl %0, %1;"
- : "+r" (ret), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
- return ret - x;
+ uint32_t ret = (uint32_t)(-(int32_t)x);
+ asm volatile("lock; xaddl %0, %1;"
+ : "+r"(ret), "=m"(*p) /* Outputs. */
+ : "m"(*p) /* Inputs. */
+ );
+ return ret - x;
}
ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
{
- uint32_t ret;
- asm volatile (
- "lock; cmpxchgl %2,%1"
- : "=a" (ret), "+m" (*v)
- : "r" (_new), "0" (old)
- : "memory");
- return ret;
+ uint32_t ret;
+ asm volatile("lock; cmpxchgl %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
+ return ret;
}
/* Signed */
ATOMIC_INLINE int32_t atomic_add_and_fetch_int32(int32_t *p, int32_t x)
{
- int32_t ret = x;
- asm volatile (
- "lock; xaddl %0, %1;"
- : "+r" (ret), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
- return ret + x;
+ int32_t ret = x;
+ asm volatile("lock; xaddl %0, %1;"
+ : "+r"(ret), "=m"(*p) /* Outputs. */
+ : "m"(*p) /* Inputs. */
+ );
+ return ret + x;
}
ATOMIC_INLINE int32_t atomic_sub_and_fetch_int32(int32_t *p, int32_t x)
{
- int32_t ret = -x;
- asm volatile (
- "lock; xaddl %0, %1;"
- : "+r" (ret), "=m" (*p) /* Outputs. */
- : "m" (*p) /* Inputs. */
- );
- return ret - x;
+ int32_t ret = -x;
+ asm volatile("lock; xaddl %0, %1;"
+ : "+r"(ret), "=m"(*p) /* Outputs. */
+ : "m"(*p) /* Inputs. */
+ );
+ return ret - x;
}
ATOMIC_INLINE int32_t atomic_cas_int32(int32_t *v, int32_t old, int32_t _new)
{
- int32_t ret;
- asm volatile (
- "lock; cmpxchgl %2,%1"
- : "=a" (ret), "+m" (*v)
- : "r" (_new), "0" (old)
- : "memory");
- return ret;
+ int32_t ret;
+ asm volatile("lock; cmpxchgl %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
+ return ret;
}
#else
@@ -308,33 +284,33 @@ ATOMIC_INLINE int32_t atomic_cas_int32(int32_t *v, int32_t old, int32_t _new)
/* Unsigned */
ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x)
{
- return __sync_fetch_and_add(p, x);
+ return __sync_fetch_and_add(p, x);
}
ATOMIC_INLINE uint32_t atomic_fetch_and_or_uint32(uint32_t *p, uint32_t x)
{
- return __sync_fetch_and_or(p, x);
+ return __sync_fetch_and_or(p, x);
}
ATOMIC_INLINE uint32_t atomic_fetch_and_and_uint32(uint32_t *p, uint32_t x)
{
- return __sync_fetch_and_and(p, x);
+ return __sync_fetch_and_and(p, x);
}
/* Signed */
ATOMIC_INLINE int32_t atomic_fetch_and_add_int32(int32_t *p, int32_t x)
{
- return __sync_fetch_and_add(p, x);
+ return __sync_fetch_and_add(p, x);
}
ATOMIC_INLINE int32_t atomic_fetch_and_or_int32(int32_t *p, int32_t x)
{
- return __sync_fetch_and_or(p, x);
+ return __sync_fetch_and_or(p, x);
}
ATOMIC_INLINE int32_t atomic_fetch_and_and_int32(int32_t *p, int32_t x)
{
- return __sync_fetch_and_and(p, x);
+ return __sync_fetch_and_and(p, x);
}
#else
@@ -347,21 +323,21 @@ ATOMIC_INLINE int32_t atomic_fetch_and_and_int32(int32_t *p, int32_t x)
/* Unsigned */
ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b)
{
- return __sync_fetch_and_and(p, b);
+ return __sync_fetch_and_and(p, b);
}
ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b)
{
- return __sync_fetch_and_or(p, b);
+ return __sync_fetch_and_or(p, b);
}
/* Signed */
ATOMIC_INLINE int8_t atomic_fetch_and_and_int8(int8_t *p, int8_t b)
{
- return __sync_fetch_and_and(p, b);
+ return __sync_fetch_and_and(p, b);
}
ATOMIC_INLINE int8_t atomic_fetch_and_or_int8(int8_t *p, int8_t b)
{
- return __sync_fetch_and_or(p, b);
+ return __sync_fetch_and_or(p, b);
}
#else
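The 8-bit and/or primitives at the end of this file are what the char helpers in atomic_ops_ext.h forward to; a common pattern is claiming a per-element flag exactly once across threads. A minimal sketch (the flag bit and helper are hypothetical, include path assumed):

#include <stdbool.h>
#include <stdint.h>
#include "atomic_ops.h" /* assumed include path */

#define EXAMPLE_FLAG_TAGGED (1 << 0) /* hypothetical flag bit */

/* Returns true only for the one thread whose or-operation actually set the bit. */
static bool example_tag_element(uint8_t *flags)
{
  uint8_t prev = atomic_fetch_and_or_uint8(flags, EXAMPLE_FLAG_TAGGED);
  return (prev & EXAMPLE_FLAG_TAGGED) == 0;
}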
diff --git a/intern/atomic/intern/atomic_ops_utils.h b/intern/atomic/intern/atomic_ops_utils.h
index ff71d113f45..4010051607c 100644
--- a/intern/atomic/intern/atomic_ops_utils.h
+++ b/intern/atomic/intern/atomic_ops_utils.h
@@ -62,11 +62,11 @@
#endif
#ifdef __GNUC__
-# define _ATOMIC_LIKELY(x) __builtin_expect(!!(x), 1)
-# define _ATOMIC_UNLIKELY(x) __builtin_expect(!!(x), 0)
+# define _ATOMIC_LIKELY(x) __builtin_expect(!!(x), 1)
+# define _ATOMIC_UNLIKELY(x) __builtin_expect(!!(x), 0)
#else
-# define _ATOMIC_LIKELY(x) (x)
-# define _ATOMIC_UNLIKELY(x) (x)
+# define _ATOMIC_LIKELY(x) (x)
+# define _ATOMIC_UNLIKELY(x) (x)
#endif
#if defined(__SIZEOF_POINTER__)
@@ -77,7 +77,7 @@
# elif (UINTPTR_MAX == 0xFFFFFFFFFFFFFFFF)
# define LG_SIZEOF_PTR 8
# endif
-#elif defined(__WORDSIZE) /* Fallback for older glibc and cpp */
+#elif defined(__WORDSIZE) /* Fallback for older glibc and cpp */
# if (__WORDSIZE == 32)
# define LG_SIZEOF_PTR 4
# elif (__WORDSIZE == 64)
@@ -100,9 +100,8 @@
/* Copied from BLI_utils... */
/* C++ can't use _Static_assert, expects static_assert() but c++0x only,
* Coverity also errors out. */
-#if (!defined(__cplusplus)) && \
- (!defined(__COVERITY__)) && \
- (defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 406)) /* gcc4.6+ only */
+#if (!defined(__cplusplus)) && (!defined(__COVERITY__)) && \
+ (defined(__GNUC__) && ((__GNUC__ * 100 + __GNUC_MINOR__) >= 406)) /* gcc4.6+ only */
# define ATOMIC_STATIC_ASSERT(a, msg) __extension__ _Static_assert(a, msg);
#else
/* Code adapted from http://www.pixelbeat.org/programming/gcc/static_assert.html */
@@ -110,17 +109,19 @@
* expand __LINE__ with one indirection before doing the actual concatenation. */
# define ATOMIC_ASSERT_CONCAT_(a, b) a##b
# define ATOMIC_ASSERT_CONCAT(a, b) ATOMIC_ASSERT_CONCAT_(a, b)
- /* These can't be used after statements in c89. */
-# if defined(__COUNTER__) /* MSVC */
+/* These can't be used after statements in c89. */
+# if defined(__COUNTER__) /* MSVC */
# define ATOMIC_STATIC_ASSERT(a, msg) \
- ; enum { ATOMIC_ASSERT_CONCAT(static_assert_, __COUNTER__) = 1 / (int)(!!(a)) };
-# else /* older gcc, clang... */
- /* This can't be used twice on the same line so ensure if using in headers
+ ; \
+ enum { ATOMIC_ASSERT_CONCAT(static_assert_, __COUNTER__) = 1 / (int)(!!(a)) };
+# else /* older gcc, clang... */
+/* This can't be used twice on the same line so ensure if using in headers
* that the headers are not included twice (by wrapping in #ifndef...#endif)
* Note it doesn't cause an issue when used on same line of separate modules
* compiled with gcc -combine -fwhole-program. */
# define ATOMIC_STATIC_ASSERT(a, msg) \
- ; enum { ATOMIC_ASSERT_CONCAT(assert_line_, __LINE__) = 1 / (int)(!!(a)) };
+ ; \
+ enum { ATOMIC_ASSERT_CONCAT(assert_line_, __LINE__) = 1 / (int)(!!(a)) };
# endif
#endif
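ATOMIC_STATIC_ASSERT is used the same way as the size checks at the top of atomic_ops_ext.h; a minimal sketch of an extra compile-time check (the condition is illustrative, include path assumed):

#include <stdint.h>
#include "atomic_ops.h" /* assumed include path; pulls in atomic_ops_utils.h */

/* One use per line in headers: the fallback macro derives its enum name from __LINE__. */
ATOMIC_STATIC_ASSERT(sizeof(uint64_t) == 8, "uint64_t must be 8 bytes for the 64-bit atomics");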