
github.com/Unity-Technologies/libatomic_ops.git
author    Ivan Maidanski <ivmai@mail.ru>  2013-03-16 00:23:26 +0400
committer Ivan Maidanski <ivmai@mail.ru>  2013-03-16 00:23:26 +0400
commit    f5f47f7b9c70df2e0d371dd2f303a0dec1a37223 (patch)
tree      26fcd8457174ea65afd9350c8987a3339d0cb06d
parent    a965c42c90e5d5faf84d57cefae27bb2c5fce30a (diff)
parent    93926bcd4ed9c89d4e7fb87c02ca25aa6afc4214 (diff)
Merge branch 'add-aarch64-support'
-rw-r--r--  AUTHORS                                                1
-rw-r--r--  TODO                                                   3
-rw-r--r--  src/Makefile.am                                       48
-rw-r--r--  src/atomic_ops.h                                       4
-rw-r--r--  src/atomic_ops/sysdeps/gcc/aarch64.h                 176
-rw-r--r--  src/atomic_ops/sysdeps/gcc/arm.h                       1
-rw-r--r--  src/atomic_ops/sysdeps/gcc/generic-arithm.h          704
-rw-r--r--  src/atomic_ops/sysdeps/gcc/generic-arithm.template    44
-rw-r--r--  src/atomic_ops/sysdeps/gcc/generic-small.h           280
-rw-r--r--  src/atomic_ops/sysdeps/gcc/generic-small.template     70
-rw-r--r--  src/atomic_ops/sysdeps/gcc/generic.h                 154
-rw-r--r--  src/atomic_ops/sysdeps/gcc/x86.h                       7
-rw-r--r--  src/atomic_ops/sysdeps/standard_ao_double_t.h          2
13 files changed, 1488 insertions(+), 6 deletions(-)
diff --git a/AUTHORS b/AUTHORS
index 370d1c0..15a6008 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -35,3 +35,4 @@ Takashi Yoshii <takashi.yoshii.zj@renesas.com>
Thiemo Seufer <ica2_ts@csv.ica.uni-stuttgart.de>
Thorsten Glaser <tg@debian.org>
Tony Mantler <nicoya@ubb.ca>
+Yvan Roux <yvan.roux@linaro.org>
diff --git a/TODO b/TODO
index b987d36..8a95ddb 100644
--- a/TODO
+++ b/TODO
@@ -1,8 +1,5 @@
== TODO tasks ==
-Move all primitives based on __sync intrinsic from gcc/x86.h to gcc/generic.h;
-offer more sync-base alternatives if AO_USE_SYNC_BUILTINS defined.
-
Add C++0x ATM (atomic memory operations) layer.
diff --git a/src/Makefile.am b/src/Makefile.am
index c99ffbf..7d586e9 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -17,6 +17,8 @@ EXTRA_DIST = Makefile.msft atomic_ops/sysdeps/README \
atomic_ops/generalize-arithm.template \
atomic_ops/generalize-small.template \
atomic_ops/sysdeps/ao_t_is_int.template \
+ atomic_ops/sysdeps/gcc/generic-arithm.template \
+ atomic_ops/sysdeps/gcc/generic-small.template \
atomic_ops/sysdeps/loadstore/acquire_release_volatile.template \
atomic_ops/sysdeps/loadstore/atomic_load.template \
atomic_ops/sysdeps/loadstore/atomic_store.template \
@@ -27,6 +29,8 @@ EXTRA_DIST = Makefile.msft atomic_ops/sysdeps/README \
BUILT_SOURCES = atomic_ops/generalize-arithm.h \
atomic_ops/generalize-small.h \
atomic_ops/sysdeps/ao_t_is_int.h \
+ atomic_ops/sysdeps/gcc/generic-arithm.h \
+ atomic_ops/sysdeps/gcc/generic-small.h \
atomic_ops/sysdeps/loadstore/acquire_release_volatile.h \
atomic_ops/sysdeps/loadstore/atomic_load.h \
atomic_ops/sysdeps/loadstore/atomic_store.h \
@@ -63,10 +67,12 @@ nobase_private_HEADERS = atomic_ops/ao_version.h \
\
atomic_ops/sysdeps/armcc/arm_v6.h \
\
+ atomic_ops/sysdeps/gcc/aarch64.h \
atomic_ops/sysdeps/gcc/alpha.h \
atomic_ops/sysdeps/gcc/arm.h \
atomic_ops/sysdeps/gcc/avr32.h \
atomic_ops/sysdeps/gcc/cris.h \
+ atomic_ops/sysdeps/gcc/generic.h \
atomic_ops/sysdeps/gcc/hexagon.h \
atomic_ops/sysdeps/gcc/hppa.h \
atomic_ops/sysdeps/gcc/ia64.h \
@@ -116,6 +122,48 @@ atomic_ops/sysdeps/ao_t_is_int.h: atomic_ops/sysdeps/ao_t_is_int.template
sed -e s:XBAR:write:g $? >> $@
sed -e s:XBAR:read:g $? >> $@
+atomic_ops/sysdeps/gcc/generic-arithm.h: \
+ atomic_ops/sysdeps/gcc/generic-arithm.template
+ sed -e s:_XBAR::g -e s:XGCCBAR:RELAXED:g \
+ -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@
+ sed -e s:_XBAR::g -e s:XGCCBAR:RELAXED:g \
+ -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:_XBAR::g -e s:XGCCBAR:RELAXED:g \
+ -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:_XBAR::g -e s:XGCCBAR:RELAXED:g \
+ -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+ sed -e s:XBAR:acquire:g -e s:XGCCBAR:ACQUIRE:g \
+ -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? >> $@
+ sed -e s:XBAR:acquire:g -e s:XGCCBAR:ACQUIRE:g \
+ -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:XBAR:acquire:g -e s:XGCCBAR:ACQUIRE:g \
+ -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:XBAR:acquire:g -e s:XGCCBAR:ACQUIRE:g \
+ -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+ sed -e s:XBAR:release:g -e s:XGCCBAR:RELEASE:g \
+ -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? >> $@
+ sed -e s:XBAR:release:g -e s:XGCCBAR:RELEASE:g \
+ -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:XBAR:release:g -e s:XGCCBAR:RELEASE:g \
+ -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:XBAR:release:g -e s:XGCCBAR:RELEASE:g \
+ -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+ sed -e s:XBAR:full:g -e s:XGCCBAR:SEQ_CST:g \
+ -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? >> $@
+ sed -e s:XBAR:full:g -e s:XGCCBAR:SEQ_CST:g \
+ -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:XBAR:full:g -e s:XGCCBAR:SEQ_CST:g \
+ -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:XBAR:full:g -e s:XGCCBAR:SEQ_CST:g \
+ -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+
+atomic_ops/sysdeps/gcc/generic-small.h: \
+ atomic_ops/sysdeps/gcc/generic-small.template
+ sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@
+ sed -e s:XSIZE:short:g -e s:XCTYPE:unsigned/**/short:g $? >> $@
+ sed -e s:XSIZE:int:g -e s:XCTYPE:unsigned:g $? >> $@
+ sed -e s:XSIZE_::g -e s:XCTYPE:AO_t:g $? >> $@
+
atomic_ops/sysdeps/loadstore/ordered_loads_only.h: \
atomic_ops/sysdeps/loadstore/ordered_loads_only.template
sed -e s:XSIZE:char:g -e s:XCTYPE:unsigned/**/char:g $? > $@
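For readers unfamiliar with the template scheme: the new rules above stamp out generic-arithm.h and generic-small.h by running sed once per size (char, short, int, AO_t) and once per barrier flavor (relaxed, acquire, release, full). A hedged illustration of a single pass, matching the generated code further down in this commit:

    // Template form (XSIZE, XCTYPE, XBAR, XGCCBAR are sed placeholders):
    //
    //   AO_INLINE XCTYPE
    //   AO_XSIZE_fetch_and_add_XBAR(volatile XCTYPE *addr, XCTYPE incr)
    //   {
    //     return __atomic_fetch_add(addr, incr, __ATOMIC_XGCCBAR);
    //   }
    //
    // After the acquire/char pass (XCTYPE becomes unsigned/**/char; the empty
    // block comment merely separates the two tokens in the generated header):

    AO_INLINE unsigned/**/char
    AO_char_fetch_and_add_acquire(volatile unsigned/**/char *addr,
                                  unsigned/**/char incr)
    {
      return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
    }
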
diff --git a/src/atomic_ops.h b/src/atomic_ops.h
index 7756c90..e468e5e 100644
--- a/src/atomic_ops.h
+++ b/src/atomic_ops.h
@@ -266,6 +266,10 @@
|| defined(__powerpc64__) || defined(__ppc64__)
# include "atomic_ops/sysdeps/gcc/powerpc.h"
# endif /* __powerpc__ */
+# if defined(__aarch64__)
+# include "atomic_ops/sysdeps/gcc/aarch64.h"
+# define AO_CAN_EMUL_CAS
+# endif /* __aarch64__ */
# if defined(__arm__)
# include "atomic_ops/sysdeps/gcc/arm.h"
# define AO_CAN_EMUL_CAS
diff --git a/src/atomic_ops/sysdeps/gcc/aarch64.h b/src/atomic_ops/sysdeps/gcc/aarch64.h
new file mode 100644
index 0000000..6ae26f3
--- /dev/null
+++ b/src/atomic_ops/sysdeps/gcc/aarch64.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+#include "../test_and_set_t_is_ao_t.h"
+
+#include "../standard_ao_double_t.h"
+
+#ifndef AO_UNIPROCESSOR
+ AO_INLINE void
+ AO_nop_write(void)
+ {
+ __asm__ __volatile__("dmb st" : : : "memory");
+ }
+# define AO_HAVE_nop_write
+#endif
+
+/* TODO: Adjust version check on fixing double-wide AO support in GCC. */
+#if __GNUC__ == 4
+
+ AO_INLINE AO_double_t
+ AO_double_load(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+ int status;
+
+ /* Note that STXP cannot be discarded because LD[A]XP is not */
+ /* single-copy atomic (unlike LDREXD for 32-bit ARM). */
+ do {
+ __asm__ __volatile__("//AO_double_load\n"
+ " ldxp %0, %1, %3\n"
+ " stxp %w2, %0, %1, %3"
+ : "=&r" (result.AO_val1), "=&r" (result.AO_val2), "=&r" (status)
+ : "Q" (*addr));
+ } while (AO_EXPECT_FALSE(status));
+ return result;
+ }
+# define AO_HAVE_double_load
+
+ AO_INLINE AO_double_t
+ AO_double_load_acquire(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+ int status;
+
+ do {
+ __asm__ __volatile__("//AO_double_load_acquire\n"
+ " ldaxp %0, %1, %3\n"
+ " stxp %w2, %0, %1, %3"
+ : "=&r" (result.AO_val1), "=&r" (result.AO_val2), "=&r" (status)
+ : "Q" (*addr));
+ } while (AO_EXPECT_FALSE(status));
+ return result;
+ }
+# define AO_HAVE_double_load_acquire
+
+ AO_INLINE void
+ AO_double_store(volatile AO_double_t *addr, AO_double_t value)
+ {
+ AO_double_t old_val;
+ int status;
+
+ do {
+ __asm__ __volatile__("//AO_double_store\n"
+ " ldxp %0, %1, %3\n"
+ " stxp %w2, %4, %5, %3"
+ : "=&r" (old_val.AO_val1), "=&r" (old_val.AO_val2), "=&r" (status),
+ "=Q" (*addr)
+ : "r" (value.AO_val1), "r" (value.AO_val2));
+ /* Compared to the arm.h implementation, the 'cc' (flags) are not */
+ /* clobbered because A64 has no concept of conditional execution. */
+ } while (AO_EXPECT_FALSE(status));
+ }
+# define AO_HAVE_double_store
+
+ AO_INLINE void
+ AO_double_store_release(volatile AO_double_t *addr, AO_double_t value)
+ {
+ AO_double_t old_val;
+ int status;
+
+ do {
+ __asm__ __volatile__("//AO_double_store_release\n"
+ " ldxp %0, %1, %3\n"
+ " stlxp %w2, %4, %5, %3"
+ : "=&r" (old_val.AO_val1), "=&r" (old_val.AO_val2), "=&r" (status),
+ "=Q" (*addr)
+ : "r" (value.AO_val1), "r" (value.AO_val2));
+ } while (AO_EXPECT_FALSE(status));
+ }
+# define AO_HAVE_double_store_release
+
+ AO_INLINE int
+ AO_double_compare_and_swap(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_double_t tmp;
+ int result = 1;
+
+ do {
+ __asm__ __volatile__("//AO_double_compare_and_swap\n"
+ " ldxp %0, %1, %2\n"
+ : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
+ : "Q" (*addr));
+ if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
+ break;
+ __asm__ __volatile__(
+ " stxp %w0, %2, %3, %1\n"
+ : "=&r" (result), "=Q" (*addr)
+ : "r" (new_val.AO_val1), "r" (new_val.AO_val2));
+ } while (AO_EXPECT_FALSE(result));
+ return !result;
+ }
+# define AO_HAVE_double_compare_and_swap
+
+ AO_INLINE int
+ AO_double_compare_and_swap_acquire(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_double_t tmp;
+ int result = 1;
+
+ do {
+ __asm__ __volatile__("//AO_double_compare_and_swap_acquire\n"
+ " ldaxp %0, %1, %2\n"
+ : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
+ : "Q" (*addr));
+ if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
+ break;
+ __asm__ __volatile__(
+ " stxp %w0, %2, %3, %1\n"
+ : "=&r" (result), "=Q" (*addr)
+ : "r" (new_val.AO_val1), "r" (new_val.AO_val2));
+ } while (AO_EXPECT_FALSE(result));
+ return !result;
+ }
+# define AO_HAVE_double_compare_and_swap_acquire
+
+ AO_INLINE int
+ AO_double_compare_and_swap_release(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ AO_double_t tmp;
+ int result = 1;
+
+ do {
+ __asm__ __volatile__("//AO_double_compare_and_swap_release\n"
+ " ldxp %0, %1, %2\n"
+ : "=&r" (tmp.AO_val1), "=&r" (tmp.AO_val2)
+ : "Q" (*addr));
+ if (tmp.AO_val1 != old_val.AO_val1 || tmp.AO_val2 != old_val.AO_val2)
+ break;
+ __asm__ __volatile__(
+ " stlxp %w0, %2, %3, %1\n"
+ : "=&r" (result), "=Q" (*addr)
+ : "r" (new_val.AO_val1), "r" (new_val.AO_val2));
+ } while (AO_EXPECT_FALSE(result));
+ return !result;
+ }
+# define AO_HAVE_double_compare_and_swap_release
+#endif
+
+#include "generic.h"
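
For context (not part of the patch), a minimal usage sketch of the double-wide primitives added above. The AO_val1/AO_val2 fields come from standard_ao_double_t.h; the variable `slot` and helper `try_tag_update` are hypothetical:

    #include "atomic_ops.h"

    static volatile AO_double_t slot;   /* e.g. a pointer/tag pair */

    /* Replace the pair only if neither half has changed since the load. */
    int try_tag_update(AO_t new_ptr, AO_t new_tag)
    {
      AO_double_t old_val = AO_double_load(&slot);  /* LDXP/STXP loop on AArch64 */
      AO_double_t new_val;

      new_val.AO_val1 = new_ptr;
      new_val.AO_val2 = new_tag;
      return AO_double_compare_and_swap(&slot, old_val, new_val);
    }
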
diff --git a/src/atomic_ops/sysdeps/gcc/arm.h b/src/atomic_ops/sysdeps/gcc/arm.h
index e3a6bf8..9b05216 100644
--- a/src/atomic_ops/sysdeps/gcc/arm.h
+++ b/src/atomic_ops/sysdeps/gcc/arm.h
@@ -590,5 +590,4 @@ AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
# define AO_HAVE_test_and_set_full
#endif /* !AO_HAVE_test_and_set[_full] && AO_ARM_HAVE_SWP */
-/* FIXME: 32-bit ABI is assumed. */
#define AO_T_IS_INT
diff --git a/src/atomic_ops/sysdeps/gcc/generic-arithm.h b/src/atomic_ops/sysdeps/gcc/generic-arithm.h
new file mode 100644
index 0000000..65e7767
--- /dev/null
+++ b/src/atomic_ops/sysdeps/gcc/generic-arithm.h
@@ -0,0 +1,704 @@
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/char
+AO_char_fetch_and_add(volatile unsigned/**/char *addr, unsigned/**/char incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_fetch_and_add
+
+AO_INLINE void
+AO_char_and(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_and
+
+AO_INLINE void
+AO_char_or(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_or
+
+AO_INLINE void
+AO_char_xor(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_xor
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/short
+AO_short_fetch_and_add(volatile unsigned/**/short *addr, unsigned/**/short incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_fetch_and_add
+
+AO_INLINE void
+AO_short_and(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_and
+
+AO_INLINE void
+AO_short_or(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_or
+
+AO_INLINE void
+AO_short_xor(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_xor
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_fetch_and_add(volatile unsigned *addr, unsigned incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_fetch_and_add
+
+AO_INLINE void
+AO_int_and(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_and
+
+AO_INLINE void
+AO_int_or(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_or
+
+AO_INLINE void
+AO_int_xor(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_xor
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_fetch_and_add
+
+AO_INLINE void
+AO_and(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_and
+
+AO_INLINE void
+AO_or(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_or
+
+AO_INLINE void
+AO_xor(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_xor
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/char
+AO_char_fetch_and_add_acquire(volatile unsigned/**/char *addr, unsigned/**/char incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_fetch_and_add_acquire
+
+AO_INLINE void
+AO_char_and_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_and_acquire
+
+AO_INLINE void
+AO_char_or_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_or_acquire
+
+AO_INLINE void
+AO_char_xor_acquire(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_xor_acquire
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/short
+AO_short_fetch_and_add_acquire(volatile unsigned/**/short *addr, unsigned/**/short incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_fetch_and_add_acquire
+
+AO_INLINE void
+AO_short_and_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_and_acquire
+
+AO_INLINE void
+AO_short_or_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_or_acquire
+
+AO_INLINE void
+AO_short_xor_acquire(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_xor_acquire
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_fetch_and_add_acquire(volatile unsigned *addr, unsigned incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_int_fetch_and_add_acquire
+
+AO_INLINE void
+AO_int_and_acquire(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_int_and_acquire
+
+AO_INLINE void
+AO_int_or_acquire(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_int_or_acquire
+
+AO_INLINE void
+AO_int_xor_acquire(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_int_xor_acquire
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_fetch_and_add_acquire
+
+AO_INLINE void
+AO_and_acquire(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_and_acquire
+
+AO_INLINE void
+AO_or_acquire(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_or_acquire
+
+AO_INLINE void
+AO_xor_acquire(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_xor_acquire
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/char
+AO_char_fetch_and_add_release(volatile unsigned/**/char *addr, unsigned/**/char incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_char_fetch_and_add_release
+
+AO_INLINE void
+AO_char_and_release(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_char_and_release
+
+AO_INLINE void
+AO_char_or_release(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_char_or_release
+
+AO_INLINE void
+AO_char_xor_release(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_char_xor_release
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/short
+AO_short_fetch_and_add_release(volatile unsigned/**/short *addr, unsigned/**/short incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_short_fetch_and_add_release
+
+AO_INLINE void
+AO_short_and_release(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_short_and_release
+
+AO_INLINE void
+AO_short_or_release(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_short_or_release
+
+AO_INLINE void
+AO_short_xor_release(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_short_xor_release
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_fetch_and_add_release(volatile unsigned *addr, unsigned incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_int_fetch_and_add_release
+
+AO_INLINE void
+AO_int_and_release(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_int_and_release
+
+AO_INLINE void
+AO_int_or_release(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_int_or_release
+
+AO_INLINE void
+AO_int_xor_release(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_int_xor_release
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_fetch_and_add_release(volatile AO_t *addr, AO_t incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_fetch_and_add_release
+
+AO_INLINE void
+AO_and_release(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_and_release
+
+AO_INLINE void
+AO_or_release(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_or_release
+
+AO_INLINE void
+AO_xor_release(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_xor_release
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/char
+AO_char_fetch_and_add_full(volatile unsigned/**/char *addr, unsigned/**/char incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_char_fetch_and_add_full
+
+AO_INLINE void
+AO_char_and_full(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_char_and_full
+
+AO_INLINE void
+AO_char_or_full(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_char_or_full
+
+AO_INLINE void
+AO_char_xor_full(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_char_xor_full
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/short
+AO_short_fetch_and_add_full(volatile unsigned/**/short *addr, unsigned/**/short incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_short_fetch_and_add_full
+
+AO_INLINE void
+AO_short_and_full(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_short_and_full
+
+AO_INLINE void
+AO_short_or_full(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_short_or_full
+
+AO_INLINE void
+AO_short_xor_full(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_short_xor_full
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_fetch_and_add_full(volatile unsigned *addr, unsigned incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_int_fetch_and_add_full
+
+AO_INLINE void
+AO_int_and_full(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_int_and_full
+
+AO_INLINE void
+AO_int_or_full(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_int_or_full
+
+AO_INLINE void
+AO_int_xor_full(volatile unsigned *addr, unsigned value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_int_xor_full
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_fetch_and_add_full(volatile AO_t *addr, AO_t incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_fetch_and_add_full
+
+AO_INLINE void
+AO_and_full(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_and_full
+
+AO_INLINE void
+AO_or_full(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_or_full
+
+AO_INLINE void
+AO_xor_full(volatile AO_t *addr, AO_t value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_SEQ_CST);
+}
+#define AO_HAVE_xor_full
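
As a quick illustration (a sketch, not part of the patch), the relaxed and _full variants generated above are called like any other libatomic_ops arithmetic primitive; `counter` and `flags` here are hypothetical:

    #include "atomic_ops.h"

    static volatile AO_t counter;
    static volatile unsigned flags;

    void record_event(void)
    {
      (void)AO_fetch_and_add(&counter, 1);  /* relaxed __atomic_fetch_add */
      AO_int_or_full(&flags, 0x1u);         /* seq-cst __atomic_or_fetch  */
    }
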
diff --git a/src/atomic_ops/sysdeps/gcc/generic-arithm.template b/src/atomic_ops/sysdeps/gcc/generic-arithm.template
new file mode 100644
index 0000000..2067d55
--- /dev/null
+++ b/src/atomic_ops/sysdeps/gcc/generic-arithm.template
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE XCTYPE
+AO_XSIZE_fetch_and_add_XBAR(volatile XCTYPE *addr, XCTYPE incr)
+{
+ return __atomic_fetch_add(addr, incr, __ATOMIC_XGCCBAR);
+}
+#define AO_HAVE_XSIZE_fetch_and_add_XBAR
+
+AO_INLINE void
+AO_XSIZE_and_XBAR(volatile XCTYPE *addr, XCTYPE value)
+{
+ (void)__atomic_and_fetch(addr, value, __ATOMIC_XGCCBAR);
+}
+#define AO_HAVE_XSIZE_and_XBAR
+
+AO_INLINE void
+AO_XSIZE_or_XBAR(volatile XCTYPE *addr, XCTYPE value)
+{
+ (void)__atomic_or_fetch(addr, value, __ATOMIC_XGCCBAR);
+}
+#define AO_HAVE_XSIZE_or_XBAR
+
+AO_INLINE void
+AO_XSIZE_xor_XBAR(volatile XCTYPE *addr, XCTYPE value)
+{
+ (void)__atomic_xor_fetch(addr, value, __ATOMIC_XGCCBAR);
+}
+#define AO_HAVE_XSIZE_xor_XBAR
diff --git a/src/atomic_ops/sysdeps/gcc/generic-small.h b/src/atomic_ops/sysdeps/gcc/generic-small.h
new file mode 100644
index 0000000..72f4a5b
--- /dev/null
+++ b/src/atomic_ops/sysdeps/gcc/generic-small.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/char
+AO_char_load(const volatile unsigned/**/char *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_load
+
+AO_INLINE unsigned/**/char
+AO_char_load_acquire(const volatile unsigned/**/char *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_char_load_acquire
+
+/* char_load_full is generalized using load and nop_full, so that */
+/* char_load_read is defined using load and nop_read. */
+/* char_store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_char_store(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_char_store
+
+AO_INLINE void
+AO_char_store_release(volatile unsigned/**/char *addr, unsigned/**/char value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_char_store_release
+
+AO_INLINE unsigned/**/char
+AO_char_fetch_compare_and_swap(volatile unsigned/**/char *addr,
+ unsigned/**/char old_val, unsigned/**/char new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+}
+#define AO_HAVE_char_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_char_compare_and_swap(volatile unsigned/**/char *addr,
+ unsigned/**/char old_val, unsigned/**/char new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+ }
+# define AO_HAVE_char_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned/**/short
+AO_short_load(const volatile unsigned/**/short *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_load
+
+AO_INLINE unsigned/**/short
+AO_short_load_acquire(const volatile unsigned/**/short *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_short_load_acquire
+
+/* short_load_full is generalized using load and nop_full, so that */
+/* short_load_read is defined using load and nop_read. */
+/* short_store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_short_store(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_short_store
+
+AO_INLINE void
+AO_short_store_release(volatile unsigned/**/short *addr, unsigned/**/short value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_short_store_release
+
+AO_INLINE unsigned/**/short
+AO_short_fetch_compare_and_swap(volatile unsigned/**/short *addr,
+ unsigned/**/short old_val, unsigned/**/short new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+}
+#define AO_HAVE_short_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_short_compare_and_swap(volatile unsigned/**/short *addr,
+ unsigned/**/short old_val, unsigned/**/short new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+ }
+# define AO_HAVE_short_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE unsigned
+AO_int_load(const volatile unsigned *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_load
+
+AO_INLINE unsigned
+AO_int_load_acquire(const volatile unsigned *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_int_load_acquire
+
+/* int_load_full is generalized using load and nop_full, so that */
+/* int_load_read is defined using load and nop_read. */
+/* int_store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_int_store(volatile unsigned *addr, unsigned value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_int_store
+
+AO_INLINE void
+AO_int_store_release(volatile unsigned *addr, unsigned value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_int_store_release
+
+AO_INLINE unsigned
+AO_int_fetch_compare_and_swap(volatile unsigned *addr,
+ unsigned old_val, unsigned new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+}
+#define AO_HAVE_int_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_int_compare_and_swap(volatile unsigned *addr,
+ unsigned old_val, unsigned new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+ }
+# define AO_HAVE_int_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE AO_t
+AO_load(const volatile AO_t *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_load
+
+AO_INLINE AO_t
+AO_load_acquire(const volatile AO_t *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_load_acquire
+
+/* load_full is generalized using load and nop_full, so that */
+/* load_read is defined using load and nop_read. */
+/* store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_store(volatile AO_t *addr, AO_t value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_store
+
+AO_INLINE void
+AO_store_release(volatile AO_t *addr, AO_t value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_store_release
+
+AO_INLINE AO_t
+AO_fetch_compare_and_swap(volatile AO_t *addr,
+ AO_t old_val, AO_t new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+}
+#define AO_HAVE_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_compare_and_swap(volatile AO_t *addr,
+ AO_t old_val, AO_t new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+ }
+# define AO_HAVE_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
diff --git a/src/atomic_ops/sysdeps/gcc/generic-small.template b/src/atomic_ops/sysdeps/gcc/generic-small.template
new file mode 100644
index 0000000..9685acf
--- /dev/null
+++ b/src/atomic_ops/sysdeps/gcc/generic-small.template
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+AO_INLINE XCTYPE
+AO_XSIZE_load(const volatile XCTYPE *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_XSIZE_load
+
+AO_INLINE XCTYPE
+AO_XSIZE_load_acquire(const volatile XCTYPE *addr)
+{
+ return __atomic_load_n(addr, __ATOMIC_ACQUIRE);
+}
+#define AO_HAVE_XSIZE_load_acquire
+
+/* XSIZE_load_full is generalized using load and nop_full, so that */
+/* XSIZE_load_read is defined using load and nop_read. */
+/* XSIZE_store_full definition is omitted similar to load_full reason. */
+
+AO_INLINE void
+AO_XSIZE_store(volatile XCTYPE *addr, XCTYPE value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELAXED);
+}
+#define AO_HAVE_XSIZE_store
+
+AO_INLINE void
+AO_XSIZE_store_release(volatile XCTYPE *addr, XCTYPE value)
+{
+ __atomic_store_n(addr, value, __ATOMIC_RELEASE);
+}
+#define AO_HAVE_XSIZE_store_release
+
+AO_INLINE XCTYPE
+AO_XSIZE_fetch_compare_and_swap(volatile XCTYPE *addr,
+ XCTYPE old_val, XCTYPE new_val)
+{
+ return __sync_val_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+}
+#define AO_HAVE_XSIZE_fetch_compare_and_swap
+
+/* TODO: Add CAS _acquire/release/full primitives. */
+
+#ifndef AO_GENERALIZE_ASM_BOOL_CAS
+ AO_INLINE int
+ AO_XSIZE_compare_and_swap(volatile XCTYPE *addr,
+ XCTYPE old_val, XCTYPE new_val)
+ {
+ return __sync_bool_compare_and_swap(addr, old_val, new_val
+ /* empty protection list */);
+ }
+# define AO_HAVE_XSIZE_compare_and_swap
+#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
diff --git a/src/atomic_ops/sysdeps/gcc/generic.h b/src/atomic_ops/sysdeps/gcc/generic.h
new file mode 100644
index 0000000..de79edb
--- /dev/null
+++ b/src/atomic_ops/sysdeps/gcc/generic.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 2003-2011 Hewlett-Packard Development Company, L.P.
+ *
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+
+/* The following implementation assumes GCC 4.7 or later. */
+/* For the details, see GNU Manual, chapter 6.52 (Built-in functions */
+/* for memory model aware atomic operations). */
+
+/* TODO: Include this file for other targets if gcc 4.7+ */
+
+#ifdef AO_UNIPROCESSOR
+ /* If only a single processor (core) is used, AO_UNIPROCESSOR could */
+ /* be defined by the client to avoid unnecessary memory barrier. */
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ AO_compiler_barrier();
+ }
+# define AO_HAVE_nop_full
+
+#else
+ AO_INLINE void
+ AO_nop_read(void)
+ {
+ __atomic_thread_fence(__ATOMIC_ACQUIRE);
+ }
+# define AO_HAVE_nop_read
+
+# ifndef AO_HAVE_nop_write
+ AO_INLINE void
+ AO_nop_write(void)
+ {
+ __atomic_thread_fence(__ATOMIC_RELEASE);
+ }
+# define AO_HAVE_nop_write
+# endif
+
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ /* __sync_synchronize() could be used instead. */
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+ }
+# define AO_HAVE_nop_full
+#endif /* !AO_UNIPROCESSOR */
+
+#include "generic-small.h"
+
+#ifndef AO_PREFER_GENERALIZED
+# include "generic-arithm.h"
+
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set(volatile AO_TS_t *addr)
+ {
+ return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_RELAXED);
+ }
+# define AO_HAVE_test_and_set
+
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_acquire(volatile AO_TS_t *addr)
+ {
+ return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_ACQUIRE);
+ }
+# define AO_HAVE_test_and_set_acquire
+
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_release(volatile AO_TS_t *addr)
+ {
+ return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_RELEASE);
+ }
+# define AO_HAVE_test_and_set_release
+
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_full(volatile AO_TS_t *addr)
+ {
+ return (AO_TS_VAL_t)__atomic_test_and_set(addr, __ATOMIC_SEQ_CST);
+ }
+# define AO_HAVE_test_and_set_full
+#endif /* !AO_PREFER_GENERALIZED */
+
+#ifdef AO_HAVE_DOUBLE_PTR_STORAGE
+
+# ifndef AO_HAVE_double_load
+ AO_INLINE AO_double_t
+ AO_double_load(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+
+ result.AO_whole = __atomic_load_n(&addr->AO_whole, __ATOMIC_RELAXED);
+ return result;
+ }
+# define AO_HAVE_double_load
+# endif
+
+# ifndef AO_HAVE_double_load_acquire
+ AO_INLINE AO_double_t
+ AO_double_load_acquire(const volatile AO_double_t *addr)
+ {
+ AO_double_t result;
+
+ result.AO_whole = __atomic_load_n(&addr->AO_whole, __ATOMIC_ACQUIRE);
+ return result;
+ }
+# define AO_HAVE_double_load_acquire
+# endif
+
+# ifndef AO_HAVE_double_store
+ AO_INLINE void
+ AO_double_store(volatile AO_double_t *addr, AO_double_t value)
+ {
+ __atomic_store_n(&addr->AO_whole, value.AO_whole, __ATOMIC_RELAXED);
+ }
+# define AO_HAVE_double_store
+# endif
+
+# ifndef AO_HAVE_double_store_release
+ AO_INLINE void
+ AO_double_store_release(volatile AO_double_t *addr, AO_double_t value)
+ {
+ __atomic_store_n(&addr->AO_whole, value.AO_whole, __ATOMIC_RELEASE);
+ }
+# define AO_HAVE_double_store_release
+# endif
+
+# ifndef AO_HAVE_double_compare_and_swap
+ AO_INLINE int
+ AO_double_compare_and_swap(volatile AO_double_t *addr,
+ AO_double_t old_val, AO_double_t new_val)
+ {
+ return (int)__atomic_compare_exchange_n(&addr->AO_whole,
+ &old_val.AO_whole /* p_expected */,
+ new_val.AO_whole /* desired */,
+ 0 /* is_weak: false */,
+ __ATOMIC_RELAXED /* success */,
+ __ATOMIC_RELAXED /* failure */);
+ }
+# define AO_HAVE_double_compare_and_swap
+# endif
+
+ /* TODO: Add double CAS _acquire/release/full primitives. */
+#endif /* AO_HAVE_DOUBLE_PTR_STORAGE */
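
For illustration only (a sketch assuming the usual AO_TS_INITIALIZER, AO_TS_SET, and AO_CLEAR helpers from the existing libatomic_ops headers), the test-and-set primitives above map directly onto a spin lock:

    #include "atomic_ops.h"

    static volatile AO_TS_t lock = AO_TS_INITIALIZER;

    void spin_lock(void)
    {
      while (AO_test_and_set_acquire(&lock) == AO_TS_SET) {
        /* spin until the previous owner clears the flag */
      }
    }

    void spin_unlock(void)
    {
      AO_CLEAR(&lock);   /* release-store of AO_TS_CLEAR */
    }
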
diff --git a/src/atomic_ops/sysdeps/gcc/x86.h b/src/atomic_ops/sysdeps/gcc/x86.h
index 60bdfb3..bed51d1 100644
--- a/src/atomic_ops/sysdeps/gcc/x86.h
+++ b/src/atomic_ops/sysdeps/gcc/x86.h
@@ -25,8 +25,8 @@
#include "../test_and_set_t_is_char.h"
-#if defined(__x86_64__) && !defined(AO_USE_PENTIUM4_INSTRS)
- /* "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
+#if defined(__SSE2__) && !defined(AO_USE_PENTIUM4_INSTRS)
+ /* "mfence" is a part of SSE2 set (introduced on Intel Pentium 4). */
# define AO_USE_PENTIUM4_INSTRS
#endif
@@ -282,6 +282,9 @@ AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
}
# define AO_HAVE_int_fetch_and_add_full
+/* TODO: Implement double_load/store. */
+/* TODO: Test some gcc macro to detect presence of cmpxchg16b. */
+
# ifdef AO_CMPXCHG16B_AVAILABLE
# include "../standard_ao_double_t.h"
diff --git a/src/atomic_ops/sysdeps/standard_ao_double_t.h b/src/atomic_ops/sysdeps/standard_ao_double_t.h
index 1ed1c3a..bf76979 100644
--- a/src/atomic_ops/sysdeps/standard_ao_double_t.h
+++ b/src/atomic_ops/sysdeps/standard_ao_double_t.h
@@ -32,6 +32,8 @@
typedef __m128 double_ptr_storage;
#elif defined(_WIN32) && !defined(__GNUC__)
typedef unsigned __int64 double_ptr_storage;
+#elif defined(__aarch64__)
+ typedef unsigned __int128 double_ptr_storage;
#else
typedef unsigned long long double_ptr_storage;
#endif