@@ -41,44 +41,8 @@
#include <atomic-machine.h>
-/* Wrapper macros to call pre_NN_post (mem, ...) where NN is the
- bit width of *MEM. The calling macro puts parens around MEM
- and following args. */
-#define __atomic_val_bysize(pre, post, mem, ...) \
- ({ \
- __typeof ((__typeof (*(mem))) *(mem)) __atg1_result; \
- if (sizeof (*mem) == 1) \
- __atg1_result = pre##_8_##post (mem, __VA_ARGS__); \
- else if (sizeof (*mem) == 2) \
- __atg1_result = pre##_16_##post (mem, __VA_ARGS__); \
- else if (sizeof (*mem) == 4) \
- __atg1_result = pre##_32_##post (mem, __VA_ARGS__); \
- else if (sizeof (*mem) == 8) \
- __atg1_result = pre##_64_##post (mem, __VA_ARGS__); \
- else \
- abort (); \
- __atg1_result; \
- })
-#define __atomic_bool_bysize(pre, post, mem, ...) \
- ({ \
- int __atg2_result; \
- if (sizeof (*mem) == 1) \
- __atg2_result = pre##_8_##post (mem, __VA_ARGS__); \
- else if (sizeof (*mem) == 2) \
- __atg2_result = pre##_16_##post (mem, __VA_ARGS__); \
- else if (sizeof (*mem) == 4) \
- __atg2_result = pre##_32_##post (mem, __VA_ARGS__); \
- else if (sizeof (*mem) == 8) \
- __atg2_result = pre##_64_##post (mem, __VA_ARGS__); \
- else \
- abort (); \
- __atg2_result; \
- })
-
-
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
Return the old *MEM value. */
-#undef atomic_compare_and_exchange_val_acq
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
({ \
__typeof (*(mem)) __atg3_old = (oldval); \
@@ -86,7 +50,6 @@
__atg3_old; \
})
-#undef atomic_compare_and_exchange_val_rel
#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
({ \
__typeof (*(mem)) __atg3_old = (oldval); \
@@ -96,7 +59,6 @@
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
Return zero if *MEM was changed or non-zero if no exchange happened. */
-#undef atomic_compare_and_exchange_bool_acq
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
({ \
__typeof (*(mem)) __atg3_old = (oldval); \
@@ -143,7 +105,7 @@
#ifndef atomic_full_barrier
-# define atomic_full_barrier() __asm ("" ::: "memory")
+# define atomic_full_barrier() __sync_synchronize()
#endif
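The hunks above drop the sizeof-based __atomic_*_bysize dispatch wrappers and the #undefs that guarded per-architecture overrides, and switch the atomic_full_barrier fallback from a compiler-only barrier to __sync_synchronize (), which emits a real hardware fence. The surviving generic compare-and-exchange macros are built on the GCC __atomic builtins; as a rough sketch of their shape (illustrative macro name, not the literal glibc body):

  /* Illustrative sketch only: a value-returning acquire CAS expressed
     directly with the GCC builtin.  On failure the builtin writes the
     observed value back into __old, so __old is always the previous
     contents of *mem.  */
  #define cas_val_acq(mem, newval, oldval)                                \
    ({                                                                    \
      __typeof (*(mem)) __old = (oldval);                                 \
      __atomic_compare_exchange_n ((mem), &__old, (newval), 0,            \
                                   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);   \
      __old;                                                              \
    })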
@@ -20,90 +20,6 @@
#define _AARCH64_ATOMIC_MACHINE_H 1
#define __HAVE_64B_ATOMICS 1
-#define USE_ATOMIC_COMPILER_BUILTINS 1
#define ATOMIC_EXCHANGE_USES_CAS 0
-/* Compare and exchange.
- For all "bool" routines, we return FALSE if exchange succesful. */
-
-# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- })
-
-# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- })
-
-# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- })
-
-# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- })
-
-# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-
-/* Compare and exchange with "acquire" semantics, ie barrier after. */
-
-# define atomic_compare_and_exchange_bool_acq(mem, new, old) \
- __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
- mem, new, old, __ATOMIC_ACQUIRE)
-
-# define atomic_compare_and_exchange_val_acq(mem, new, old) \
- __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
- mem, new, old, __ATOMIC_ACQUIRE)
-
-/* Compare and exchange with "release" semantics, ie barrier before. */
-
-# define atomic_compare_and_exchange_val_rel(mem, new, old) \
- __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
- mem, new, old, __ATOMIC_RELEASE)
-
-/* Barrier macro. */
-#define atomic_full_barrier() __sync_synchronize()
-
#endif
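The AArch64 hunk removes the per-width __arch_compare_and_exchange_* macros (each of which simply forwarded to __atomic_compare_exchange_n), the local atomic_full_barrier, and the USE_ATOMIC_COMPILER_BUILTINS define, leaving only the configuration facts. For reference, a minimal sketch of the builtin call the removed "bool" macros wrapped; the helper name is hypothetical:

  static inline int
  cas_bool_acq_int (int *mem, int newval, int oldval)
  {
    /* __atomic_compare_exchange_n returns true on success; glibc's "bool"
       convention is inverted, so 0 means the exchange happened.  */
    return !__atomic_compare_exchange_n (mem, &oldval, newval, 0,
                                         __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  }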
@@ -18,313 +18,10 @@
#include <stdint.h>
#define __HAVE_64B_ATOMICS 1
-#define USE_ATOMIC_COMPILER_BUILTINS 0
/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 1
-
-#define __MB " mb\n"
-
-
-/* Compare and exchange. For all of the "xxx" routines, we expect a
- "__prev" and a "__cmp" variable to be provided by the enclosing scope,
- in which values are returned. */
-
-#define __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2) \
-({ \
- unsigned long __tmp, __snew, __addr64; \
- __asm__ __volatile__ ( \
- mb1 \
- " andnot %[__addr8],7,%[__addr64]\n" \
- " insbl %[__new],%[__addr8],%[__snew]\n" \
- "1: ldq_l %[__tmp],0(%[__addr64])\n" \
- " extbl %[__tmp],%[__addr8],%[__prev]\n" \
- " cmpeq %[__prev],%[__old],%[__cmp]\n" \
- " beq %[__cmp],2f\n" \
- " mskbl %[__tmp],%[__addr8],%[__tmp]\n" \
- " or %[__snew],%[__tmp],%[__tmp]\n" \
- " stq_c %[__tmp],0(%[__addr64])\n" \
- " beq %[__tmp],1b\n" \
- mb2 \
- "2:" \
- : [__prev] "=&r" (__prev), \
- [__snew] "=&r" (__snew), \
- [__tmp] "=&r" (__tmp), \
- [__cmp] "=&r" (__cmp), \
- [__addr64] "=&r" (__addr64) \
- : [__addr8] "r" (mem), \
- [__old] "Ir" ((uint64_t)(uint8_t)(uint64_t)(old)), \
- [__new] "r" (new) \
- : "memory"); \
-})
-
-#define __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2) \
-({ \
- unsigned long __tmp, __snew, __addr64; \
- __asm__ __volatile__ ( \
- mb1 \
- " andnot %[__addr16],7,%[__addr64]\n" \
- " inswl %[__new],%[__addr16],%[__snew]\n" \
- "1: ldq_l %[__tmp],0(%[__addr64])\n" \
- " extwl %[__tmp],%[__addr16],%[__prev]\n" \
- " cmpeq %[__prev],%[__old],%[__cmp]\n" \
- " beq %[__cmp],2f\n" \
- " mskwl %[__tmp],%[__addr16],%[__tmp]\n" \
- " or %[__snew],%[__tmp],%[__tmp]\n" \
- " stq_c %[__tmp],0(%[__addr64])\n" \
- " beq %[__tmp],1b\n" \
- mb2 \
- "2:" \
- : [__prev] "=&r" (__prev), \
- [__snew] "=&r" (__snew), \
- [__tmp] "=&r" (__tmp), \
- [__cmp] "=&r" (__cmp), \
- [__addr64] "=&r" (__addr64) \
- : [__addr16] "r" (mem), \
- [__old] "Ir" ((uint64_t)(uint16_t)(uint64_t)(old)), \
- [__new] "r" (new) \
- : "memory"); \
-})
-
-#define __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2) \
-({ \
- __asm__ __volatile__ ( \
- mb1 \
- "1: ldl_l %[__prev],%[__mem]\n" \
- " cmpeq %[__prev],%[__old],%[__cmp]\n" \
- " beq %[__cmp],2f\n" \
- " mov %[__new],%[__cmp]\n" \
- " stl_c %[__cmp],%[__mem]\n" \
- " beq %[__cmp],1b\n" \
- mb2 \
- "2:" \
- : [__prev] "=&r" (__prev), \
- [__cmp] "=&r" (__cmp) \
- : [__mem] "m" (*(mem)), \
- [__old] "Ir" ((uint64_t)(int32_t)(uint64_t)(old)), \
- [__new] "Ir" (new) \
- : "memory"); \
-})
-
-#define __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2) \
-({ \
- __asm__ __volatile__ ( \
- mb1 \
- "1: ldq_l %[__prev],%[__mem]\n" \
- " cmpeq %[__prev],%[__old],%[__cmp]\n" \
- " beq %[__cmp],2f\n" \
- " mov %[__new],%[__cmp]\n" \
- " stq_c %[__cmp],%[__mem]\n" \
- " beq %[__cmp],1b\n" \
- mb2 \
- "2:" \
- : [__prev] "=&r" (__prev), \
- [__cmp] "=&r" (__cmp) \
- : [__mem] "m" (*(mem)), \
- [__old] "Ir" ((uint64_t)(old)), \
- [__new] "Ir" (new) \
- : "memory"); \
-})
-
-/* For all "bool" routines, we return FALSE if exchange succesful. */
-
-#define __arch_compare_and_exchange_bool_8_int(mem, new, old, mb1, mb2) \
-({ unsigned long __prev; int __cmp; \
- __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2); \
- !__cmp; })
-
-#define __arch_compare_and_exchange_bool_16_int(mem, new, old, mb1, mb2) \
-({ unsigned long __prev; int __cmp; \
- __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2); \
- !__cmp; })
-
-#define __arch_compare_and_exchange_bool_32_int(mem, new, old, mb1, mb2) \
-({ unsigned long __prev; int __cmp; \
- __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2); \
- !__cmp; })
-
-#define __arch_compare_and_exchange_bool_64_int(mem, new, old, mb1, mb2) \
-({ unsigned long __prev; int __cmp; \
- __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2); \
- !__cmp; })
-
-/* For all "val" routines, return the old value whether exchange
- successful or not. */
-
-#define __arch_compare_and_exchange_val_8_int(mem, new, old, mb1, mb2) \
-({ unsigned long __prev; int __cmp; \
- __arch_compare_and_exchange_xxx_8_int(mem, new, old, mb1, mb2); \
- (typeof (*mem))__prev; })
-
-#define __arch_compare_and_exchange_val_16_int(mem, new, old, mb1, mb2) \
-({ unsigned long __prev; int __cmp; \
- __arch_compare_and_exchange_xxx_16_int(mem, new, old, mb1, mb2); \
- (typeof (*mem))__prev; })
-
-#define __arch_compare_and_exchange_val_32_int(mem, new, old, mb1, mb2) \
-({ unsigned long __prev; int __cmp; \
- __arch_compare_and_exchange_xxx_32_int(mem, new, old, mb1, mb2); \
- (typeof (*mem))__prev; })
-
-#define __arch_compare_and_exchange_val_64_int(mem, new, old, mb1, mb2) \
-({ unsigned long __prev; int __cmp; \
- __arch_compare_and_exchange_xxx_64_int(mem, new, old, mb1, mb2); \
- (typeof (*mem))__prev; })
-
-/* Compare and exchange with "acquire" semantics, ie barrier after. */
-
-#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
- __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
- mem, new, old, "", __MB)
-
-#define atomic_compare_and_exchange_val_acq(mem, new, old) \
- __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
- mem, new, old, "", __MB)
-
-/* Compare and exchange with "release" semantics, ie barrier before. */
-
-#define atomic_compare_and_exchange_val_rel(mem, new, old) \
- __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
- mem, new, old, __MB, "")
-
-
-/* Atomically store value and return the previous value. */
-
-#define __arch_exchange_8_int(mem, value, mb1, mb2) \
-({ \
- unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret; \
- __asm__ __volatile__ ( \
- mb1 \
- " andnot %[__addr8],7,%[__addr64]\n" \
- " insbl %[__value],%[__addr8],%[__sval]\n" \
- "1: ldq_l %[__tmp],0(%[__addr64])\n" \
- " extbl %[__tmp],%[__addr8],%[__ret]\n" \
- " mskbl %[__tmp],%[__addr8],%[__tmp]\n" \
- " or %[__sval],%[__tmp],%[__tmp]\n" \
- " stq_c %[__tmp],0(%[__addr64])\n" \
- " beq %[__tmp],1b\n" \
- mb2 \
- : [__ret] "=&r" (__ret), \
- [__sval] "=&r" (__sval), \
- [__tmp] "=&r" (__tmp), \
- [__addr64] "=&r" (__addr64) \
- : [__addr8] "r" (mem), \
- [__value] "r" (value) \
- : "memory"); \
- __ret; })
-
-#define __arch_exchange_16_int(mem, value, mb1, mb2) \
-({ \
- unsigned long __tmp, __addr64, __sval; __typeof(*mem) __ret; \
- __asm__ __volatile__ ( \
- mb1 \
- " andnot %[__addr16],7,%[__addr64]\n" \
- " inswl %[__value],%[__addr16],%[__sval]\n" \
- "1: ldq_l %[__tmp],0(%[__addr64])\n" \
- " extwl %[__tmp],%[__addr16],%[__ret]\n" \
- " mskwl %[__tmp],%[__addr16],%[__tmp]\n" \
- " or %[__sval],%[__tmp],%[__tmp]\n" \
- " stq_c %[__tmp],0(%[__addr64])\n" \
- " beq %[__tmp],1b\n" \
- mb2 \
- : [__ret] "=&r" (__ret), \
- [__sval] "=&r" (__sval), \
- [__tmp] "=&r" (__tmp), \
- [__addr64] "=&r" (__addr64) \
- : [__addr16] "r" (mem), \
- [__value] "r" (value) \
- : "memory"); \
- __ret; })
-
-#define __arch_exchange_32_int(mem, value, mb1, mb2) \
-({ \
- signed int __tmp; __typeof(*mem) __ret; \
- __asm__ __volatile__ ( \
- mb1 \
- "1: ldl_l %[__ret],%[__mem]\n" \
- " mov %[__val],%[__tmp]\n" \
- " stl_c %[__tmp],%[__mem]\n" \
- " beq %[__tmp],1b\n" \
- mb2 \
- : [__ret] "=&r" (__ret), \
- [__tmp] "=&r" (__tmp) \
- : [__mem] "m" (*(mem)), \
- [__val] "Ir" (value) \
- : "memory"); \
- __ret; })
-
-#define __arch_exchange_64_int(mem, value, mb1, mb2) \
-({ \
- unsigned long __tmp; __typeof(*mem) __ret; \
- __asm__ __volatile__ ( \
- mb1 \
- "1: ldq_l %[__ret],%[__mem]\n" \
- " mov %[__val],%[__tmp]\n" \
- " stq_c %[__tmp],%[__mem]\n" \
- " beq %[__tmp],1b\n" \
- mb2 \
- : [__ret] "=&r" (__ret), \
- [__tmp] "=&r" (__tmp) \
- : [__mem] "m" (*(mem)), \
- [__val] "Ir" (value) \
- : "memory"); \
- __ret; })
-
-#define atomic_exchange_acq(mem, value) \
- __atomic_val_bysize (__arch_exchange, int, mem, value, "", __MB)
-
-#define atomic_exchange_rel(mem, value) \
- __atomic_val_bysize (__arch_exchange, int, mem, value, __MB, "")
-
-
-/* Atomically add value and return the previous (unincremented) value. */
-
-#define __arch_exchange_and_add_8_int(mem, value, mb1, mb2) \
- ({ __builtin_trap (); 0; })
-
-#define __arch_exchange_and_add_16_int(mem, value, mb1, mb2) \
- ({ __builtin_trap (); 0; })
-
-#define __arch_exchange_and_add_32_int(mem, value, mb1, mb2) \
-({ \
- signed int __tmp; __typeof(*mem) __ret; \
- __asm__ __volatile__ ( \
- mb1 \
- "1: ldl_l %[__ret],%[__mem]\n" \
- " addl %[__ret],%[__val],%[__tmp]\n" \
- " stl_c %[__tmp],%[__mem]\n" \
- " beq %[__tmp],1b\n" \
- mb2 \
- : [__ret] "=&r" (__ret), \
- [__tmp] "=&r" (__tmp) \
- : [__mem] "m" (*(mem)), \
- [__val] "Ir" ((signed int)(value)) \
- : "memory"); \
- __ret; })
-
-#define __arch_exchange_and_add_64_int(mem, value, mb1, mb2) \
-({ \
- unsigned long __tmp; __typeof(*mem) __ret; \
- __asm__ __volatile__ ( \
- mb1 \
- "1: ldq_l %[__ret],%[__mem]\n" \
- " addq %[__ret],%[__val],%[__tmp]\n" \
- " stq_c %[__tmp],%[__mem]\n" \
- " beq %[__tmp],1b\n" \
- mb2 \
- : [__ret] "=&r" (__ret), \
- [__tmp] "=&r" (__tmp) \
- : [__mem] "m" (*(mem)), \
- [__val] "Ir" ((unsigned long)(value)) \
- : "memory"); \
- __ret; })
-
-/* ??? Barrier semantics for atomic_exchange_and_add appear to be
- undefined. Use full barrier for now, as that's safe. */
-#define atomic_exchange_and_add(mem, value) \
- __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, __MB, __MB)
-
#define atomic_full_barrier() __asm ("mb" : : : "memory");
#define atomic_read_barrier() __asm ("mb" : : : "memory");
#define atomic_write_barrier() __asm ("wmb" : : : "memory");
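The Alpha hunk deletes the hand-written ldq_l/stq_c (load-locked/store-conditional) loops for compare-and-exchange, exchange, and exchange-and-add, keeping only the barrier macros and the configuration defines. A hedged sketch of what such an exchange looks like when left to the compiler builtin, which typically expands to an equivalent LL/SC retry loop on Alpha (variable and function names are illustrative):

  static long counter;                    /* illustrative variable */

  static inline long
  swap_counter (long newval)
  {
    /* Atomically store newval and return the previous value.  */
    return __atomic_exchange_n (&counter, newval, __ATOMIC_ACQUIRE);
  }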
@@ -20,38 +20,9 @@
#define _ARC_BITS_ATOMIC_H 1
#define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 1
/* ARC does have legacy atomic EX reg, [mem] instruction but the micro-arch
is not as optimal as LLOCK/SCOND specially for SMP. */
#define ATOMIC_EXCHANGE_USES_CAS 1
-#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
- (abort (), 0)
-#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
- (abort (), 0)
-#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
- (abort (), (__typeof (*mem)) 0)
-#define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
- (abort (), (__typeof (*mem)) 0)
-#define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
- (abort (), (__typeof (*mem)) 0)
-
-#define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-#define atomic_compare_and_exchange_val_acq(mem, new, old) \
- __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
- mem, new, old, __ATOMIC_ACQUIRE)
-
-#define atomic_full_barrier() ({ asm volatile ("dmb 3":::"memory"); })
-
#endif /* _ARC_BITS_ATOMIC_H */
@@ -17,122 +17,4 @@
<https://www.gnu.org/licenses/>. */
#define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 0
#define ATOMIC_EXCHANGE_USES_CAS 1
-
-void __arm_link_error (void);
-
-#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
-# define atomic_full_barrier() __sync_synchronize ()
-#else
-# define atomic_full_barrier() __arm_assisted_full_barrier ()
-#endif
-
-/* An OS-specific atomic-machine.h file will define this macro if
- the OS can provide something. If not, we'll fail to build
- with a compiler that doesn't supply the operation. */
-#ifndef __arm_assisted_full_barrier
-# define __arm_assisted_full_barrier() __arm_link_error()
-#endif
-
-/* Use the atomic builtins provided by GCC in case the backend provides
- a pattern to do this efficiently. */
-#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
-
-# define atomic_exchange_acq(mem, value) \
- __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
-
-# define atomic_exchange_rel(mem, value) \
- __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
-
-/* Atomic exchange (without compare). */
-
-# define __arch_exchange_8_int(mem, newval, model) \
- (__arm_link_error (), (typeof (*mem)) 0)
-
-# define __arch_exchange_16_int(mem, newval, model) \
- (__arm_link_error (), (typeof (*mem)) 0)
-
-# define __arch_exchange_32_int(mem, newval, model) \
- __atomic_exchange_n (mem, newval, model)
-
-# define __arch_exchange_64_int(mem, newval, model) \
- (__arm_link_error (), (typeof (*mem)) 0)
-
-/* Compare and exchange with "acquire" semantics, ie barrier after. */
-
-# define atomic_compare_and_exchange_bool_acq(mem, new, old) \
- __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
- mem, new, old, __ATOMIC_ACQUIRE)
-
-# define atomic_compare_and_exchange_val_acq(mem, new, old) \
- __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
- mem, new, old, __ATOMIC_ACQUIRE)
-
-/* Compare and exchange with "release" semantics, ie barrier before. */
-
-# define atomic_compare_and_exchange_val_rel(mem, new, old) \
- __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
- mem, new, old, __ATOMIC_RELEASE)
-
-/* Compare and exchange.
- For all "bool" routines, we return FALSE if exchange succesful. */
-
-# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
- ({__arm_link_error (); 0; })
-
-# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
- ({__arm_link_error (); 0; })
-
-# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- })
-
-# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
- ({__arm_link_error (); 0; })
-
-# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
- ({__arm_link_error (); oldval; })
-
-# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
- ({__arm_link_error (); oldval; })
-
-# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
- ({__arm_link_error (); oldval; })
-
-#else
-# define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- __arm_assisted_compare_and_exchange_val_32_acq ((mem), (newval), (oldval))
-#endif
-
-#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
-/* We don't support atomic operations on any non-word types.
- So make them link errors. */
-# define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
- ({ __arm_link_error (); oldval; })
-
-# define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
- ({ __arm_link_error (); oldval; })
-
-# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- ({ __arm_link_error (); oldval; })
-#endif
-
-/* An OS-specific atomic-machine.h file will define this macro if
- the OS can provide something. If not, we'll fail to build
- with a compiler that doesn't supply the operation. */
-#ifndef __arm_assisted_compare_and_exchange_val_32_acq
-# define __arm_assisted_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- ({ __arm_link_error (); oldval; })
-#endif
@@ -20,48 +20,6 @@
#define __CSKY_ATOMIC_H_
#define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 1
#define ATOMIC_EXCHANGE_USES_CAS 1
-#define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- })
-
-#define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
- (abort (), (__typeof (*mem)) 0)
-
-#define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
- (abort (), (__typeof (*mem)) 0)
-
-#define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-#define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
- (abort (), (__typeof (*mem)) 0)
-
-#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
- __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
- mem, new, old, __ATOMIC_ACQUIRE)
-
-#define atomic_compare_and_exchange_val_acq(mem, new, old) \
- __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
- mem, new, old, __ATOMIC_ACQUIRE)
-
#endif /* atomic-machine.h */
@@ -18,24 +18,4 @@
#ifndef _ATOMIC_MACHINE_H
#define _ATOMIC_MACHINE_H 1
-/* We have by default no support for atomic operations. So define
- them non-atomic. If this is a problem somebody will have to come
- up with real definitions. */
-
-/* The only basic operation needed is compare and exchange. */
-#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
- ({ __typeof (mem) __gmemp = (mem); \
- __typeof (*mem) __gret = *__gmemp; \
- __typeof (*mem) __gnewval = (newval); \
- \
- if (__gret == (oldval)) \
- *__gmemp = __gnewval; \
- __gret; })
-
-#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
- ({ __typeof (mem) __gmemp = (mem); \
- __typeof (*mem) __gnewval = (newval); \
- \
- *__gmemp == (oldval) ? (*__gmemp = __gnewval, 0) : 1; })
-
#endif /* atomic-machine.h */
@@ -22,18 +22,6 @@
#include <atomic.h>
-#ifndef atomic_full_barrier
-# define atomic_full_barrier() __asm ("" ::: "memory")
-#endif
-
-#ifndef atomic_read_barrier
-# define atomic_read_barrier() atomic_full_barrier ()
-#endif
-
-#ifndef atomic_write_barrier
-# define atomic_write_barrier() atomic_full_barrier ()
-#endif
-
#ifndef DEFAULT_TOP_PAD
# define DEFAULT_TOP_PAD 131072
#endif
@@ -18,63 +18,6 @@
#include <ia64intrin.h>
#define __HAVE_64B_ATOMICS 1
-#define USE_ATOMIC_COMPILER_BUILTINS 0
/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 0
-
-
-#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
- (!__sync_bool_compare_and_swap ((mem), (int) (long) (oldval), \
- (int) (long) (newval)))
-
-#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
- (!__sync_bool_compare_and_swap ((mem), (long) (oldval), \
- (long) (newval)))
-
-#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
- (abort (), (__typeof (*mem)) 0)
-
-#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
- (abort (), (__typeof (*mem)) 0)
-
-#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- __sync_val_compare_and_swap ((mem), (int) (long) (oldval), \
- (int) (long) (newval))
-
-#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- __sync_val_compare_and_swap ((mem), (long) (oldval), (long) (newval))
-
-/* Atomically store newval and return the old value. */
-#define atomic_exchange_acq(mem, value) \
- __sync_lock_test_and_set (mem, value)
-
-#define atomic_exchange_rel(mem, value) \
- (__sync_synchronize (), __sync_lock_test_and_set (mem, value))
-
-#define atomic_exchange_and_add(mem, value) \
- __sync_fetch_and_add ((mem), (value))
-
-#define atomic_decrement_if_positive(mem) \
- ({ __typeof (*mem) __oldval, __val; \
- __typeof (mem) __memp = (mem); \
- \
- __val = (*__memp); \
- do \
- { \
- __oldval = __val; \
- if (__builtin_expect (__val <= 0, 0)) \
- break; \
- __val = atomic_compare_and_exchange_val_acq (__memp, __oldval - 1, \
- __oldval); \
- } \
- while (__builtin_expect (__val != __oldval, 0)); \
- __oldval; })
-
-#define atomic_full_barrier() __sync_synchronize ()
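The ia64 hunk drops the __sync_*-based macros together with atomic_decrement_if_positive, whose CAS loop is typical of how such read-modify-write helpers are built. Purely as an illustration (this is not a macro glibc still provides), the same loop expressed with the __atomic builtins:

  static inline int
  dec_if_positive (int *mem)
  {
    int old = __atomic_load_n (mem, __ATOMIC_RELAXED);
    /* On CAS failure the builtin refreshes 'old' with the observed value,
       so the loop re-tests it before retrying.  */
    while (old > 0
           && !__atomic_compare_exchange_n (mem, &old, old - 1, 0,
                                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
      ;
    return old;   /* previous value; a result <= 0 means nothing was stored */
  }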
@@ -20,25 +20,8 @@
/* If we have just non-atomic operations, we can as well make them wide. */
#define __HAVE_64B_ATOMICS 1
-#define USE_ATOMIC_COMPILER_BUILTINS 0
/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 1
-/* The only basic operation needed is compare and exchange. */
-#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
- ({ __typeof (mem) __gmemp = (mem); \
- __typeof (*mem) __gret = *__gmemp; \
- __typeof (*mem) __gnewval = (newval); \
- \
- if (__gret == (oldval)) \
- *__gmemp = __gnewval; \
- __gret; })
-
-#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
- ({ __typeof (mem) __gmemp = (mem); \
- __typeof (*mem) __gnewval = (newval); \
- \
- *__gmemp == (oldval) ? (*__gmemp = __gnewval, 0) : 1; })
-
#endif
@@ -17,111 +17,6 @@
/* GCC does not support lock-free 64-bit atomic_load/store. */
#define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 0
/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 1
-
-#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
- ({ __typeof (*(mem)) __ret; \
- __asm __volatile ("cas%.b %0,%2,%1" \
- : "=d" (__ret), "+m" (*(mem)) \
- : "d" (newval), "0" (oldval)); \
- __ret; })
-
-#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
- ({ __typeof (*(mem)) __ret; \
- __asm __volatile ("cas%.w %0,%2,%1" \
- : "=d" (__ret), "+m" (*(mem)) \
- : "d" (newval), "0" (oldval)); \
- __ret; })
-
-#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- ({ __typeof (*(mem)) __ret; \
- __asm __volatile ("cas%.l %0,%2,%1" \
- : "=d" (__ret), "+m" (*(mem)) \
- : "d" (newval), "0" (oldval)); \
- __ret; })
-
-# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- ({ __typeof (*(mem)) __ret; \
- __typeof (mem) __memp = (mem); \
- __asm __volatile ("cas2%.l %0:%R0,%1:%R1,(%2):(%3)" \
- : "=d" (__ret) \
- : "d" ((__typeof (*(mem))) (newval)), "r" (__memp), \
- "r" ((char *) __memp + 4), "0" (oldval) \
- : "memory"); \
- __ret; })
-
-#define atomic_exchange_acq(mem, newvalue) \
- ({ __typeof (*(mem)) __result = *(mem); \
- if (sizeof (*(mem)) == 1) \
- __asm __volatile ("1: cas%.b %0,%2,%1;" \
- " jbne 1b" \
- : "=d" (__result), "+m" (*(mem)) \
- : "d" (newvalue), "0" (__result)); \
- else if (sizeof (*(mem)) == 2) \
- __asm __volatile ("1: cas%.w %0,%2,%1;" \
- " jbne 1b" \
- : "=d" (__result), "+m" (*(mem)) \
- : "d" (newvalue), "0" (__result)); \
- else if (sizeof (*(mem)) == 4) \
- __asm __volatile ("1: cas%.l %0,%2,%1;" \
- " jbne 1b" \
- : "=d" (__result), "+m" (*(mem)) \
- : "d" (newvalue), "0" (__result)); \
- else \
- { \
- __typeof (mem) __memp = (mem); \
- __asm __volatile ("1: cas2%.l %0:%R0,%1:%R1,(%2):(%3);" \
- " jbne 1b" \
- : "=d" (__result) \
- : "d" ((__typeof (*(mem))) (newvalue)), \
- "r" (__memp), "r" ((char *) __memp + 4), \
- "0" (__result) \
- : "memory"); \
- } \
- __result; })
-
-#define atomic_exchange_and_add(mem, value) \
- ({ __typeof (*(mem)) __result = *(mem); \
- __typeof (*(mem)) __temp; \
- if (sizeof (*(mem)) == 1) \
- __asm __volatile ("1: move%.b %0,%2;" \
- " add%.b %3,%2;" \
- " cas%.b %0,%2,%1;" \
- " jbne 1b" \
- : "=d" (__result), "+m" (*(mem)), \
- "=&d" (__temp) \
- : "d" (value), "0" (__result)); \
- else if (sizeof (*(mem)) == 2) \
- __asm __volatile ("1: move%.w %0,%2;" \
- " add%.w %3,%2;" \
- " cas%.w %0,%2,%1;" \
- " jbne 1b" \
- : "=d" (__result), "+m" (*(mem)), \
- "=&d" (__temp) \
- : "d" (value), "0" (__result)); \
- else if (sizeof (*(mem)) == 4) \
- __asm __volatile ("1: move%.l %0,%2;" \
- " add%.l %3,%2;" \
- " cas%.l %0,%2,%1;" \
- " jbne 1b" \
- : "=d" (__result), "+m" (*(mem)), \
- "=&d" (__temp) \
- : "d" (value), "0" (__result)); \
- else \
- { \
- __typeof (mem) __memp = (mem); \
- __asm __volatile ("1: move%.l %0,%1;" \
- " move%.l %R0,%R1;" \
- " add%.l %R2,%R1;" \
- " addx%.l %2,%1;" \
- " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \
- " jbne 1b" \
- : "=d" (__result), "=&d" (__temp) \
- : "d" ((__typeof (*(mem))) (value)), "r" (__memp), \
- "r" ((char *) __memp + 4), "0" (__result) \
- : "memory"); \
- } \
- __result; })
@@ -19,156 +19,6 @@
#include <sysdep.h>
#define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 0
/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 1
-
-
-/* Microblaze does not have byte and halfword forms of load and reserve and
- store conditional. So for microblaze we stub out the 8- and 16-bit forms. */
-#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- ({ \
- __typeof (*(mem)) __tmp; \
- __typeof (mem) __memp = (mem); \
- int test; \
- __asm __volatile ( \
- " addc r0, r0, r0;" \
- "1: lwx %0, %3, r0;" \
- " addic %1, r0, 0;" \
- " bnei %1, 1b;" \
- " cmp %1, %0, %4;" \
- " bnei %1, 2f;" \
- " swx %5, %3, r0;" \
- " addic %1, r0, 0;" \
- " bnei %1, 1b;" \
- "2:" \
- : "=&r" (__tmp), \
- "=&r" (test), \
- "=m" (*__memp) \
- : "r" (__memp), \
- "r" (oldval), \
- "r" (newval) \
- : "cc", "memory"); \
- __tmp; \
- })
-
-#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- (abort (), (__typeof (*mem)) 0)
-
-#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*mem) == 4) \
- __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \
- else if (sizeof (*mem) == 8) \
- __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \
- else \
- abort (); \
- __result; \
- })
-
-#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*mem) == 4) \
- __result = __arch_compare_and_exchange_val_32_acq (mem, newval, oldval); \
- else if (sizeof (*mem) == 8) \
- __result = __arch_compare_and_exchange_val_64_acq (mem, newval, oldval); \
- else \
- abort (); \
- __result; \
- })
-
-#define __arch_atomic_exchange_32_acq(mem, value) \
- ({ \
- __typeof (*(mem)) __tmp; \
- __typeof (mem) __memp = (mem); \
- int test; \
- __asm __volatile ( \
- " addc r0, r0, r0;" \
- "1: lwx %0, %4, r0;" \
- " addic %1, r0, 0;" \
- " bnei %1, 1b;" \
- " swx %3, %4, r0;" \
- " addic %1, r0, 0;" \
- " bnei %1, 1b;" \
- : "=&r" (__tmp), \
- "=&r" (test), \
- "=m" (*__memp) \
- : "r" (value), \
- "r" (__memp) \
- : "cc", "memory"); \
- __tmp; \
- })
-
-#define __arch_atomic_exchange_64_acq(mem, newval) \
- (abort (), (__typeof (*mem)) 0)
-
-#define atomic_exchange_acq(mem, value) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*mem) == 4) \
- __result = __arch_atomic_exchange_32_acq (mem, value); \
- else if (sizeof (*mem) == 8) \
- __result = __arch_atomic_exchange_64_acq (mem, value); \
- else \
- abort (); \
- __result; \
- })
-
-#define atomic_exchange_rel(mem, value) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*mem) == 4) \
- __result = __arch_atomic_exchange_32_acq (mem, value); \
- else if (sizeof (*mem) == 8) \
- __result = __arch_atomic_exchange_64_acq (mem, value); \
- else \
- abort (); \
- __result; \
- })
-
-#define __arch_atomic_exchange_and_add_32(mem, value) \
- ({ \
- __typeof (*(mem)) __tmp; \
- __typeof (mem) __memp = (mem); \
- int test; \
- __asm __volatile ( \
- " addc r0, r0, r0;" \
- "1: lwx %0, %4, r0;" \
- " addic %1, r0, 0;" \
- " bnei %1, 1b;" \
- " add %1, %3, %0;" \
- " swx %1, %4, r0;" \
- " addic %1, r0, 0;" \
- " bnei %1, 1b;" \
- : "=&r" (__tmp), \
- "=&r" (test), \
- "=m" (*__memp) \
- : "r" (value), \
- "r" (__memp) \
- : "cc", "memory"); \
- __tmp; \
- })
-
-#define __arch_atomic_exchange_and_add_64(mem, value) \
- (abort (), (__typeof (*mem)) 0)
-
-#define atomic_exchange_and_add(mem, value) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*mem) == 4) \
- __result = __arch_atomic_exchange_and_add_32 (mem, value); \
- else if (sizeof (*mem) == 8) \
- __result = __arch_atomic_exchange_and_add_64 (mem, value); \
- else \
- abort (); \
- __result; \
- })
@@ -21,29 +21,12 @@
#include <sgidefs.h>
-#if _MIPS_SIM == _ABIO32 && __mips < 2
-#define MIPS_PUSH_MIPS2 ".set mips2\n\t"
-#else
-#define MIPS_PUSH_MIPS2
-#endif
-
#if _MIPS_SIM == _ABIO32 || _MIPS_SIM == _ABIN32
#define __HAVE_64B_ATOMICS 0
#else
#define __HAVE_64B_ATOMICS 1
#endif
-/* See the comments in <sys/asm.h> about the use of the sync instruction. */
-#ifndef MIPS_SYNC
-# define MIPS_SYNC sync
-#endif
-
-#define MIPS_SYNC_STR_2(X) #X
-#define MIPS_SYNC_STR_1(X) MIPS_SYNC_STR_2(X)
-#define MIPS_SYNC_STR MIPS_SYNC_STR_1(MIPS_SYNC)
-
-#define USE_ATOMIC_COMPILER_BUILTINS 1
-
/* MIPS is an LL/SC machine. However, XLP has a direct atomic exchange
instruction which will be used by __atomic_exchange_n. */
#ifdef _MIPS_ARCH_XLP
@@ -52,133 +35,4 @@
# define ATOMIC_EXCHANGE_USES_CAS 1
#endif
-/* Compare and exchange.
- For all "bool" routines, we return FALSE if exchange succesful. */
-
-#define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- })
-
-#define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
- (abort (), (typeof(*mem)) 0)
-
-#define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
- (abort (), (typeof(*mem)) 0)
-
-#define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-#if _MIPS_SIM == _ABIO32
- /* We can't do an atomic 64-bit operation in O32. */
-# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
- (abort (), 0)
-# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
- (abort (), (typeof(*mem)) 0)
-#else
-# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
- __arch_compare_and_exchange_bool_32_int (mem, newval, oldval, model)
-# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
- __arch_compare_and_exchange_val_32_int (mem, newval, oldval, model)
-#endif
-
-/* Compare and exchange with "acquire" semantics, ie barrier after. */
-
-#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
- __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
- mem, new, old, __ATOMIC_ACQUIRE)
-
-#define atomic_compare_and_exchange_val_acq(mem, new, old) \
- __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
- mem, new, old, __ATOMIC_ACQUIRE)
-
-/* Compare and exchange with "release" semantics, ie barrier before. */
-
-#define atomic_compare_and_exchange_val_rel(mem, new, old) \
- __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
- mem, new, old, __ATOMIC_RELEASE)
-
-
-/* Atomic exchange (without compare). */
-
-#define __arch_exchange_8_int(mem, newval, model) \
- (abort (), (typeof(*mem)) 0)
-
-#define __arch_exchange_16_int(mem, newval, model) \
- (abort (), (typeof(*mem)) 0)
-
-#define __arch_exchange_32_int(mem, newval, model) \
- __atomic_exchange_n (mem, newval, model)
-
-#if _MIPS_SIM == _ABIO32
-/* We can't do an atomic 64-bit operation in O32. */
-# define __arch_exchange_64_int(mem, newval, model) \
- (abort (), (typeof(*mem)) 0)
-#else
-# define __arch_exchange_64_int(mem, newval, model) \
- __atomic_exchange_n (mem, newval, model)
-#endif
-
-#define atomic_exchange_acq(mem, value) \
- __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
-
-#define atomic_exchange_rel(mem, value) \
- __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
-
-
-/* Atomically add value and return the previous (unincremented) value. */
-
-#define __arch_exchange_and_add_8_int(mem, value, model) \
- (abort (), (typeof(*mem)) 0)
-
-#define __arch_exchange_and_add_16_int(mem, value, model) \
- (abort (), (typeof(*mem)) 0)
-
-#define __arch_exchange_and_add_32_int(mem, value, model) \
- __atomic_fetch_add (mem, value, model)
-
-#if _MIPS_SIM == _ABIO32
-/* We can't do an atomic 64-bit operation in O32. */
-# define __arch_exchange_and_add_64_int(mem, value, model) \
- (abort (), (typeof(*mem)) 0)
-#else
-# define __arch_exchange_and_add_64_int(mem, value, model) \
- __atomic_fetch_add (mem, value, model)
-#endif
-
-#define atomic_exchange_and_add_acq(mem, value) \
- __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
- __ATOMIC_ACQUIRE)
-
-#define atomic_exchange_and_add_rel(mem, value) \
- __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
- __ATOMIC_RELEASE)
-
-/* TODO: More atomic operations could be implemented efficiently; only the
- basic requirements are done. */
-
-#ifdef __mips16
-# define atomic_full_barrier() __sync_synchronize ()
-
-#else /* !__mips16 */
-# define atomic_full_barrier() \
- __asm__ __volatile__ (".set push\n\t" \
- MIPS_PUSH_MIPS2 \
- MIPS_SYNC_STR "\n\t" \
- ".set pop" : : : "memory")
-#endif /* !__mips16 */
-
#endif /* atomic-machine.h */
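The MIPS hunk removes the MIPS_PUSH_MIPS2/MIPS_SYNC string plumbing, the builtin-based CAS/exchange/fetch-add wrappers, and the MIPS16 special case for atomic_full_barrier, leaving only the ABI facts and the XLP note. A sketch of the compiler-level equivalent of the removed barrier:

  static inline void
  full_barrier (void)
  {
    /* GCC typically emits a "sync" instruction for this outside MIPS16 mode.  */
    __atomic_thread_fence (__ATOMIC_SEQ_CST);
  }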
@@ -22,50 +22,6 @@
#include <stdint.h>
#define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 1
#define ATOMIC_EXCHANGE_USES_CAS 1
-#define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- })
-
-#define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
- (abort (), (__typeof (*mem)) 0)
-
-#define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
- (abort (), (__typeof (*mem)) 0)
-
-#define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-#define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
- (abort (), (__typeof (*mem)) 0)
-
-#define atomic_compare_and_exchange_bool_acq(mem, new, old) \
- __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
- mem, new, old, __ATOMIC_ACQUIRE)
-
-#define atomic_compare_and_exchange_val_acq(mem, new, old) \
- __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
- mem, new, old, __ATOMIC_ACQUIRE)
-
-#define atomic_full_barrier() ({ asm volatile ("l.msync" ::: "memory"); })
-
#endif /* atomic-machine.h */
deleted file mode 100644
@@ -1,261 +0,0 @@
-/* Atomic operations. PowerPC Common version.
- Copyright (C) 2003-2022 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <https://www.gnu.org/licenses/>. */
-
-/*
- * Never include sysdeps/powerpc/atomic-machine.h directly.
- * Alway use include/atomic.h which will include either
- * sysdeps/powerpc/powerpc32/atomic-machine.h
- * or
- * sysdeps/powerpc/powerpc64/atomic-machine.h
- * as appropriate and which in turn include this file.
- */
-
-/*
- * Powerpc does not have byte and halfword forms of load and reserve and
- * store conditional. So for powerpc we stub out the 8- and 16-bit forms.
- */
-#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
- (abort (), 0)
-
-#define __ARCH_ACQ_INSTR "isync"
-#ifndef __ARCH_REL_INSTR
-# define __ARCH_REL_INSTR "sync"
-#endif
-
-#ifndef MUTEX_HINT_ACQ
-# define MUTEX_HINT_ACQ
-#endif
-#ifndef MUTEX_HINT_REL
-# define MUTEX_HINT_REL
-#endif
-
-#define atomic_full_barrier() __asm ("sync" ::: "memory")
-
-#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- ({ \
- __typeof (*(mem)) __tmp; \
- __typeof (mem) __memp = (mem); \
- __asm __volatile ( \
- "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
- " cmpw %0,%2\n" \
- " bne 2f\n" \
- " stwcx. %3,0,%1\n" \
- " bne- 1b\n" \
- "2: " __ARCH_ACQ_INSTR \
- : "=&r" (__tmp) \
- : "b" (__memp), "r" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp; \
- })
-
-#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval) \
- ({ \
- __typeof (*(mem)) __tmp; \
- __typeof (mem) __memp = (mem); \
- __asm __volatile (__ARCH_REL_INSTR "\n" \
- "1: lwarx %0,0,%1" MUTEX_HINT_REL "\n" \
- " cmpw %0,%2\n" \
- " bne 2f\n" \
- " stwcx. %3,0,%1\n" \
- " bne- 1b\n" \
- "2: " \
- : "=&r" (__tmp) \
- : "b" (__memp), "r" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp; \
- })
-
-#define __arch_atomic_exchange_32_acq(mem, value) \
- ({ \
- __typeof (*mem) __val; \
- __asm __volatile ( \
- "1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
- " stwcx. %3,0,%2\n" \
- " bne- 1b\n" \
- " " __ARCH_ACQ_INSTR \
- : "=&r" (__val), "=m" (*mem) \
- : "b" (mem), "r" (value), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
-#define __arch_atomic_exchange_32_rel(mem, value) \
- ({ \
- __typeof (*mem) __val; \
- __asm __volatile (__ARCH_REL_INSTR "\n" \
- "1: lwarx %0,0,%2" MUTEX_HINT_REL "\n" \
- " stwcx. %3,0,%2\n" \
- " bne- 1b" \
- : "=&r" (__val), "=m" (*mem) \
- : "b" (mem), "r" (value), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
-#define __arch_atomic_exchange_and_add_32(mem, value) \
- ({ \
- __typeof (*mem) __val, __tmp; \
- __asm __volatile ("1: lwarx %0,0,%3\n" \
- " add %1,%0,%4\n" \
- " stwcx. %1,0,%3\n" \
- " bne- 1b" \
- : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
- : "b" (mem), "r" (value), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
-#define __arch_atomic_exchange_and_add_32_acq(mem, value) \
- ({ \
- __typeof (*mem) __val, __tmp; \
- __asm __volatile ("1: lwarx %0,0,%3" MUTEX_HINT_ACQ "\n" \
- " add %1,%0,%4\n" \
- " stwcx. %1,0,%3\n" \
- " bne- 1b\n" \
- __ARCH_ACQ_INSTR \
- : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
- : "b" (mem), "r" (value), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
-#define __arch_atomic_exchange_and_add_32_rel(mem, value) \
- ({ \
- __typeof (*mem) __val, __tmp; \
- __asm __volatile (__ARCH_REL_INSTR "\n" \
- "1: lwarx %0,0,%3" MUTEX_HINT_REL "\n" \
- " add %1,%0,%4\n" \
- " stwcx. %1,0,%3\n" \
- " bne- 1b" \
- : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
- : "b" (mem), "r" (value), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
-#define __arch_atomic_decrement_if_positive_32(mem) \
- ({ int __val, __tmp; \
- __asm __volatile ("1: lwarx %0,0,%3\n" \
- " cmpwi 0,%0,0\n" \
- " addi %1,%0,-1\n" \
- " ble 2f\n" \
- " stwcx. %1,0,%3\n" \
- " bne- 1b\n" \
- "2: " __ARCH_ACQ_INSTR \
- : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
- : "b" (mem), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
-#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*mem) == 4) \
- __result = __arch_compare_and_exchange_val_32_acq(mem, newval, oldval); \
- else if (sizeof (*mem) == 8) \
- __result = __arch_compare_and_exchange_val_64_acq(mem, newval, oldval); \
- else \
- abort (); \
- __result; \
- })
-
-#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*mem) == 4) \
- __result = __arch_compare_and_exchange_val_32_rel(mem, newval, oldval); \
- else if (sizeof (*mem) == 8) \
- __result = __arch_compare_and_exchange_val_64_rel(mem, newval, oldval); \
- else \
- abort (); \
- __result; \
- })
-
-#define atomic_exchange_acq(mem, value) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*mem) == 4) \
- __result = __arch_atomic_exchange_32_acq (mem, value); \
- else if (sizeof (*mem) == 8) \
- __result = __arch_atomic_exchange_64_acq (mem, value); \
- else \
- abort (); \
- __result; \
- })
-
-#define atomic_exchange_rel(mem, value) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*mem) == 4) \
- __result = __arch_atomic_exchange_32_rel (mem, value); \
- else if (sizeof (*mem) == 8) \
- __result = __arch_atomic_exchange_64_rel (mem, value); \
- else \
- abort (); \
- __result; \
- })
-
-#define atomic_exchange_and_add(mem, value) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*mem) == 4) \
- __result = __arch_atomic_exchange_and_add_32 (mem, value); \
- else if (sizeof (*mem) == 8) \
- __result = __arch_atomic_exchange_and_add_64 (mem, value); \
- else \
- abort (); \
- __result; \
- })
-#define atomic_exchange_and_add_acq(mem, value) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*mem) == 4) \
- __result = __arch_atomic_exchange_and_add_32_acq (mem, value); \
- else if (sizeof (*mem) == 8) \
- __result = __arch_atomic_exchange_and_add_64_acq (mem, value); \
- else \
- abort (); \
- __result; \
- })
-#define atomic_exchange_and_add_rel(mem, value) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*mem) == 4) \
- __result = __arch_atomic_exchange_and_add_32_rel (mem, value); \
- else if (sizeof (*mem) == 8) \
- __result = __arch_atomic_exchange_and_add_64_rel (mem, value); \
- else \
- abort (); \
- __result; \
- })
-
-/* Decrement *MEM if it is > 0, and return the old value. */
-#define atomic_decrement_if_positive(mem) \
- ({ __typeof (*(mem)) __result; \
- if (sizeof (*mem) == 4) \
- __result = __arch_atomic_decrement_if_positive_32 (mem); \
- else if (sizeof (*mem) == 8) \
- __result = __arch_atomic_decrement_if_positive_64 (mem); \
- else \
- abort (); \
- __result; \
- })
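The deleted PowerPC common file provided lwarx/stwcx. loops in which acquire and release semantics were spelled out by placing isync after, or sync/lwsync before, the LL/SC sequence. With the builtins, that placement is conveyed by the memory-order argument instead; a minimal sketch with illustrative names:

  static int lock_word;                   /* illustrative variable */

  static inline int
  take_lock_word (void)
  {
    /* Acquire ordering: the barrier the old code placed after the loop.  */
    return __atomic_exchange_n (&lock_word, 1, __ATOMIC_ACQUIRE);
  }

  static inline void
  drop_lock_word (void)
  {
    /* Release ordering: the sync/lwsync the old code placed before it.  */
    __atomic_store_n (&lock_word, 0, __ATOMIC_RELEASE);
  }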
@@ -32,65 +32,11 @@
# define MUTEX_HINT_REL
#endif
+#define __ARCH_ACQ_INSTR "isync"
+
#define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 0
#define ATOMIC_EXCHANGE_USES_CAS 1
-/*
- * The 32-bit exchange_bool is different on powerpc64 because the subf
- * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
- * (a load word and zero (high 32) form). So powerpc64 has a slightly
- * different version in sysdeps/powerpc/powerpc64/atomic-machine.h.
- */
-#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
-({ \
- unsigned int __tmp; \
- __asm __volatile ( \
- "1: lwarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
- " subf. %0,%2,%0\n" \
- " bne 2f\n" \
- " stwcx. %3,0,%1\n" \
- " bne- 1b\n" \
- "2: " __ARCH_ACQ_INSTR \
- : "=&r" (__tmp) \
- : "b" (mem), "r" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp != 0; \
-})
-
-/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
- load and reserve (ldarx) and store conditional (stdcx.) instructions.
- So for powerpc32 we stub out the 64-bit forms. */
-#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- (abort (), (__typeof (*mem)) 0)
-
-#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
- (abort (), (__typeof (*mem)) 0)
-
-#define __arch_atomic_exchange_64_acq(mem, value) \
- ({ abort (); (*mem) = (value); })
-
-#define __arch_atomic_exchange_64_rel(mem, value) \
- ({ abort (); (*mem) = (value); })
-
-#define __arch_atomic_exchange_and_add_64(mem, value) \
- ({ abort (); (*mem) = (value); })
-
-#define __arch_atomic_exchange_and_add_64_acq(mem, value) \
- ({ abort (); (*mem) = (value); })
-
-#define __arch_atomic_exchange_and_add_64_rel(mem, value) \
- ({ abort (); (*mem) = (value); })
-
-#define __arch_atomic_decrement_val_64(mem) \
- ({ abort (); (*mem)--; })
-
-#define __arch_atomic_decrement_if_positive_64(mem) \
- ({ abort (); (*mem)--; })
-
#ifdef _ARCH_PWR4
/*
* Newer powerpc64 processors support the new "light weight" sync (lwsync)
@@ -101,7 +47,6 @@
/*
* "light weight" sync can also be used for the release barrier.
*/
-# define __ARCH_REL_INSTR "lwsync"
# define atomic_write_barrier() __asm ("lwsync" ::: "memory")
#else
/*
@@ -112,9 +57,3 @@
# define atomic_read_barrier() __asm ("sync" ::: "memory")
# define atomic_write_barrier() __asm ("sync" ::: "memory")
#endif
-
-/*
- * Include the rest of the atomic ops macros which are common to both
- * powerpc32 and powerpc64.
- */
-#include_next <atomic-machine.h>
@@ -32,183 +32,11 @@
# define MUTEX_HINT_REL
#endif
+#define __ARCH_ACQ_INSTR "isync"
+
#define __HAVE_64B_ATOMICS 1
-#define USE_ATOMIC_COMPILER_BUILTINS 0
#define ATOMIC_EXCHANGE_USES_CAS 1
-/* The 32-bit exchange_bool is different on powerpc64 because the subf
- does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
- (a load word and zero (high 32) form) load.
- In powerpc64 register values are 64-bit by default, including oldval.
- The value in old val unknown sign extension, lwarx loads the 32-bit
- value as unsigned. So we explicitly clear the high 32 bits in oldval. */
-#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
-({ \
- unsigned int __tmp, __tmp2; \
- __asm __volatile (" clrldi %1,%1,32\n" \
- "1: lwarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
- " subf. %0,%1,%0\n" \
- " bne 2f\n" \
- " stwcx. %4,0,%2\n" \
- " bne- 1b\n" \
- "2: " __ARCH_ACQ_INSTR \
- : "=&r" (__tmp), "=r" (__tmp2) \
- : "b" (mem), "1" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp != 0; \
-})
-
-/*
- * Only powerpc64 processors support Load doubleword and reserve index (ldarx)
- * and Store doubleword conditional indexed (stdcx) instructions. So here
- * we define the 64-bit forms.
- */
-#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
-({ \
- unsigned long __tmp; \
- __asm __volatile ( \
- "1: ldarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
- " subf. %0,%2,%0\n" \
- " bne 2f\n" \
- " stdcx. %3,0,%1\n" \
- " bne- 1b\n" \
- "2: " __ARCH_ACQ_INSTR \
- : "=&r" (__tmp) \
- : "b" (mem), "r" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp != 0; \
-})
-
-#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- ({ \
- __typeof (*(mem)) __tmp; \
- __typeof (mem) __memp = (mem); \
- __asm __volatile ( \
- "1: ldarx %0,0,%1" MUTEX_HINT_ACQ "\n" \
- " cmpd %0,%2\n" \
- " bne 2f\n" \
- " stdcx. %3,0,%1\n" \
- " bne- 1b\n" \
- "2: " __ARCH_ACQ_INSTR \
- : "=&r" (__tmp) \
- : "b" (__memp), "r" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp; \
- })
-
-#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
- ({ \
- __typeof (*(mem)) __tmp; \
- __typeof (mem) __memp = (mem); \
- __asm __volatile (__ARCH_REL_INSTR "\n" \
- "1: ldarx %0,0,%1" MUTEX_HINT_REL "\n" \
- " cmpd %0,%2\n" \
- " bne 2f\n" \
- " stdcx. %3,0,%1\n" \
- " bne- 1b\n" \
- "2: " \
- : "=&r" (__tmp) \
- : "b" (__memp), "r" (oldval), "r" (newval) \
- : "cr0", "memory"); \
- __tmp; \
- })
-
-#define __arch_atomic_exchange_64_acq(mem, value) \
- ({ \
- __typeof (*mem) __val; \
- __asm __volatile (__ARCH_REL_INSTR "\n" \
- "1: ldarx %0,0,%2" MUTEX_HINT_ACQ "\n" \
- " stdcx. %3,0,%2\n" \
- " bne- 1b\n" \
- " " __ARCH_ACQ_INSTR \
- : "=&r" (__val), "=m" (*mem) \
- : "b" (mem), "r" (value), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
-#define __arch_atomic_exchange_64_rel(mem, value) \
- ({ \
- __typeof (*mem) __val; \
- __asm __volatile (__ARCH_REL_INSTR "\n" \
- "1: ldarx %0,0,%2" MUTEX_HINT_REL "\n" \
- " stdcx. %3,0,%2\n" \
- " bne- 1b" \
- : "=&r" (__val), "=m" (*mem) \
- : "b" (mem), "r" (value), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
-#define __arch_atomic_exchange_and_add_64(mem, value) \
- ({ \
- __typeof (*mem) __val, __tmp; \
- __asm __volatile ("1: ldarx %0,0,%3\n" \
- " add %1,%0,%4\n" \
- " stdcx. %1,0,%3\n" \
- " bne- 1b" \
- : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
- : "b" (mem), "r" (value), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
-#define __arch_atomic_exchange_and_add_64_acq(mem, value) \
- ({ \
- __typeof (*mem) __val, __tmp; \
- __asm __volatile ("1: ldarx %0,0,%3" MUTEX_HINT_ACQ "\n" \
- " add %1,%0,%4\n" \
- " stdcx. %1,0,%3\n" \
- " bne- 1b\n" \
- __ARCH_ACQ_INSTR \
- : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
- : "b" (mem), "r" (value), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
-#define __arch_atomic_exchange_and_add_64_rel(mem, value) \
- ({ \
- __typeof (*mem) __val, __tmp; \
- __asm __volatile (__ARCH_REL_INSTR "\n" \
- "1: ldarx %0,0,%3" MUTEX_HINT_REL "\n" \
- " add %1,%0,%4\n" \
- " stdcx. %1,0,%3\n" \
- " bne- 1b" \
- : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
- : "b" (mem), "r" (value), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
-#define __arch_atomic_decrement_val_64(mem) \
- ({ \
- __typeof (*(mem)) __val; \
- __asm __volatile ("1: ldarx %0,0,%2\n" \
- " subi %0,%0,1\n" \
- " stdcx. %0,0,%2\n" \
- " bne- 1b" \
- : "=&b" (__val), "=m" (*mem) \
- : "b" (mem), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
-#define __arch_atomic_decrement_if_positive_64(mem) \
- ({ int __val, __tmp; \
- __asm __volatile ("1: ldarx %0,0,%3\n" \
- " cmpdi 0,%0,0\n" \
- " addi %1,%0,-1\n" \
- " ble 2f\n" \
- " stdcx. %1,0,%3\n" \
- " bne- 1b\n" \
- "2: " __ARCH_ACQ_INSTR \
- : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
- : "b" (mem), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
/*
* All powerpc64 processors support the new "light weight" sync (lwsync).
*/
@@ -216,11 +44,4 @@
/*
* "light weight" sync can also be used for the release barrier.
*/
-#define __ARCH_REL_INSTR "lwsync"
#define atomic_write_barrier() __asm ("lwsync" ::: "memory")
-
-/*
- * Include the rest of the atomic ops macros which are common to both
- * powerpc32 and powerpc64.
- */
-#include_next <atomic-machine.h>
@@ -15,24 +15,6 @@
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
-/* Activate all C11 atomic builtins.
-
- Note:
- E.g. in nptl/pthread_key_delete.c if compiled with GCCs 6 and before,
- an extra stack-frame is generated and the old value is stored on stack
- before cs instruction but it never loads this value from stack.
- An unreleased GCC 7 omit those stack operations.
-
- E.g. in nptl/pthread_once.c the condition code of cs instruction is
- evaluated by a sequence of ipm, sra, compare and jump instructions instead
- of one conditional jump instruction. This also occurs with an unreleased
- GCC 7.
-
- The atomic_fetch_abc_def C11 builtins are now using load-and-abc instructions
- on z196 zarch and higher cpus instead of a loop with compare-and-swap
- instruction. */
-#define USE_ATOMIC_COMPILER_BUILTINS 1
-
#ifdef __s390x__
# define __HAVE_64B_ATOMICS 1
#else
@@ -40,43 +22,3 @@
#endif
#define ATOMIC_EXCHANGE_USES_CAS 1
-
-/* Implement some of the non-C11 atomic macros from include/atomic.h
- with help of the C11 atomic builtins. The other non-C11 atomic macros
- are using the macros defined here. */
-
-/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
- Return the old *MEM value. */
-#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
- ({ __atomic_check_size((mem)); \
- typeof ((__typeof (*(mem))) *(mem)) __atg1_oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__atg1_oldval, \
- newval, 1, __ATOMIC_ACQUIRE, \
- __ATOMIC_RELAXED); \
- __atg1_oldval; })
-#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
- ({ __atomic_check_size((mem)); \
- typeof ((__typeof (*(mem))) *(mem)) __atg1_2_oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__atg1_2_oldval, \
- newval, 1, __ATOMIC_RELEASE, \
- __ATOMIC_RELAXED); \
- __atg1_2_oldval; })
-
-/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
- Return zero if *MEM was changed or non-zero if no exchange happened. */
-#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
- ({ __atomic_check_size((mem)); \
- typeof ((__typeof (*(mem))) *(mem)) __atg2_oldval = (oldval); \
- !__atomic_compare_exchange_n (mem, (void *) &__atg2_oldval, newval, \
- 1, __ATOMIC_ACQUIRE, \
- __ATOMIC_RELAXED); })
-
-/* Add VALUE to *MEM and return the old value of *MEM. */
-/* The gcc builtin uses a load-and-add instruction on z196 zarch and higher cpus
- instead of a loop with a compare-and-swap instruction. */
-# define atomic_exchange_and_add_acq(mem, operand) \
- ({ __atomic_check_size((mem)); \
- __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQUIRE); })
-# define atomic_exchange_and_add_rel(mem, operand) \
- ({ __atomic_check_size((mem)); \
- __atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
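A hypothetical caller, to make the two return-value conventions above concrete: the _val_ macro hands back the previous value of *MEM, while the _bool_ macro yields zero exactly when the exchange happened.

static void
example_caller (int *futex)
{
  /* If *futex was 0 it is now 1, and previous is 0.  */
  int previous = atomic_compare_and_exchange_val_acq (futex, 1, 0);

  /* Returns zero iff *futex was 1 and has been set to 2.  */
  if (previous == 0
      && atomic_compare_and_exchange_bool_acq (futex, 2, 1) == 0)
    {
      /* Both exchanges succeeded.  */
    }
}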
@@ -24,34 +24,10 @@
#else
# define __HAVE_64B_ATOMICS 0
#endif
-#define USE_ATOMIC_COMPILER_BUILTINS 1
/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS __HAVE_64B_ATOMICS
-/* Compare and exchange.
- For all "bool" routines, we return FALSE if exchange succesful. */
-
-#define __arch_compare_and_exchange_val_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-#define atomic_compare_and_exchange_val_acq(mem, new, old) \
- ({ \
- __typeof ((__typeof (*(mem))) *(mem)) __result; \
- if (sizeof (*mem) == 4 \
- || (__HAVE_64B_ATOMICS && sizeof (*mem) == 8)) \
- __result = __arch_compare_and_exchange_val_int (mem, new, old, \
- __ATOMIC_ACQUIRE); \
- else \
- abort (); \
- __result; \
- })
-
#ifdef __sparc_v9__
# define atomic_full_barrier() \
__asm __volatile ("membar #LoadLoad | #LoadStore" \
@@ -18,87 +18,10 @@
#ifndef _ATOMIC_MACHINE_H
#define _ATOMIC_MACHINE_H 1
-#define atomic_full_barrier() __sync_synchronize ()
-
#define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 0
-
-/* We use the compiler atomic load and store builtins as the generic
- defines are not atomic. In particular, we need to use compare and
- exchange for stores as the implementation is synthesized. */
-void __atomic_link_error (void);
-#define __atomic_check_size_ls(mem) \
- if ((sizeof (*mem) != 1) && (sizeof (*mem) != 2) && sizeof (*mem) != 4) \
- __atomic_link_error ();
-
-#define atomic_load_relaxed(mem) \
- ({ __atomic_check_size_ls((mem)); \
- __atomic_load_n ((mem), __ATOMIC_RELAXED); })
-#define atomic_load_acquire(mem) \
- ({ __atomic_check_size_ls((mem)); \
- __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })
-
-#define atomic_store_relaxed(mem, val) \
- do { \
- __atomic_check_size_ls((mem)); \
- __atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
- } while (0)
-#define atomic_store_release(mem, val) \
- do { \
- __atomic_check_size_ls((mem)); \
- __atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
- } while (0)
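The __atomic_check_size_ls/__atomic_link_error pairing above turns an unsupported access size into a link-time failure: the size test folds to a compile-time constant, so the call to the never-defined function survives only in the unsupported case and then breaks the link. A stand-alone sketch of the same trick, with hypothetical names:

/* Intentionally never defined anywhere; referencing it breaks the link.  */
extern void __example_unsupported_size (void);

#define example_check_size(mem)                                         \
  do                                                                     \
    {                                                                    \
      if (sizeof (*(mem)) != 1 && sizeof (*(mem)) != 2                  \
          && sizeof (*(mem)) != 4)                                       \
        __example_unsupported_size ();  /* folded away if supported */  \
    }                                                                    \
  while (0)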
/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 1
-/* prev = *addr;
- if (prev == old)
- *addr = new;
- return prev; */
-
-/* Use the kernel atomic light weight syscalls on hppa. */
-#define _LWS "0xb0"
-#define _LWS_CAS "0"
-/* Note r31 is the link register. */
-#define _LWS_CLOBBER "r1", "r23", "r22", "r20", "r31", "memory"
-/* String constant for -EAGAIN. */
-#define _ASM_EAGAIN "-11"
-/* String constant for -EDEADLOCK. */
-#define _ASM_EDEADLOCK "-45"
-
-/* The only basic operation needed is compare and exchange. The mem
- pointer must be word aligned. We no longer loop on deadlock. */
-#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
- ({ \
- register long lws_errno asm("r21"); \
- register unsigned long lws_ret asm("r28"); \
- register unsigned long lws_mem asm("r26") = (unsigned long)(mem); \
- register unsigned long lws_old asm("r25") = (unsigned long)(oldval);\
- register unsigned long lws_new asm("r24") = (unsigned long)(newval);\
- __asm__ __volatile__( \
- "0: \n\t" \
- "ble " _LWS "(%%sr2, %%r0) \n\t" \
- "ldi " _LWS_CAS ", %%r20 \n\t" \
- "cmpiclr,<> " _ASM_EAGAIN ", %%r21, %%r0\n\t" \
- "b,n 0b \n\t" \
- "cmpclr,= %%r0, %%r21, %%r0 \n\t" \
- "iitlbp %%r0,(%%sr0, %%r0) \n\t" \
- : "=r" (lws_ret), "=r" (lws_errno) \
- : "r" (lws_mem), "r" (lws_old), "r" (lws_new) \
- : _LWS_CLOBBER \
- ); \
- \
- (__typeof (oldval)) lws_ret; \
- })
-
-#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
- ({ \
- __typeof__ (*mem) ret; \
- ret = atomic_compare_and_exchange_val_acq(mem, newval, oldval); \
- /* Return 1 if it was already acquired. */ \
- (ret != oldval); \
- })
-
#endif
/* _ATOMIC_MACHINE_H */
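For readers unfamiliar with the hppa light-weight syscall convention used above: the ble to the LWS gateway (offset _LWS in space sr2) asks the kernel to perform the compare-and-swap atomically, the sequence branches back and retries while the helper reports -EAGAIN, and any other error traps. Semantically the helper performs the following (a model of the semantics only, not how it is implemented):

static inline unsigned long
lws_cas_semantics (unsigned long *mem, unsigned long oldval,
                   unsigned long newval)
{
  unsigned long prev = *mem;   /* value observed atomically */
  if (prev == oldval)
    *mem = newval;             /* store happens only on a match */
  return prev;                 /* always the previous value */
}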
@@ -20,34 +20,11 @@
#include <sysdep.h>
-/* Coldfire has no atomic compare-and-exchange operation, but the
- kernel provides userspace atomicity operations. Use them. */
-
#define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 0
/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 1
-/* The only basic operation needed is compare and exchange. */
-#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
- ({ \
- /* Use temporary variables to work around the registers being \
- call-clobbered. */ \
- __typeof (mem) _mem = mem; \
- __typeof (oldval) _oldval = oldval; \
- __typeof (newval) _newval = newval; \
- register uint32_t _d0 asm ("d0") = SYS_ify (atomic_cmpxchg_32); \
- register uint32_t *_a0 asm ("a0") = (uint32_t *) _mem; \
- register uint32_t _d2 asm ("d2") = (uint32_t) _oldval; \
- register uint32_t _d1 asm ("d1") = (uint32_t) _newval; \
- \
- asm ("trap #0" \
- : "+d" (_d0), "+m" (*_a0) \
- : "a" (_a0), "d" (_d2), "d" (_d1)); \
- (__typeof (oldval)) _d0; \
- })
-
# define atomic_full_barrier() \
(INTERNAL_SYSCALL_CALL (atomic_barrier), (void) 0)
@@ -20,64 +20,8 @@
#define _NIOS2_ATOMIC_MACHINE_H 1
#define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 0
/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 1
-#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
- (abort (), (__typeof (*mem)) 0)
-#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
- (abort (), (__typeof (*mem)) 0)
-#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- (abort (), (__typeof (*mem)) 0)
-
-#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
- (abort (), 0)
-#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
- (abort (), 0)
-#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
- (abort (), 0)
-
-#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- ({ \
- register int r2 asm ("r2"); \
- register int* r4 asm ("r4") = (int*)(mem); \
- register int r5 asm ("r5"); \
- register int r6 asm ("r6") = (int)(newval); \
- int retval, orig_oldval = (int)(oldval); \
- long kernel_cmpxchg = 0x1004; \
- while (1) \
- { \
- r5 = *r4; \
- if (r5 != orig_oldval) \
- { \
- retval = r5; \
- break; \
- } \
- asm volatile ("callr %1\n" \
- : "=r" (r2) \
- : "r" (kernel_cmpxchg), "r" (r4), "r" (r5), "r" (r6) \
- : "ra", "memory"); \
- if (!r2) { retval = orig_oldval; break; } \
- } \
- (__typeof (*(mem))) retval; \
- })
-
-#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
- ({ \
- register int r2 asm ("r2"); \
- register int *r4 asm ("r4") = (int*)(mem); \
- register int r5 asm ("r5") = (int)(oldval); \
- register int r6 asm ("r6") = (int)(newval); \
- long kernel_cmpxchg = 0x1004; \
- asm volatile ("callr %1\n" \
- : "=r" (r2) \
- : "r" (kernel_cmpxchg), "r" (r4), "r" (r5), "r" (r6) \
- : "ra", "memory"); \
- r2; \
- })
-
-#define atomic_full_barrier() ({ asm volatile ("sync"); })
-
#endif /* _NIOS2_ATOMIC_MACHINE_H */
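The removed __arch_compare_and_exchange_val_32_acq above follows a general pattern: building a value-returning CAS out of a success/failure CAS primitive (there, the kernel helper at 0x1004). A sketch of that pattern, with a hypothetical bool_cas modelled on the compiler builtin:

/* Hypothetical success/failure primitive standing in for the kernel
   cmpxchg helper; returns nonzero on success.  */
static inline int
bool_cas (int *mem, int oldval, int newval)
{
  return __atomic_compare_exchange_n (mem, &oldval, newval, 0,
                                      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static inline int
val_cas_from_bool_cas (int *mem, int newval, int oldval)
{
  for (;;)
    {
      int cur = *mem;
      if (cur != oldval)
        return cur;          /* mismatch: report the value we saw */
      if (bool_cas (mem, oldval, newval))
        return oldval;       /* exchange done: old value was oldval */
      /* *mem changed between the load and the CAS; retry.  */
    }
}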
@@ -19,127 +19,11 @@
#ifndef _LINUX_RISCV_BITS_ATOMIC_H
#define _LINUX_RISCV_BITS_ATOMIC_H 1
-#define atomic_full_barrier() __sync_synchronize ()
-
#ifdef __riscv_atomic
# define __HAVE_64B_ATOMICS (__riscv_xlen >= 64)
-# define USE_ATOMIC_COMPILER_BUILTINS 1
# define ATOMIC_EXCHANGE_USES_CAS 0
-/* Compare and exchange.
- For all "bool" routines, we return FALSE if exchange succesful. */
-
-# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- })
-
-# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- })
-
-# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- })
-
-# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- })
-
-# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \
- ({ \
- typeof (*mem) __oldval = (oldval); \
- __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \
- model, __ATOMIC_RELAXED); \
- __oldval; \
- })
-
-/* Atomic compare and exchange. */
-
-# define atomic_compare_and_exchange_bool_acq(mem, new, old) \
- __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \
- mem, new, old, __ATOMIC_ACQUIRE)
-
-# define atomic_compare_and_exchange_val_acq(mem, new, old) \
- __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
- mem, new, old, __ATOMIC_ACQUIRE)
-
-# define atomic_compare_and_exchange_val_rel(mem, new, old) \
- __atomic_val_bysize (__arch_compare_and_exchange_val, int, \
- mem, new, old, __ATOMIC_RELEASE)
-
-/* Atomic exchange (without compare). */
-
-# define __arch_exchange_8_int(mem, newval, model) \
- __atomic_exchange_n (mem, newval, model)
-
-# define __arch_exchange_16_int(mem, newval, model) \
- __atomic_exchange_n (mem, newval, model)
-
-# define __arch_exchange_32_int(mem, newval, model) \
- __atomic_exchange_n (mem, newval, model)
-
-# define __arch_exchange_64_int(mem, newval, model) \
- __atomic_exchange_n (mem, newval, model)
-
-/* Atomically add value and return the previous (unincremented) value. */
-
-# define __arch_exchange_and_add_8_int(mem, value, model) \
- __atomic_fetch_add (mem, value, model)
-
-# define __arch_exchange_and_add_16_int(mem, value, model) \
- __atomic_fetch_add (mem, value, model)
-
-# define __arch_exchange_and_add_32_int(mem, value, model) \
- __atomic_fetch_add (mem, value, model)
-
-# define __arch_exchange_and_add_64_int(mem, value, model) \
- __atomic_fetch_add (mem, value, model)
-
-# define atomic_exchange_and_add_acq(mem, value) \
- __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
- __ATOMIC_ACQUIRE)
-
-# define atomic_exchange_and_add_rel(mem, value) \
- __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
- __ATOMIC_RELEASE)
-
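The exchange macros above wrap a native atomic swap, which is why ATOMIC_EXCHANGE_USES_CAS is 0 on this port. A classic use of exchange-acquire paired with store-release, sketched directly with the builtins (hypothetical helpers, not part of the port):

static inline void
example_spin_lock (int *lock)
{
  /* A previous value of 0 means we took the lock.  */
  while (__atomic_exchange_n (lock, 1, __ATOMIC_ACQUIRE) != 0)
    ;
}

static inline void
example_spin_unlock (int *lock)
{
  __atomic_store_n (lock, 0, __ATOMIC_RELEASE);
}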
/* Miscellaneous. */
# define asm_amo(which, ordering, mem, value) ({ \
@@ -17,136 +17,6 @@
<https://www.gnu.org/licenses/>. */
#define __HAVE_64B_ATOMICS 0
-#define USE_ATOMIC_COMPILER_BUILTINS 0
/* XXX Is this actually correct? */
#define ATOMIC_EXCHANGE_USES_CAS 1
-
-/* The SH kernel implements gUSA ("g" User Space Atomicity) support for
- user-space atomicity. The atomicity macros use this scheme.
-
- Reference:
- Niibe Yutaka, "gUSA: Simple and Efficient User Space Atomicity
- Emulation with Little Kernel Modification", Linux Conference 2002,
- Japan. http://lc.linux.or.jp/lc2002/papers/niibe0919h.pdf (in
- Japanese).
-
- B.N. Bershad, D. Redell, and J. Ellis, "Fast Mutual Exclusion for
- Uniprocessors", Proceedings of the Fifth Architectural Support for
- Programming Languages and Operating Systems (ASPLOS), pp. 223-233,
- October 1992. http://www.cs.washington.edu/homes/bershad/Papers/Rcs.ps
-
- SuperH ABI:
- r15: -(size of atomic instruction sequence) < 0
- r0: end point
- r1: saved stack pointer
-*/
-
-#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
- ({ __typeof (*(mem)) __result; \
- __asm __volatile ("\
- mova 1f,r0\n\
- .align 2\n\
- mov r15,r1\n\
- mov #(0f-1f),r15\n\
- 0: mov.b @%1,%0\n\
- cmp/eq %0,%3\n\
- bf 1f\n\
- mov.b %2,@%1\n\
- 1: mov r1,r15"\
- : "=&r" (__result) : "u" (mem), "u" (newval), "u" (oldval) \
- : "r0", "r1", "t", "memory"); \
- __result; })
-
-#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
- ({ __typeof (*(mem)) __result; \
- __asm __volatile ("\
- mova 1f,r0\n\
- mov r15,r1\n\
- .align 2\n\
- mov #(0f-1f),r15\n\
- mov #-8,r15\n\
- 0: mov.w @%1,%0\n\
- cmp/eq %0,%3\n\
- bf 1f\n\
- mov.w %2,@%1\n\
- 1: mov r1,r15"\
- : "=&r" (__result) : "u" (mem), "u" (newval), "u" (oldval) \
- : "r0", "r1", "t", "memory"); \
- __result; })
-
-#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
- ({ __typeof (*(mem)) __result; \
- __asm __volatile ("\
- mova 1f,r0\n\
- .align 2\n\
- mov r15,r1\n\
- mov #(0f-1f),r15\n\
- 0: mov.l @%1,%0\n\
- cmp/eq %0,%3\n\
- bf 1f\n\
- mov.l %2,@%1\n\
- 1: mov r1,r15"\
- : "=&r" (__result) : "u" (mem), "u" (newval), "u" (oldval) \
- : "r0", "r1", "t", "memory"); \
- __result; })
-
-/* XXX We do not really need 64-bit compare-and-exchange. At least
- not in the moment. Using it would mean causing portability
- problems since not many other 32-bit architectures have support for
- such an operation. So don't define any code for now. */
-
-# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- (abort (), (__typeof (*mem)) 0)
-
-#define atomic_exchange_and_add(mem, value) \
- ({ __typeof (*(mem)) __result, __tmp, __value = (value); \
- if (sizeof (*(mem)) == 1) \
- __asm __volatile ("\
- mova 1f,r0\n\
- .align 2\n\
- mov r15,r1\n\
- mov #(0f-1f),r15\n\
- 0: mov.b @%2,%0\n\
- mov %1,r2\n\
- add %0,r2\n\
- mov.b r2,@%2\n\
- 1: mov r1,r15"\
- : "=&r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
- : "r0", "r1", "r2", "memory"); \
- else if (sizeof (*(mem)) == 2) \
- __asm __volatile ("\
- mova 1f,r0\n\
- .align 2\n\
- mov r15,r1\n\
- mov #(0f-1f),r15\n\
- 0: mov.w @%2,%0\n\
- mov %1,r2\n\
- add %0,r2\n\
- mov.w r2,@%2\n\
- 1: mov r1,r15"\
- : "=&r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
- : "r0", "r1", "r2", "memory"); \
- else if (sizeof (*(mem)) == 4) \
- __asm __volatile ("\
- mova 1f,r0\n\
- .align 2\n\
- mov r15,r1\n\
- mov #(0f-1f),r15\n\
- 0: mov.l @%2,%0\n\
- mov %1,r2\n\
- add %0,r2\n\
- mov.l r2,@%2\n\
- 1: mov r1,r15"\
- : "=&r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
- : "r0", "r1", "r2", "memory"); \
- else \
- { \
- __typeof (mem) memp = (mem); \
- do \
- __result = *memp; \
- while (__arch_compare_and_exchange_val_64_acq \
- (memp, __result + __value, __result) == __result); \
- (void) __value; \
- } \
- __result; })
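Each gUSA block above is a restartable sequence: r0 marks the end of the critical window and r15 its negative length, and if the kernel preempts the task inside that window it rolls the PC back to label 0, so the load/compute/store triple behaves as if atomic. In plain C, the 32-bit exchange-and-add case computes the following (a semantic model with no atomicity of its own):

static inline int
exchange_and_add_model (int *mem, int value)
{
  int old = *mem;       /* 0: mov.l @mem,old   */
  *mem = old + value;   /*    add; mov.l store */
  return old;           /* previous value      */
}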
@@ -19,36 +19,19 @@
#ifndef _X86_ATOMIC_MACHINE_H
#define _X86_ATOMIC_MACHINE_H 1
-#include <stdint.h>
#include <tls.h> /* For mach. */
-#include <libc-pointer-arith.h> /* For cast_to_integer. */
-
-#define LOCK_PREFIX "lock;"
-
-#define USE_ATOMIC_COMPILER_BUILTINS 1
#ifdef __x86_64__
# define __HAVE_64B_ATOMICS 1
-# define SP_REG "rsp"
#else
/* Since the Pentium, i386 CPUs have supported 64-bit atomics, but the
i386 psABI supplement provides only 4-byte alignment for uint64_t
inside structs, so it is currently not possible to use 64-bit
atomics on this platform. */
# define __HAVE_64B_ATOMICS 0
-# define SP_REG "esp"
#endif
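To make the alignment point above concrete, a hypothetical struct: under the i386 psABI uint64_t gets only 4-byte alignment inside structs, so the member below can land at offset 4, whereas on x86_64 it is naturally aligned at offset 8.

#include <stdint.h>

struct example
{
  uint32_t a;
  uint64_t b;   /* offset 4 on i386, offset 8 on x86_64 */
};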
#define ATOMIC_EXCHANGE_USES_CAS 0
-#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
- __sync_val_compare_and_swap (mem, oldval, newval)
-#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
- (! __sync_bool_compare_and_swap (mem, oldval, newval))
-
-/* We don't use mfence because it is supposedly slower due to having to
- provide stronger guarantees (e.g., regarding self-modifying code). */
-#define atomic_full_barrier() \
- __asm __volatile (LOCK_PREFIX "orl $0, (%%" SP_REG ")" ::: "memory")
#define atomic_read_barrier() __asm ("" ::: "memory")
#define atomic_write_barrier() __asm ("" ::: "memory")
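For comparison with the removed lock-orl barrier above, the compiler-builtin spelling of a full barrier is shown below; whether the compiler emits mfence or a locked read-modify-write for it is an implementation detail (a sketch, not necessarily what this change installs instead).

#define example_full_barrier() __atomic_thread_fence (__ATOMIC_SEQ_CST)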