@@ -74,7 +74,7 @@ __assert_fail_base (const char *fmt, const char *assertion, const char *file,
/* We have to free the old buffer since the application might
catch the SIGABRT signal. */
- struct abort_msg_s *old = atomic_exchange_acq (&__abort_msg, buf);
+ struct abort_msg_s *old = atomic_exchange_acquire (&__abort_msg, buf);
if (old != NULL)
__munmap (old, old->size);
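
For reference, the pattern this hunk renames is publish-then-reclaim: the new
buffer is swapped in atomically and the previous one is freed, which stays safe
even if a SIGABRT handler runs concurrently.  A minimal C11 sketch, using
illustrative names (msg, current_msg, publish_msg) and free() in place of
glibc's __munmap:

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <string.h>

    struct msg { size_t size; char text[64]; };

    static _Atomic (struct msg *) current_msg;

    static void
    publish_msg (const char *text)
    {
      struct msg *buf = malloc (sizeof *buf);
      if (buf == NULL)
        return;
      buf->size = sizeof *buf;
      strncpy (buf->text, text, sizeof buf->text - 1);
      buf->text[sizeof buf->text - 1] = '\0';

      /* Swap in the new buffer; acquire ordering makes the old buffer's
         contents visible to us before we reclaim it.  */
      struct msg *old = atomic_exchange_explicit (&current_msg, buf,
                                                  memory_order_acquire);
      if (old != NULL)
        free (old);
    }
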
@@ -89,7 +89,7 @@ __lll_abstimed_lock (void *ptr,
while (1)
{
- if (atomic_exchange_acq ((int *)ptr, 2) == 0)
+ if (atomic_exchange_acquire ((int *)ptr, 2) == 0)
return 0;
else if (! valid_nanoseconds (tsp->tv_nsec))
return EINVAL;
@@ -78,7 +78,7 @@ extern kern_return_t __gsync_wait_intr
|| atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) != 0) \
while (1) \
{ \
- if (atomic_exchange_acq (__iptr, 2) == 0) \
+ if (atomic_exchange_acquire (__iptr, 2) == 0) \
break; \
__lll_wait (__iptr, 2, __flags); \
} \
@@ -102,7 +102,7 @@ extern kern_return_t __gsync_wait_intr
#define __lll_unlock(ptr, flags) \
({ \
int *__iptr = (int *)(ptr); \
- if (atomic_exchange_rel (__iptr, 0) == 2) \
+ if (atomic_exchange_release (__iptr, 0) == 2) \
__lll_wake (__iptr, (flags)); \
(void)0; \
})
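
The two hunks above are the classic 0/1/2 futex lock protocol: 0 means
unlocked, 1 locked with no waiters, 2 locked with possible waiters.  Unlock
releases in one shot and wakes only when the old value says someone may be
asleep.  A sketch in plain C11 atomics, with lll_wake_stub standing in for
the gsync/futex wake call:

    #include <stdatomic.h>

    static void lll_wake_stub (atomic_int *futex) { (void) futex; }

    static void
    lll_unlock_sketch (atomic_int *futex)
    {
      /* The release exchange publishes the critical section; old == 2
         means a waiter may be blocked and must be woken.  */
      if (atomic_exchange_explicit (futex, 0, memory_order_release) == 2)
        lll_wake_stub (futex);
    }
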
@@ -4743,7 +4743,7 @@ static void malloc_consolidate(mstate av)
maxfb = &fastbin (av, NFASTBINS - 1);
fb = &fastbin (av, 0);
do {
- p = atomic_exchange_acq (fb, NULL);
+ p = atomic_exchange_acquire (fb, NULL);
if (p != 0) {
do {
{
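
The exchange in malloc_consolidate detaches an entire fastbin free list in
one step, so concurrent frees pushing onto the same bin can never observe a
half-consumed list.  A sketch of that takeover, with illustrative names
(node, detach_list) rather than malloc's chunk types:

    #include <stdatomic.h>
    #include <stddef.h>

    struct node { struct node *next; };

    static struct node *
    detach_list (_Atomic (struct node *) *head)
    {
      /* Acquire pairs with the release in a concurrent push, so the
         detached nodes' contents are visible before traversal.  */
      return atomic_exchange_explicit (head, NULL, memory_order_acquire);
    }
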
@@ -2538,7 +2538,7 @@ aiocb64}, since the LFS transparently replaces the old interface.
@c munmap ok
@c THREAD_COPY_STACK_GUARD ok
@c THREAD_COPY_POINTER_GUARD ok
-@c atomic_exchange_acq ok
+@c atomic_exchange_acquire ok
@c lll_futex_wake ok
@c deallocate_stack @asulock @ascuheap @aculock @acsmem
@c lll_lock (state_cache_lock) @asulock @aculock
@@ -2570,7 +2570,7 @@ aiocb64}, since the LFS transparently replaces the old interface.
@c atomic_increment ok
@c clone ok
@c atomic_decrement ok
-@c atomic_exchange_acq ok
+@c atomic_exchange_acquire ok
@c lll_futex_wake ok
@c deallocate_stack dup
@c sched_setaffinity ok
@@ -2590,7 +2590,7 @@ aiocb64}, since the LFS transparently replaces the old interface.
@c start_thread ok
@c HP_TIMING_NOW ok
@c ctype_init @mtslocale
-@c atomic_exchange_acq ok
+@c atomic_exchange_acquire ok
@c lll_futex_wake ok
@c sigemptyset ok
@c sigaddset ok
@@ -451,7 +451,7 @@ this function is in @file{stdlib.h}.
@c clear_fastchunks ok
@c unsorted_chunks dup ok
@c fastbin dup ok
-@c atomic_exchange_acq ok
+@c atomic_exchange_acquire ok
@c check_inuse_chunk dup ok/disabled
@c chunk_at_offset dup ok
@c chunksize dup ok
@@ -96,14 +96,6 @@ do_test (void)
ret = 1;
}
- mem = 64;
- if (atomic_exchange_acq (&mem, 31) != 64
- || mem != 31)
- {
- puts ("atomic_exchange_acq test failed");
- ret = 1;
- }
-
mem = 2;
if (atomic_exchange_and_add (&mem, 11) != 2
|| mem != 13)
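
The deleted block dropped the test for the old macro name.  If equivalent
coverage were wanted under the new name, a self-contained restatement in
plain C11 (standing in for glibc's internal test harness) might look like:

    #include <stdatomic.h>
    #include <stdio.h>

    int
    main (void)
    {
      atomic_int mem = 64;
      int ret = 0;
      /* Exchange must return the previous value and store the new one.  */
      if (atomic_exchange_explicit (&mem, 31, memory_order_acquire) != 64
          || atomic_load (&mem) != 31)
        {
          puts ("exchange-acquire check failed");
          ret = 1;
        }
      return ret;
    }
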
@@ -415,7 +415,7 @@ start_thread (void *arg)
unwind_buf.priv.data.cleanup = NULL;
/* Allow setxid from now onwards. */
- if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
+ if (__glibc_unlikely (atomic_exchange_acquire (&pd->setxid_futex, 0) == -2))
futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);
if (__glibc_likely (! not_first_call))
@@ -171,7 +171,7 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
The unlock operation must be the last access to the mutex to not
violate the mutex destruction requirements (see __lll_unlock). */
private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
- if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
+ if (__glibc_unlikely ((atomic_exchange_release (&mutex->__data.__lock, 0)
& FUTEX_WAITERS) != 0))
futex_wake ((unsigned int *) &mutex->__data.__lock, 1, private);
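
Here the release exchange both clears the lock word and reports, via the
FUTEX_WAITERS bit of the old value, whether anyone needs waking; it is also
the last access to the mutex, as the destruction note above requires.  A
sketch with illustrative names (WAITERS_BIT mirrors FUTEX_WAITERS, and
wake_one_stub stands in for futex_wake):

    #include <stdatomic.h>

    #define WAITERS_BIT 0x80000000u

    static void wake_one_stub (atomic_uint *word) { (void) word; }

    static void
    unlock_sketch (atomic_uint *lock_word)
    {
      unsigned int old = atomic_exchange_explicit (lock_word, 0,
                                                   memory_order_release);
      if ((old & WAITERS_BIT) != 0)
        wake_one_stub (lock_word);
    }
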
@@ -103,28 +103,6 @@
__atomic_val_bysize (__arch_compare_and_exchange_val, int, \
mem, new, old, __ATOMIC_RELEASE)
-
-/* Atomic exchange (without compare). */
-
-# define __arch_exchange_8_int(mem, newval, model) \
- __atomic_exchange_n (mem, newval, model)
-
-# define __arch_exchange_16_int(mem, newval, model) \
- __atomic_exchange_n (mem, newval, model)
-
-# define __arch_exchange_32_int(mem, newval, model) \
- __atomic_exchange_n (mem, newval, model)
-
-# define __arch_exchange_64_int(mem, newval, model) \
- __atomic_exchange_n (mem, newval, model)
-
-# define atomic_exchange_acq(mem, value) \
- __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
-
-# define atomic_exchange_rel(mem, value) \
- __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
-
-
/* Atomically add value and return the previous (unincremented) value. */
# define __arch_exchange_and_add_8_int(mem, value, model) \
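
With the per-size __arch_exchange_* wrappers and the acq/rel macros deleted,
this port falls through to the generic definitions.  Roughly, those reduce
to the compiler builtin below (a sketch from memory of include/atomic.h, not
copied from this patch; the real macros also size-check their operand):

    #define atomic_exchange_acquire(mem, desired) \
      __atomic_exchange_n ((mem), (desired), __ATOMIC_ACQUIRE)
    #define atomic_exchange_release(mem, desired) \
      __atomic_exchange_n ((mem), (desired), __ATOMIC_RELEASE)
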
@@ -98,7 +98,7 @@ typedef struct
# define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
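
This THREAD_GSCOPE_RESET_FLAG hunk repeats across the per-architecture tls.h
headers below; the protocol is the same everywhere: drop the global-scope
flag with a release exchange, and wake only if the old value shows a waiter
(the dynamic linker) registered while the flag was held.  A sketch with
illustrative constants and a stub wake:

    #include <stdatomic.h>

    enum { GSCOPE_UNUSED = 0, GSCOPE_USED = 1, GSCOPE_WAIT = 2 };

    static void futex_wake_stub (atomic_int *flag) { (void) flag; }

    static void
    gscope_reset_sketch (atomic_int *flag)
    {
      int res = atomic_exchange_explicit (flag, GSCOPE_UNUSED,
                                          memory_order_release);
      if (res == GSCOPE_WAIT)
        futex_wake_stub (flag);
    }
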
@@ -95,7 +95,7 @@ typedef struct
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -103,7 +103,7 @@ typedef struct
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -89,7 +89,7 @@ typedef struct
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -118,7 +118,7 @@ typedef struct
# define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -23,7 +23,7 @@ __pthread_spin_init (pthread_spinlock_t *lock, int pshared)
{
/* CONCURRENCTY NOTES:
- The atomic_exchange_rel synchronizes-with the atomic_exhange_acq in
+ The atomic_exchange_release synchronizes-with the atomic_exchange_acquire in
pthread_spin_lock.
On hppa we must not use a plain `stw` to reset the guard lock. This
@@ -40,7 +40,7 @@ __pthread_spin_init (pthread_spinlock_t *lock, int pshared)
Therefore if a variable is used in an atomic macro it must always be
manipulated with atomic macros in order for memory ordering rules to
be preserved. */
- atomic_exchange_rel (lock, 0);
+ atomic_exchange_release (lock, 0);
return 0;
}
versioned_symbol (libc, __pthread_spin_init, pthread_spin_init,
@@ -23,7 +23,7 @@ __pthread_spin_unlock (pthread_spinlock_t *lock)
{
/* CONCURRENCTY NOTES:
- The atomic_exchange_rel synchronizes-with the atomic_exhange_acq in
+ The atomic_exchange_release synchronizes-with the atomic_exchange_acquire in
pthread_spin_lock.
On hppa we must not use a plain `stw` to reset the guard lock. This
@@ -40,7 +40,7 @@ __pthread_spin_unlock (pthread_spinlock_t *lock)
Therefore if a variable is used in an atomic macro it must always be
manipulated with atomic macros in order for memory ordering rules to
be preserved. */
- atomic_exchange_rel (lock, 0);
+ atomic_exchange_release (lock, 0);
return 0;
}
versioned_symbol (libc, __pthread_spin_unlock, pthread_spin_unlock,
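
The pairing the CONCURRENCY NOTES describe is observable in plain C11: the
release exchange in unlock synchronizes-with the acquire exchange in lock,
so writes made under the lock are visible to the next owner.  A minimal
sketch (generic C11; the hppa-specific ldcw guard details are omitted):

    #include <stdatomic.h>

    static atomic_int guard;        /* 0 = free, 1 = taken */

    static void
    spin_lock_sketch (void)
    {
      while (atomic_exchange_explicit (&guard, 1, memory_order_acquire) != 0)
        ;                           /* spin until the exchange sees 0 */
    }

    static void
    spin_unlock_sketch (void)
    {
      /* Per the notes above, a plain store is not enough on hppa; the
         release exchange keeps the memory-ordering rules intact.  */
      atomic_exchange_explicit (&guard, 0, memory_order_release);
    }
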
@@ -123,7 +123,7 @@ static inline void __set_cr27(struct pthread *cr27)
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -147,7 +147,7 @@ register struct pthread *__thread_self __asm__("r13");
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -122,7 +122,7 @@ extern void * __m68k_read_tp (void);
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -100,7 +100,7 @@ typedef struct
# define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -150,7 +150,7 @@ typedef struct
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -130,7 +130,7 @@ register struct pthread *__thread_self __asm__("r23");
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -319,7 +319,7 @@ __futex_clocklock64 (int *futex, clockid_t clockid,
{
if (__glibc_unlikely (atomic_compare_and_exchange_bool_acq (futex, 1, 0)))
{
- while (atomic_exchange_acq (futex, 2) != 0)
+ while (atomic_exchange_acquire (futex, 2) != 0)
{
int err = 0;
err = __futex_abstimed_wait64 ((unsigned int *) futex, 2, clockid,
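
The loop above is the contended-path entry to the same 0/1/2 protocol: try
0 -> 1 cheaply, and on failure claim the lock word as 2 ("locked, waiters")
before each sleep so the unlocker knows a wake is required.  A sketch with
futex_wait_stub standing in for __futex_abstimed_wait64:

    #include <stdatomic.h>

    static int
    futex_wait_stub (atomic_int *f, int val)
    {
      (void) f; (void) val;
      return 0;                     /* the real call can return ETIMEDOUT */
    }

    static int
    clocklock_sketch (atomic_int *futex)
    {
      int expected = 0;
      if (atomic_compare_exchange_strong_explicit (futex, &expected, 1,
                                                   memory_order_acquire,
                                                   memory_order_relaxed))
        return 0;                   /* fast path: uncontended */

      while (atomic_exchange_explicit (futex, 2, memory_order_acquire) != 0)
        {
          int err = futex_wait_stub (futex, 2);
          if (err != 0)
            return err;             /* e.g. the deadline expired */
        }
      return 0;
    }
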
@@ -119,7 +119,7 @@ libc_hidden_proto (__lll_lock_wait)
((void) \
({ \
int *__futex = (futex); \
- if (__glibc_unlikely (atomic_exchange_acq (__futex, 2) != 0)) \
+ if (__glibc_unlikely (atomic_exchange_acquire (__futex, 2) != 0)) \
__lll_lock_wait (__futex, private); \
}))
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
@@ -147,7 +147,7 @@ libc_hidden_proto (__lll_lock_wake)
({ \
int *__futex = (futex); \
int __private = (private); \
- int __oldval = atomic_exchange_rel (__futex, 0); \
+ int __oldval = atomic_exchange_release (__futex, 0); \
if (__glibc_unlikely (__oldval > 1)) \
{ \
if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
@@ -164,7 +164,7 @@ register tcbhead_t *__thread_self __asm__("r10");
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ \
- int __res = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag,\
+ int __res = atomic_exchange_release (&THREAD_SELF->header.gscope_flag,\
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, \
@@ -140,8 +140,8 @@ __libc_message (enum __libc_message_action action, const char *fmt, ...)
/* We have to free the old buffer since the application might
catch the SIGABRT signal. */
- struct abort_msg_s *old = atomic_exchange_acq (&__abort_msg,
- buf);
+ struct abort_msg_s *old
+   = atomic_exchange_acquire (&__abort_msg, buf);
if (old != NULL)
__munmap (old, old->size);
}
@@ -214,7 +214,7 @@ typedef struct
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -113,7 +113,7 @@ typedef struct
# define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -71,14 +71,6 @@
1, __ATOMIC_ACQUIRE, \
__ATOMIC_RELAXED); })
-/* Store NEWVALUE in *MEM and return the old value. */
-#define atomic_exchange_acq(mem, newvalue) \
- ({ __atomic_check_size((mem)); \
- __atomic_exchange_n (mem, newvalue, __ATOMIC_ACQUIRE); })
-#define atomic_exchange_rel(mem, newvalue) \
- ({ __atomic_check_size((mem)); \
- __atomic_exchange_n (mem, newvalue, __ATOMIC_RELEASE); })
-
/* Add VALUE to *MEM and return the old value of *MEM. */
/* The gcc builtin uses load-and-add instruction on z196 zarch and higher cpus
instead of a loop with compare-and-swap instruction. */
@@ -157,7 +157,7 @@ typedef struct
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -129,7 +129,7 @@ typedef struct
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -130,7 +130,7 @@ register struct pthread *__thread_self __asm__("%g7");
#define THREAD_GSCOPE_RESET_FLAG() \
do \
{ int __res \
- = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \
+ = atomic_exchange_release (&THREAD_SELF->header.gscope_flag, \
THREAD_GSCOPE_FLAG_UNUSED); \
if (__res == THREAD_GSCOPE_FLAG_WAIT) \
lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \
@@ -118,12 +118,6 @@
# define __arch_exchange_64_int(mem, newval, model) \
__atomic_exchange_n (mem, newval, model)
-# define atomic_exchange_acq(mem, value) \
- __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE)
-
-# define atomic_exchange_rel(mem, value) \
- __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE)
-
/* Atomically add value and return the previous (unincremented) value. */
# define __arch_exchange_and_add_8_int(mem, value, model) \
@@ -95,34 +95,6 @@
#endif
-/* Note that we need no lock prefix. */
-#define atomic_exchange_acq(mem, newvalue) \
- ({ __typeof (*mem) result; \
- if (sizeof (*mem) == 1) \
- __asm __volatile ("xchgb %b0, %1" \
- : "=q" (result), "=m" (*mem) \
- : "0" (newvalue), "m" (*mem)); \
- else if (sizeof (*mem) == 2) \
- __asm __volatile ("xchgw %w0, %1" \
- : "=r" (result), "=m" (*mem) \
- : "0" (newvalue), "m" (*mem)); \
- else if (sizeof (*mem) == 4) \
- __asm __volatile ("xchgl %0, %1" \
- : "=r" (result), "=m" (*mem) \
- : "0" (newvalue), "m" (*mem)); \
- else if (__HAVE_64B_ATOMICS) \
- __asm __volatile ("xchgq %q0, %1" \
- : "=r" (result), "=m" (*mem) \
- : "0" ((int64_t) cast_to_integer (newvalue)), \
- "m" (*mem)); \
- else \
- { \
- result = 0; \
- __atomic_link_error (); \
- } \
- result; })
-
-
#define __arch_exchange_and_add_body(lock, pfx, mem, value) \
({ __typeof (*mem) __result; \
__typeof (value) __addval = (value); \
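
The deleted x86 block above existed because xchg needs no lock prefix (it is
implicitly locked).  Removing it is safe since the port now falls back to the
compiler builtin, which GCC and Clang compile to that same xchg instruction.
A sketch of the fallback (not glibc's exact macro):

    /* __atomic_exchange_n is the GCC/Clang builtin; on x86 it emits a
       bare xchg, so dropping the hand-written asm changes no behavior.  */
    static inline int
    exchange_acquire_sketch (int *mem, int newval)
    {
      return __atomic_exchange_n (mem, newval, __ATOMIC_ACQUIRE);
    }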