@@ -255,23 +255,6 @@
#endif
-/* Atomically *mem &= mask. */
-#ifndef atomic_and
-# define atomic_and(mem, mask) \
- do { \
- __typeof (*(mem)) __atg15_old; \
- __typeof (mem) __atg15_memp = (mem); \
- __typeof (*(mem)) __atg15_mask = (mask); \
- \
- do \
- __atg15_old = (*__atg15_memp); \
- while (__builtin_expect \
- (atomic_compare_and_exchange_bool_acq (__atg15_memp, \
- __atg15_old & __atg15_mask, \
- __atg15_old), 0)); \
- } while (0)
-#endif
-
/* Atomically *mem &= mask and return the old value of *mem. */
#ifndef atomic_and_val
# define atomic_and_val(mem, mask) \
@@ -289,23 +272,6 @@
__atg16_old; })
#endif
-/* Atomically *mem |= mask. */
-#ifndef atomic_or
-# define atomic_or(mem, mask) \
- do { \
- __typeof (*(mem)) __atg17_old; \
- __typeof (mem) __atg17_memp = (mem); \
- __typeof (*(mem)) __atg17_mask = (mask); \
- \
- do \
- __atg17_old = (*__atg17_memp); \
- while (__builtin_expect \
- (atomic_compare_and_exchange_bool_acq (__atg17_memp, \
- __atg17_old | __atg17_mask, \
- __atg17_old), 0)); \
- } while (0)
-#endif
-
/* Atomically *mem |= mask and return the old value of *mem. */
#ifndef atomic_or_val
# define atomic_or_val(mem, mask) \
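For context, a standalone sketch (not part of the patch) of what this swap amounts to, assuming GCC's __atomic builtins; the function and variable names below are illustrative only. The removed generic atomic_and/atomic_or fell back to a compare-and-swap retry loop, whereas atomic_fetch_and_acquire/atomic_fetch_or_acquire at the converted call sites boil down to a single fetch-and-modify builtin when the compiler builtins are in use (and a similar CAS loop otherwise), with the returned old value simply ignored by these callers:

#include <stdio.h>

/* Roughly what the removed generic atomic_and fallback did: reload and
   retry a compare-and-swap until the masked value has been stored.  */
static void
cas_loop_and (unsigned int *mem, unsigned int mask)
{
  unsigned int old;
  do
    old = *mem;
  while (!__atomic_compare_exchange_n (mem, &old, old & mask, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}

/* What the converted call sites reduce to: one acquire fetch-and-and;
   the previous value is returned and may be discarded.  */
static unsigned int
fetch_and_acquire (unsigned int *mem, unsigned int mask)
{
  return __atomic_fetch_and (mem, mask, __ATOMIC_ACQUIRE);
}

int
main (void)
{
  unsigned int a = 0xffu, b = 0xffu;
  cas_loop_and (&a, 0x0fu);
  unsigned int old = fetch_and_acquire (&b, 0x0fu);
  printf ("a=%#x b=%#x old=%#x\n", a, b, old);   /* a=0xf b=0xf old=0xff */
  return 0;
}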
@@ -539,7 +539,7 @@ start_thread (void *arg)
# endif
this->__list.__next = NULL;
- atomic_or (&this->__lock, FUTEX_OWNER_DIED);
+ atomic_fetch_or_acquire (&this->__lock, FUTEX_OWNER_DIED);
futex_wake ((unsigned int *) &this->__lock, 1,
/* XYZ */ FUTEX_SHARED);
}
@@ -462,7 +462,7 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
{
- atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+ atomic_fetch_and_acquire (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
/* We got the mutex. */
mutex->__data.__count = 1;
@@ -392,7 +392,7 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
{
- atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+ atomic_fetch_and_acquire (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
/* We got the mutex. */
mutex->__data.__count = 1;
@@ -308,7 +308,7 @@ ___pthread_mutex_trylock (pthread_mutex_t *mutex)
if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
{
- atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
+ atomic_fetch_and_acquire (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
/* We got the mutex. */
mutex->__data.__count = 1;
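The three mutex hunks above all clear FUTEX_OWNER_DIED after recovering a robust mutex from a dead owner, and the start_thread hunk above sets that same bit before waking a waiter. A minimal sketch of that bit traffic, not part of the patch, using a plain unsigned int in place of the futex lock word (FUTEX_OWNER_DIED is the 0x40000000 bit in the Linux futex ABI):

#include <stdio.h>

#define FUTEX_OWNER_DIED 0x40000000u   /* owner-died bit of a futex word */

int
main (void)
{
  unsigned int lock = 1234u;           /* stand-in for the owner's TID */

  /* start_thread: flag the still-held robust mutex as owned by a dead
     thread; the returned old value is not needed there.  */
  __atomic_fetch_or (&lock, FUTEX_OWNER_DIED, __ATOMIC_ACQUIRE);
  printf ("after or:  %#x\n", lock);   /* 0x400004d2 */

  /* pthread_mutex_{lock,clocklock,trylock}: the recovering owner clears
     the bit once it holds the mutex.  */
  __atomic_fetch_and (&lock, ~FUTEX_OWNER_DIED, __ATOMIC_ACQUIRE);
  printf ("after and: %#x\n", lock);   /* 0x4d2 */

  return 0;
}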
@@ -80,27 +80,3 @@
# define atomic_exchange_and_add_rel(mem, operand) \
({ __atomic_check_size((mem)); \
__atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
-
-/* Atomically *mem |= mask and return the old value of *mem. */
-/* The gcc builtin uses load-and-or instruction on z196 zarch and higher cpus
- instead of a loop with compare-and-swap instruction. */
-#define atomic_or_val(mem, operand) \
- ({ __atomic_check_size((mem)); \
- __atomic_fetch_or ((mem), (operand), __ATOMIC_ACQUIRE); })
-/* Atomically *mem |= mask. */
-#define atomic_or(mem, mask) \
- do { \
- atomic_or_val (mem, mask); \
- } while (0)
-
-/* Atomically *mem &= mask and return the old value of *mem. */
-/* The gcc builtin uses load-and-and instruction on z196 zarch and higher cpus
- instead of a loop with compare-and-swap instruction. */
-#define atomic_and_val(mem, operand) \
- ({ __atomic_check_size((mem)); \
- __atomic_fetch_and ((mem), (operand), __ATOMIC_ACQUIRE); })
-/* Atomically *mem &= mask. */
-#define atomic_and(mem, mask) \
- do { \
- atomic_and_val (mem, mask); \
- } while (0)
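The s390 wrappers removed here only forwarded to the __atomic builtins with __ATOMIC_ACQUIRE and threw the result away, so call sites converted to atomic_fetch_or_acquire/atomic_fetch_and_acquire should see no change in generated code; per the removed comment, the load-and-or/load-and-and instructions on z196 and newer come from the compiler builtin either way. A tiny illustrative sketch, not part of the patch, with hypothetical macro names:

#include <stdio.h>

/* Shape of the removed s390 wrappers (my_or_val/my_or are illustrative
   names): a statement macro that just calls the value-returning form and
   drops the result, which is exactly what callers of
   atomic_fetch_or_acquire that ignore the return value get anyway.  */
#define my_or_val(mem, mask)  __atomic_fetch_or ((mem), (mask), __ATOMIC_ACQUIRE)
#define my_or(mem, mask)      do { my_or_val (mem, mask); } while (0)

int
main (void)
{
  unsigned int word = 0x1u;
  my_or (&word, 0x4u);
  printf ("%#x\n", word);   /* prints 0x5 */
  return 0;
}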
@@ -292,54 +292,6 @@
__result; })
-#define __arch_and_body(lock, mem, mask) \
- do { \
- if (sizeof (*mem) == 1) \
- __asm __volatile (lock "andb %b1, %0" \
- : "=m" (*mem) \
- : IBR_CONSTRAINT (mask), "m" (*mem)); \
- else if (sizeof (*mem) == 2) \
- __asm __volatile (lock "andw %w1, %0" \
- : "=m" (*mem) \
- : "ir" (mask), "m" (*mem)); \
- else if (sizeof (*mem) == 4) \
- __asm __volatile (lock "andl %1, %0" \
- : "=m" (*mem) \
- : "ir" (mask), "m" (*mem)); \
- else if (__HAVE_64B_ATOMICS) \
- __asm __volatile (lock "andq %q1, %0" \
- : "=m" (*mem) \
- : "ir" (mask), "m" (*mem)); \
- else \
- __atomic_link_error (); \
- } while (0)
-
-#define atomic_and(mem, mask) __arch_and_body (LOCK_PREFIX, mem, mask)
-
-#define __arch_or_body(lock, mem, mask) \
- do { \
- if (sizeof (*mem) == 1) \
- __asm __volatile (lock "orb %b1, %0" \
- : "=m" (*mem) \
- : IBR_CONSTRAINT (mask), "m" (*mem)); \
- else if (sizeof (*mem) == 2) \
- __asm __volatile (lock "orw %w1, %0" \
- : "=m" (*mem) \
- : "ir" (mask), "m" (*mem)); \
- else if (sizeof (*mem) == 4) \
- __asm __volatile (lock "orl %1, %0" \
- : "=m" (*mem) \
- : "ir" (mask), "m" (*mem)); \
- else if (__HAVE_64B_ATOMICS) \
- __asm __volatile (lock "orq %q1, %0" \
- : "=m" (*mem) \
- : "ir" (mask), "m" (*mem)); \
- else \
- __atomic_link_error (); \
- } while (0)
-
-#define atomic_or(mem, mask) __arch_or_body (LOCK_PREFIX, mem, mask)
-
/* We don't use mfence because it is supposedly slower due to having to
provide stronger guarantees (e.g., regarding self-modifying code). */
#define atomic_full_barrier() \
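The x86 definitions removed above hand-coded a lock-prefixed and/or that never fetches the old value. When the result of __atomic_fetch_and/__atomic_fetch_or is unused, current GCC can usually emit that same single non-fetching lock and/lock or itself, so dropping the inline assembly should not force the converted call sites into a cmpxchg loop. A small sketch, not part of the patch; the code-generation claim is compiler- and version-dependent and worth confirming in the disassembly:

#include <stdio.h>

/* The result is discarded, so on x86-64 a recent GCC typically compiles
   this to a single "lock orl" rather than a cmpxchg retry loop.  */
static void
set_bits (unsigned int *mem, unsigned int mask)
{
  (void) __atomic_fetch_or (mem, mask, __ATOMIC_ACQUIRE);
}

int
main (void)
{
  unsigned int flags = 0x1u;
  set_bits (&flags, 0x8u);
  printf ("%#x\n", flags);   /* prints 0x9 */
  return 0;
}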