@@ -262,7 +262,7 @@ failed_starting:
}
__pthread_setid (pthread->thread, NULL);
- atomic_decrement (&__pthread_total);
+ atomic_fetch_add_relaxed (&__pthread_total, -1);
failed_sigstate:
__pthread_sigstate_destroy (pthread);
failed_setup:
@@ -33,7 +33,7 @@ extern pthread_mutex_t __pthread_free_threads_lock;
void
__pthread_dealloc (struct __pthread *pthread)
{
- if (!atomic_decrement_and_test (&pthread->nr_refs))
+ if (atomic_fetch_add_relaxed (&pthread->nr_refs, -1) != 1)
return;
/* Withdraw this thread from the thread ID lookup table. */
@@ -50,7 +50,7 @@ __pthread_exit (void *status)
/* Decrease the number of threads. We use an atomic operation to
make sure that only the last thread calls `exit'. */
- if (atomic_decrement_and_test (&__pthread_total))
+ if (atomic_fetch_add_relaxed (&__pthread_total, -1) == 1)
/* We are the last thread. */
exit (0);
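
The conversion works because atomic_fetch_add_relaxed returns the value the
counter held before the decrement: observing the old value 1 is the same
condition the removed atomic_decrement_and_test expressed as "now zero".  A
minimal illustrative sketch of the same pattern (not glibc source, using ISO
C11 atomics and a hypothetical counter name):

    #include <stdatomic.h>
    #include <stdlib.h>

    static atomic_uint nthreads;   /* hypothetical process-wide thread count */

    static void
    on_thread_exit (void)
    {
      /* The fetch-add returns the pre-decrement value, so seeing 1 means
         this caller performed the final decrement.  */
      if (atomic_fetch_add_explicit (&nthreads, -1, memory_order_relaxed) == 1)
        exit (0);   /* we were the last thread */
    }
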
@@ -182,27 +182,6 @@
} while (0)
#endif
-#ifndef atomic_add
-# define atomic_add(mem, value) (void) atomic_exchange_and_add ((mem), (value))
-#endif
-
-
-#ifndef atomic_decrement
-# define atomic_decrement(mem) atomic_add ((mem), -1)
-#endif
-
-
-#ifndef atomic_decrement_val
-# define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
-#endif
-
-
-/* Subtract 1 from *MEM and return true iff it's now zero. */
-#ifndef atomic_decrement_and_test
-# define atomic_decrement_and_test(mem) \
- (atomic_exchange_and_add ((mem), -1) == 1)
-#endif
-
/* Decrement *MEM if it is > 0, and return the old value. */
#ifndef atomic_decrement_if_positive
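
For reference, every generic fallback removed above is expressible directly
in terms of the fetch-and-add primitive that replaces it throughout this
series.  A rough sketch of the correspondences (illustrative, not the
literal definitions):

    /* atomic_add (mem, value)          ->  (void) atomic_fetch_add_relaxed (mem, value)
       atomic_decrement (mem)           ->  (void) atomic_fetch_add_relaxed (mem, -1)
       atomic_decrement_val (mem)       ->  atomic_fetch_add_relaxed (mem, -1) - 1
       atomic_decrement_and_test (mem)  ->  atomic_fetch_add_relaxed (mem, -1) == 1  */
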
@@ -89,7 +89,7 @@ by @theglibc{}.
@c
@c Given the use atomic operations this function seems
@c to be AS-safe. It is AC-unsafe because there is still
-@c a window between atomic_decrement and the pthread_push
+@c a window between atomic_fetch_add_relaxed and the pthread_push
@c of the handler that undoes that operation. A cancellation
@c at that point would fail to remove the process from the
@c waiters count.
@@ -2569,7 +2569,7 @@ aiocb64}, since the LFS transparently replaces the old interface.
@c lll_lock (pd->lock) @asulock @aculock
@c atomic_fetch_add_relaxed ok
@c clone ok
-@c atomic_decrement ok
+@c atomic_fetch_add_relaxed ok
@c atomic_exchange_acquire ok
@c lll_futex_wake ok
@c deallocate_stack dup
@@ -2614,7 +2614,7 @@ aiocb64}, since the LFS transparently replaces the old interface.
@c free @ascuheap @acsmem
@c libc_thread_freeres
@c libc_thread_subfreeres ok
-@c atomic_decrement_and_test ok
+@c atomic_fetch_add_relaxed ok
@c td_eventword ok
@c td_eventmask ok
@c atomic_compare_exchange_bool_acq ok
@@ -397,7 +397,7 @@ this function is in @file{stdlib.h}.
@c mutex_lock (list_lock) dup @asulock @aculock
@c atomic_write_barrier ok
@c mutex_unlock (list_lock) @aculock
-@c atomic_decrement ok
+@c atomic_fetch_add_relaxed ok
@c reused_arena @asulock @aculock
@c reads&writes next_to_use and iterates over arena next without guards
@c those are harmless as long as we don't drop arenas from the
@@ -96,68 +96,6 @@ do_test (void)
ret = 1;
}
- mem = 2;
- if (atomic_exchange_and_add (&mem, 11) != 2
- || mem != 13)
- {
- puts ("atomic_exchange_and_add test failed");
- ret = 1;
- }
-
- mem = 2;
- if (atomic_exchange_and_add_acq (&mem, 11) != 2
- || mem != 13)
- {
- puts ("atomic_exchange_and_add test failed");
- ret = 1;
- }
-
- mem = 2;
- if (atomic_exchange_and_add_rel (&mem, 11) != 2
- || mem != 13)
- {
- puts ("atomic_exchange_and_add test failed");
- ret = 1;
- }
-
- mem = 17;
- atomic_decrement (&mem);
- if (mem != 16)
- {
- puts ("atomic_decrement test failed");
- ret = 1;
- }
-
- if (atomic_decrement_val (&mem) != 15)
- {
- puts ("atomic_decrement_val test failed");
- ret = 1;
- }
-
- mem = 0;
- if (atomic_decrement_and_test (&mem)
- || mem != -1)
- {
- puts ("atomic_decrement_and_test test 1 failed");
- ret = 1;
- }
-
- mem = 15;
- if (atomic_decrement_and_test (&mem)
- || mem != 14)
- {
- puts ("atomic_decrement_and_test test 2 failed");
- ret = 1;
- }
-
- mem = 1;
- if (! atomic_decrement_and_test (&mem)
- || mem != 0)
- {
- puts ("atomic_decrement_and_test test 3 failed");
- ret = 1;
- }
-
mem = 1;
if (atomic_decrement_if_positive (&mem) != 1
|| mem != 0)
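
The deleted cases only exercised the macros removed above; comparable
coverage for the surviving relaxed fetch-and-add would look roughly like
this (a sketch in the style of the surrounding test, not part of the patch):

    /* Sketch: atomic_fetch_add_relaxed returns the pre-add value.  */
    mem = 2;
    if (atomic_fetch_add_relaxed (&mem, 11) != 2 || mem != 13)
      {
        puts ("atomic_fetch_add_relaxed test failed");
        ret = 1;
      }

    mem = 17;
    if (atomic_fetch_add_relaxed (&mem, -1) != 17 || mem != 16)
      {
        puts ("atomic_fetch_add_relaxed decrement test failed");
        ret = 1;
      }
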
@@ -24,7 +24,7 @@ cons (void *arg)
do
{
- if (atomic_decrement_and_test (&ntogo))
+ if (atomic_fetch_add_relaxed (&ntogo, -1) == 1)
{
pthread_mutex_lock (&mut2);
alldone = true;
@@ -88,7 +88,7 @@ __nptl_setxid_sighandler (int sig, siginfo_t *si, void *ctx)
self->setxid_futex = 1;
futex_wake (&self->setxid_futex, 1, FUTEX_PRIVATE);
- if (atomic_decrement_val (&xidcmd->cntr) == 0)
+ if (atomic_fetch_add_relaxed (&xidcmd->cntr, -1) == 1)
futex_wake ((unsigned int *) &xidcmd->cntr, 1, FUTEX_PRIVATE);
}
libc_hidden_def (__nptl_setxid_sighandler)
@@ -489,7 +489,7 @@ start_thread (void *arg)
the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE. */
atomic_fetch_or_acquire (&pd->cancelhandling, 1 << EXITING_BIT);
- if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
+ if (__glibc_unlikely (atomic_fetch_add_relaxed (&__nptl_nthreads, -1) == 1))
/* This was the last thread. */
exit (0);
@@ -861,7 +861,7 @@ __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr,
NOTES above). */
/* Oops, we lied for a second. */
- atomic_decrement (&__nptl_nthreads);
+ atomic_fetch_add_relaxed (&__nptl_nthreads, -1);
/* Free the resources. */
__nptl_deallocate_stack (pd);
@@ -421,7 +421,7 @@ __nscd_drop_map_ref (struct mapped_database *map, int *gc_cycle)
return -1;
}
- if (atomic_decrement_val (&map->counter) == 0)
+ if (atomic_fetch_add_relaxed (&map->counter, -1) == 1)
__nscd_unmap (map);
}
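
The nscd conversions here and below all follow the same reference-count
pattern: drop one reference and unmap the database only when the
pre-decrement count was 1, i.e. when this caller released the last
reference.  Expressed as a hypothetical helper (sketch only, not part of the
patch):

    /* Hypothetical helper mirroring each converted call site: release one
       reference; the caller that observes the old count 1 does the unmap.  */
    static void
    nscd_drop_ref (struct mapped_database *map)
    {
      if (atomic_fetch_add_relaxed (&map->counter, -1) == 1)
        __nscd_unmap (map);
    }
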
@@ -198,7 +198,7 @@ __nscd_getai (const char *key, struct nscd_ai_result **result, int *h_errnop)
if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
{
/* nscd is just running gc now. Disable using the mapping. */
- if (atomic_decrement_val (&mapped->counter) == 0)
+ if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
__nscd_unmap (mapped);
mapped = NO_MAPPING;
}
@@ -312,7 +312,7 @@ nscd_getgr_r (const char *key, size_t keylen, request_type type,
if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
{
/* nscd is just running gc now. Disable using the mapping. */
- if (atomic_decrement_val (&mapped->counter) == 0)
+ if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
__nscd_unmap (mapped);
mapped = NO_MAPPING;
}
@@ -440,7 +440,7 @@ nscd_gethst_r (const char *key, size_t keylen, request_type type,
if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
{
/* nscd is just running gc now. Disable using the mapping. */
- if (atomic_decrement_val (&mapped->counter) == 0)
+ if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
__nscd_unmap (mapped);
mapped = NO_MAPPING;
}
@@ -225,7 +225,7 @@ nscd_getpw_r (const char *key, size_t keylen, request_type type,
if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
{
/* nscd is just running gc now. Disable using the mapping. */
- if (atomic_decrement_val (&mapped->counter) == 0)
+ if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
__nscd_unmap (mapped);
mapped = NO_MAPPING;
}
@@ -365,7 +365,7 @@ nscd_getserv_r (const char *crit, size_t critlen, const char *proto,
if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
{
/* nscd is just running gc now. Disable using the mapping. */
- if (atomic_decrement_val (&mapped->counter) == 0)
+ if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
__nscd_unmap (mapped);
mapped = NO_MAPPING;
}
@@ -390,7 +390,7 @@ __nscd_get_mapping (request_type type, const char *key,
struct mapped_database *oldval = *mappedp;
*mappedp = result;
- if (oldval != NULL && atomic_decrement_val (&oldval->counter) == 0)
+ if (oldval != NULL && atomic_fetch_add_relaxed (&oldval->counter, -1) == 1)
__nscd_unmap (oldval);
return result;
@@ -166,7 +166,7 @@ __nscd_getgrouplist (const char *user, gid_t group, long int *size,
if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
{
/* nscd is just running gc now. Disable using the mapping. */
- if (atomic_decrement_val (&mapped->counter) == 0)
+ if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
__nscd_unmap (mapped);
mapped = NO_MAPPING;
}
@@ -148,7 +148,7 @@ __nscd_setnetgrent (const char *group, struct __netgrent *datap)
if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
{
/* nscd is just running gc now. Disable using the mapping. */
- if (atomic_decrement_val (&mapped->counter) == 0)
+ if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
__nscd_unmap (mapped);
mapped = NO_MAPPING;
}
@@ -272,7 +272,7 @@ __nscd_innetgr (const char *netgroup, const char *host, const char *user,
if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
{
/* nscd is just running gc now. Disable using the mapping. */
- if (atomic_decrement_val (&mapped->counter) == 0)
+ if (atomic_fetch_add_relaxed (&mapped->counter, -1) == 1)
__nscd_unmap (mapped);
mapped = NO_MAPPING;
}
@@ -103,28 +103,6 @@
__atomic_val_bysize (__arch_compare_and_exchange_val, int, \
mem, new, old, __ATOMIC_RELEASE)
-/* Atomically add value and return the previous (unincremented) value. */
-
-# define __arch_exchange_and_add_8_int(mem, value, model) \
- __atomic_fetch_add (mem, value, model)
-
-# define __arch_exchange_and_add_16_int(mem, value, model) \
- __atomic_fetch_add (mem, value, model)
-
-# define __arch_exchange_and_add_32_int(mem, value, model) \
- __atomic_fetch_add (mem, value, model)
-
-# define __arch_exchange_and_add_64_int(mem, value, model) \
- __atomic_fetch_add (mem, value, model)
-
-# define atomic_exchange_and_add_acq(mem, value) \
- __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
- __ATOMIC_ACQUIRE)
-
-# define atomic_exchange_and_add_rel(mem, value) \
- __atomic_val_bysize (__arch_exchange_and_add, int, mem, value, \
- __ATOMIC_RELEASE)
-
/* Barrier macro. */
#define atomic_full_barrier() __sync_synchronize()
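
The per-size wrappers deleted above all reduced to the __atomic_fetch_add
compiler builtin; the relaxed fetch-and-add used throughout this series
presumably maps onto the same builtin with a relaxed memory order.  A sketch
of the assumed generic definition (the actual one lives in glibc's internal
atomic.h):

    # define atomic_fetch_add_relaxed(mem, operand) \
      __atomic_fetch_add ((mem), (operand), __ATOMIC_RELAXED)
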
@@ -124,66 +124,3 @@
: "memory"); \
} \
__result; })
-
-#define atomic_add(mem, value) \
- (void) ({ if (sizeof (*(mem)) == 1) \
- __asm __volatile ("add%.b %1,%0" \
- : "+m" (*(mem)) \
- : "id" (value)); \
- else if (sizeof (*(mem)) == 2) \
- __asm __volatile ("add%.w %1,%0" \
- : "+m" (*(mem)) \
- : "id" (value)); \
- else if (sizeof (*(mem)) == 4) \
- __asm __volatile ("add%.l %1,%0" \
- : "+m" (*(mem)) \
- : "id" (value)); \
- else \
- { \
- __typeof (mem) __memp = (mem); \
- __typeof (*(mem)) __oldval = *__memp; \
- __typeof (*(mem)) __temp; \
- __asm __volatile ("1: move%.l %0,%1;" \
- " move%.l %R0,%R1;" \
- " add%.l %R2,%R1;" \
- " addx%.l %2,%1;" \
- " cas2%.l %0:%R0,%1:%R1,(%3):(%4);" \
- " jbne 1b" \
- : "=d" (__oldval), "=&d" (__temp) \
- : "d" ((__typeof (*(mem))) (value)), \
- "r" (__memp), "r" ((char *) __memp + 4), \
- "0" (__oldval) \
- : "memory"); \
- } \
- })
-
-#define atomic_decrement_and_test(mem) \
- ({ char __result; \
- if (sizeof (*(mem)) == 1) \
- __asm __volatile ("subq%.b %#1,%1; seq %0" \
- : "=dm" (__result), "+m" (*(mem))); \
- else if (sizeof (*(mem)) == 2) \
- __asm __volatile ("subq%.w %#1,%1; seq %0" \
- : "=dm" (__result), "+m" (*(mem))); \
- else if (sizeof (*(mem)) == 4) \
- __asm __volatile ("subq%.l %#1,%1; seq %0" \
- : "=dm" (__result), "+m" (*(mem))); \
- else \
- { \
- __typeof (mem) __memp = (mem); \
- __typeof (*(mem)) __oldval = *__memp; \
- __typeof (*(mem)) __temp; \
- __asm __volatile ("1: move%.l %1,%2;" \
- " move%.l %R1,%R2;" \
- " subq%.l %#1,%R2;" \
- " subx%.l %5,%2;" \
- " seq %0;" \
- " cas2%.l %1:%R1,%2:%R2,(%3):(%4);" \
- " jbne 1b" \
- : "=&dm" (__result), "=d" (__oldval), \
- "=&d" (__temp) \
- : "r" (__memp), "r" ((char *) __memp + 4), \
- "d" (0), "1" (__oldval) \
- : "memory"); \
- } \
- __result; })
@@ -172,42 +172,3 @@
abort (); \
__result; \
})
-
-#define __arch_atomic_decrement_val_32(mem) \
- ({ \
- __typeof (*(mem)) __val; \
- int test; \
- __asm __volatile ( \
- " addc r0, r0, r0;" \
- "1: lwx %0, %3, r0;" \
- " addic %1, r0, 0;" \
- " bnei %1, 1b;" \
- " rsubi %0, %0, 1;" \
- " swx %0, %3, r0;" \
- " addic %1, r0, 0;" \
- " bnei %1, 1b;" \
- : "=&r" (__val), \
- "=&r" (test), \
- "=m" (*mem) \
- : "r" (mem), \
- "m" (*mem) \
- : "cc", "memory"); \
- __val; \
- })
-
-#define __arch_atomic_decrement_val_64(mem) \
- (abort (), (__typeof (*mem)) 0)
-
-#define atomic_decrement_val(mem) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*(mem)) == 4) \
- __result = __arch_atomic_decrement_val_32 (mem); \
- else if (sizeof (*(mem)) == 8) \
- __result = __arch_atomic_decrement_val_64 (mem); \
- else \
- abort (); \
- __result; \
- })
-
-#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })
@@ -65,7 +65,7 @@ __libc_start_call_main (int (*main) (int, char **, char ** MAIN_AUXVEC_DECL),
/* One less thread. Decrement the counter. If it is zero we
terminate the entire process. */
result = 0;
- if (! atomic_decrement_and_test (&__nptl_nthreads))
+ if (atomic_fetch_add_relaxed (&__nptl_nthreads, -1) != 1)
/* Not much left to do but to exit the thread, not the process. */
while (1)
INTERNAL_SYSCALL_CALL (exit, 0);
@@ -151,19 +151,6 @@
__val; \
})
-#define __arch_atomic_decrement_val_32(mem) \
- ({ \
- __typeof (*(mem)) __val; \
- __asm __volatile ("1: lwarx %0,0,%2\n" \
- " subi %0,%0,1\n" \
- " stwcx. %0,0,%2\n" \
- " bne- 1b" \
- : "=&b" (__val), "=m" (*mem) \
- : "b" (mem), "m" (*mem) \
- : "cr0", "memory"); \
- __val; \
- })
-
#define __arch_atomic_decrement_if_positive_32(mem) \
({ int __val, __tmp; \
__asm __volatile ("1: lwarx %0,0,%3\n" \
@@ -261,21 +248,6 @@
__result; \
})
-#define atomic_decrement_val(mem) \
- ({ \
- __typeof (*(mem)) __result; \
- if (sizeof (*(mem)) == 4) \
- __result = __arch_atomic_decrement_val_32 (mem); \
- else if (sizeof (*(mem)) == 8) \
- __result = __arch_atomic_decrement_val_64 (mem); \
- else \
- abort (); \
- __result; \
- })
-
-#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })
-
-
/* Decrement *MEM if it is > 0, and return the old value. */
#define atomic_decrement_if_positive(mem) \
({ __typeof (*(mem)) __result; \
@@ -150,104 +150,3 @@
(void) __value; \
} \
__result; })
-
-#define atomic_add(mem, value) \
- (void) ({ __typeof (*(mem)) __tmp, __value = (value); \
- if (sizeof (*(mem)) == 1) \
- __asm __volatile ("\
- mova 1f,r0\n\
- mov r15,r1\n\
- .align 2\n\
- mov #(0f-1f),r15\n\
- 0: mov.b @%1,r2\n\
- add %0,r2\n\
- mov.b r2,@%1\n\
- 1: mov r1,r15"\
- : "=&r" (__tmp) : "u" (mem), "0" (__value) \
- : "r0", "r1", "r2", "memory"); \
- else if (sizeof (*(mem)) == 2) \
- __asm __volatile ("\
- mova 1f,r0\n\
- mov r15,r1\n\
- .align 2\n\
- mov #(0f-1f),r15\n\
- 0: mov.w @%1,r2\n\
- add %0,r2\n\
- mov.w r2,@%1\n\
- 1: mov r1,r15"\
- : "=&r" (__tmp) : "u" (mem), "0" (__value) \
- : "r0", "r1", "r2", "memory"); \
- else if (sizeof (*(mem)) == 4) \
- __asm __volatile ("\
- mova 1f,r0\n\
- mov r15,r1\n\
- .align 2\n\
- mov #(0f-1f),r15\n\
- 0: mov.l @%1,r2\n\
- add %0,r2\n\
- mov.l r2,@%1\n\
- 1: mov r1,r15"\
- : "=&r" (__tmp) : "u" (mem), "0" (__value) \
- : "r0", "r1", "r2", "memory"); \
- else \
- { \
- __typeof (*(mem)) oldval; \
- __typeof (mem) memp = (mem); \
- do \
- oldval = *memp; \
- while (__arch_compare_and_exchange_val_64_acq \
- (memp, oldval + __value, oldval) == oldval); \
- (void) __value; \
- } \
- })
-
-#define atomic_add_zero(mem, value) \
- ({ unsigned char __result; \
- __typeof (*(mem)) __tmp, __value = (value); \
- if (sizeof (*(mem)) == 1) \
- __asm __volatile ("\
- mova 1f,r0\n\
- mov r15,r1\n\
- .align 2\n\
- mov #(0f-1f),r15\n\
- 0: mov.b @%2,r2\n\
- add %1,r2\n\
- mov.b r2,@%2\n\
- 1: mov r1,r15\n\
- tst r2,r2\n\
- movt %0"\
- : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
- : "r0", "r1", "r2", "t", "memory"); \
- else if (sizeof (*(mem)) == 2) \
- __asm __volatile ("\
- mova 1f,r0\n\
- mov r15,r1\n\
- .align 2\n\
- mov #(0f-1f),r15\n\
- 0: mov.w @%2,r2\n\
- add %1,r2\n\
- mov.w r2,@%2\n\
- 1: mov r1,r15\n\
- tst r2,r2\n\
- movt %0"\
- : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
- : "r0", "r1", "r2", "t", "memory"); \
- else if (sizeof (*(mem)) == 4) \
- __asm __volatile ("\
- mova 1f,r0\n\
- mov r15,r1\n\
- .align 2\n\
- mov #(0f-1f),r15\n\
- 0: mov.l @%2,r2\n\
- add %1,r2\n\
- mov.l r2,@%2\n\
- 1: mov r1,r15\n\
- tst r2,r2\n\
- movt %0"\
- : "=r" (__result), "=&r" (__tmp) : "u" (mem), "1" (__value) \
- : "r0", "r1", "r2", "t", "memory"); \
- else \
- abort (); \
- __result; })
-
-#define atomic_decrement_and_test(mem) atomic_add_zero((mem), -1)
@@ -30,9 +30,6 @@
#ifdef __x86_64__
# define __HAVE_64B_ATOMICS 1
# define SP_REG "rsp"
-# define SEG_REG "fs"
-# define BR_CONSTRAINT "q"
-# define IBR_CONSTRAINT "iq"
#else
/* Since the Pentium, i386 CPUs have supported 64-bit atomics, but the
i386 psABI supplement provides only 4-byte alignment for uint64_t
@@ -40,9 +37,6 @@
atomics on this platform. */
# define __HAVE_64B_ATOMICS 0
# define SP_REG "esp"
-# define SEG_REG "gs"
-# define BR_CONSTRAINT "r"
-# define IBR_CONSTRAINT "ir"
#endif
#define ATOMIC_EXCHANGE_USES_CAS 0
@@ -51,122 +45,6 @@
#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
(! __sync_bool_compare_and_swap (mem, oldval, newval))
-
-#ifdef __x86_64__
-# define do_exchange_and_add_val_64_acq(pfx, mem, value) 0
-# define do_add_val_64_acq(pfx, mem, value) do { } while (0)
-#else
-/* XXX We do not really need 64-bit compare-and-exchange. At least
- not in the moment. Using it would mean causing portability
- problems since not many other 32-bit architectures have support for
- such an operation. So don't define any code for now. If it is
- really going to be used the code below can be used on Intel Pentium
- and later, but NOT on i486. */
-# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
- ({ __typeof (*mem) ret = *(mem); \
- __atomic_link_error (); \
- ret = (newval); \
- ret = (oldval); \
- ret; })
-
-# define do_exchange_and_add_val_64_acq(pfx, mem, value) \
- ({ __typeof (value) __addval = (value); \
- __typeof (*mem) __result; \
- __typeof (mem) __memp = (mem); \
- __typeof (*mem) __tmpval; \
- __result = *__memp; \
- do \
- __tmpval = __result; \
- while ((__result = pfx##_compare_and_exchange_val_64_acq \
- (__memp, __result + __addval, __result)) == __tmpval); \
- __result; })
-
-# define do_add_val_64_acq(pfx, mem, value) \
- { \
- __typeof (value) __addval = (value); \
- __typeof (mem) __memp = (mem); \
- __typeof (*mem) __oldval = *__memp; \
- __typeof (*mem) __tmpval; \
- do \
- __tmpval = __oldval; \
- while ((__oldval = pfx##_compare_and_exchange_val_64_acq \
- (__memp, __oldval + __addval, __oldval)) == __tmpval); \
- }
-#endif
-
-
-#define __arch_exchange_and_add_body(lock, pfx, mem, value) \
- ({ __typeof (*mem) __result; \
- __typeof (value) __addval = (value); \
- if (sizeof (*mem) == 1) \
- __asm __volatile (lock "xaddb %b0, %1" \
- : "=q" (__result), "=m" (*mem) \
- : "0" (__addval), "m" (*mem)); \
- else if (sizeof (*mem) == 2) \
- __asm __volatile (lock "xaddw %w0, %1" \
- : "=r" (__result), "=m" (*mem) \
- : "0" (__addval), "m" (*mem)); \
- else if (sizeof (*mem) == 4) \
- __asm __volatile (lock "xaddl %0, %1" \
- : "=r" (__result), "=m" (*mem) \
- : "0" (__addval), "m" (*mem)); \
- else if (__HAVE_64B_ATOMICS) \
- __asm __volatile (lock "xaddq %q0, %1" \
- : "=r" (__result), "=m" (*mem) \
- : "0" ((int64_t) cast_to_integer (__addval)), \
- "m" (*mem)); \
- else \
- __result = do_exchange_and_add_val_64_acq (pfx, (mem), __addval); \
- __result; })
-
-#define atomic_exchange_and_add(mem, value) \
- __sync_fetch_and_add (mem, value)
-
-#define __arch_decrement_body(lock, pfx, mem) \
- do { \
- if (sizeof (*mem) == 1) \
- __asm __volatile (lock "decb %b0" \
- : "=m" (*mem) \
- : "m" (*mem)); \
- else if (sizeof (*mem) == 2) \
- __asm __volatile (lock "decw %w0" \
- : "=m" (*mem) \
- : "m" (*mem)); \
- else if (sizeof (*mem) == 4) \
- __asm __volatile (lock "decl %0" \
- : "=m" (*mem) \
- : "m" (*mem)); \
- else if (__HAVE_64B_ATOMICS) \
- __asm __volatile (lock "decq %q0" \
- : "=m" (*mem) \
- : "m" (*mem)); \
- else \
- do_add_val_64_acq (pfx, mem, -1); \
- } while (0)
-
-#define atomic_decrement(mem) __arch_decrement_body (LOCK_PREFIX, __arch, mem)
-
-#define atomic_decrement_and_test(mem) \
- ({ unsigned char __result; \
- if (sizeof (*mem) == 1) \
- __asm __volatile (LOCK_PREFIX "decb %b0; sete %1" \
- : "=m" (*mem), "=qm" (__result) \
- : "m" (*mem)); \
- else if (sizeof (*mem) == 2) \
- __asm __volatile (LOCK_PREFIX "decw %w0; sete %1" \
- : "=m" (*mem), "=qm" (__result) \
- : "m" (*mem)); \
- else if (sizeof (*mem) == 4) \
- __asm __volatile (LOCK_PREFIX "decl %0; sete %1" \
- : "=m" (*mem), "=qm" (__result) \
- : "m" (*mem)); \
- else \
- __asm __volatile (LOCK_PREFIX "decq %q0; sete %1" \
- : "=m" (*mem), "=qm" (__result) \
- : "m" (*mem)); \
- __result; })
-
-
/* We don't use mfence because it is supposedly slower due to having to
provide stronger guarantees (e.g., regarding self-modifying code). */
#define atomic_full_barrier() \