Message ID: 20170614183048.11040-10-palmer@dabbelt.com
State: New
On Wed, 2017-06-14 at 11:30 -0700, Palmer Dabbelt wrote: > This patch implements various atomic and locking routines on RISC-V, > either via the A extension (when present) or via a Linux system call > that does a compare-and-exchange. This contains both the library > routines and the syscall wrapper. In the overview email you seemed to say that you only support the HW variants that have atomics. If that's so, why don't you just check that this is the case and produce a compile-time error if it isn't? > diff --git a/sysdeps/riscv/atomic-machine.h b/sysdeps/riscv/atomic-machine.h > new file mode 100644 > index 0000000000..c88dc1ce33 > --- /dev/null > +++ b/sysdeps/riscv/atomic-machine.h > @@ -0,0 +1,132 @@ > +/* Low-level functions for atomic operations. RISC-V version. > + Copyright (C) 2011-2016 Free Software Foundation, Inc. > + > + Contributed by Andrew Waterman (andrew@sifive.com). > + > + This file is part of the GNU C Library. > + > + The GNU C Library is free software; you can redistribute it and/or > + modify it under the terms of the GNU Lesser General Public > + License as published by the Free Software Foundation; either > + version 2.1 of the License, or (at your option) any later version. > + > + The GNU C Library is distributed in the hope that it will be useful, > + but WITHOUT ANY WARRANTY; without even the implied warranty of > + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU > + Lesser General Public License for more details. > + > + You should have received a copy of the GNU Lesser General Public > + License along with the GNU C Library; if not, write to the Free > + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA > + 02111-1307 USA. */ > + > +#ifndef _RISCV_BITS_ATOMIC_H > +#define _RISCV_BITS_ATOMIC_H 1 > + > +#include <inttypes.h> > + > +typedef int32_t atomic32_t; > +typedef uint32_t uatomic32_t; > +typedef int_fast32_t atomic_fast32_t; > +typedef uint_fast32_t uatomic_fast32_t; > + > +typedef int64_t atomic64_t; > +typedef uint64_t uatomic64_t; > +typedef int_fast64_t atomic_fast64_t; > +typedef uint_fast64_t uatomic_fast64_t; I believe the *fast* types aren't used anymore (yes we should clean that up). > +typedef intptr_t atomicptr_t; > +typedef uintptr_t uatomicptr_t; > +typedef intmax_t atomic_max_t; > +typedef uintmax_t uatomic_max_t; > + > +#define atomic_full_barrier() __sync_synchronize() > + > +#ifdef __riscv_atomic > + > +#define __HAVE_64B_ATOMICS (__riscv_xlen >= 64) > +#define USE_ATOMIC_COMPILER_BUILTINS 1 If the built-ins do what you want them to do, I suggest that you implement the old-style glibc atomics using the compiler built-ins (see aarch64 for example). > +/* MIPS uses swap on machines that have it, and uses CAS on machines that Refer to something else than MIPS I'd suggest, or clarify the link. > + * don't. This, we use amoswap when the A extension is enabled, and fall back > + * to the atomic system call when the A extension is disabled. */ That's not what the code does. > +#ifdef __riscv_atomic You are already in a__riscv_atomic #ifdef (see above). If you assume that your HW is of the variant with atomics, then you don't need to fall back to the kernel helper, or do you? > +# define ATOMIC_EXCHANGE_USES_CAS 0 > +#else > +# define ATOMIC_EXCHANGE_USES_CAS 1 The code for the old-style atomics doesn't seem to define an exchange; do the compiler builtins have one? 
> +#endif > + > +#define asm_amo(which, ordering, mem, value) ({ \ > + typeof(*mem) __tmp; \ > + if (sizeof(__tmp) == 4) \ > + asm volatile (which ".w" ordering "\t%0, %z2, %1" \ > + : "=r"(__tmp), "+A"(*(mem)) \ > + : "rJ"(value)); \ > + else if (sizeof(__tmp) == 8) \ > + asm volatile (which ".d" ordering "\t%0, %z2, %1" \ > + : "=r"(__tmp), "+A"(*(mem)) \ > + : "rJ"(value)); \ > + else \ > + abort(); \ > + __tmp; }) > + > +/* Atomic compare and exchange. */ > + > +#define atomic_cas(ordering, mem, newval, oldval) ({ \ > + typeof(*mem) __tmp; \ > + int __tmp2; \ > + if (sizeof(__tmp) == 4) \ > + asm volatile ("1: lr.w" ordering "\t%0, %2\n" \ > + "bne\t%0, %z4, 1f\n" \ > + "sc.w" ordering "\t%1, %z3, %2\n" \ > + "bnez\t%1, 1b\n" \ > + "1:" \ > + : "=&r"(__tmp), "=&r"(__tmp2), "+A"(*(mem)) \ > + : "rJ"(newval), "rJ"(oldval)); \ > + else if (sizeof(__tmp) == 8) \ > + asm volatile ("1: lr.d" ordering "\t%0, %2\n" \ > + "bne\t%0, %z4, 1f\n" \ > + "sc.d" ordering "\t%1, %z3, %2\n" \ > + "bnez\t%1, 1b\n" \ > + "1:" \ > + : "=&r"(__tmp), "=&r"(__tmp2), "+A"(*(mem)) \ > + : "rJ"(newval), "rJ"(oldval)); \ > + else \ > + abort(); \ > + __tmp; }) > + > +#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ > + atomic_cas(".aq", mem, newval, oldval) > + > +#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \ > + atomic_cas(".rl", mem, newval, oldval) > + > +/* Atomic exchange (without compare). */ > + > +#define atomic_exchange_acq(mem, value) asm_amo("amoswap", ".aq", mem, value) > +#define atomic_exchange_rel(mem, value) asm_amo("amoswap", ".rl", mem, value) > + > + > +/* Atomically add value and return the previous (unincremented) value. */ > + > +#define atomic_exchange_and_add(mem, value) asm_amo("amoadd", "", mem, value) The semantics of these old-style atomics aren't really well defined in glibc (but we're moving to C11-like atomics anyway). The default is for atomic_exchange_and_add to be the same as atomic_exchange_and_add_acq, see for example include/atomic.h: #ifndef atomic_exchange_and_add_acq # ifdef atomic_exchange_and_add # define atomic_exchange_and_add_acq(mem, value) \ atomic_exchange_and_add (mem, value) I suggest you keep it that way, instead of making them have relaxed MO. This will go away once we've completed the transition to C11 atomics. > + > +#define atomic_max(mem, value) asm_amo("amomaxu", "", mem, value) > +#define atomic_min(mem, value) asm_amo("amominu", "", mem, value) > + > +#define atomic_bit_test_set(mem, bit) \ > + ({ typeof(*mem) __mask = (typeof(*mem))1 << (bit); \ > + asm_amo("amoor", "", mem, __mask) & __mask; }) Likewise. > +#define catomic_exchange_and_add(mem, value) \ > + atomic_exchange_and_add(mem, value) > +#define catomic_max(mem, value) atomic_max(mem, value) > + > +#else /* __riscv_atomic */ > + > +#define __HAVE_64B_ATOMICS 0 > +#define USE_ATOMIC_COMPILER_BUILTINS 0 You need to provide at least a CAS to make this work. Having this file be included from the linux-specific variant is confusing, I think. Do you really need to support glibc not running on a Linux kernel? Otherwise, perhaps just use the linux-specific atomic-machine.h? > + > +#endif /* !__riscv_atomic */ > + > +#endif /* bits/atomic.h */ > diff --git a/sysdeps/riscv/nptl/bits/pthreadtypes-arch.h b/sysdeps/riscv/nptl/bits/pthreadtypes-arch.h > new file mode 100644 > index 0000000000..4473b0891a > --- /dev/null > +++ b/sysdeps/riscv/nptl/bits/pthreadtypes-arch.h > @@ -0,0 +1,98 @@ > +/* Machine-specific pthread type layouts. RISC-V version. 
> + Copyright (C) 2011-2014, 2017 Free Software Foundation, Inc. > + This file is part of the GNU C Library. > + > + The GNU C Library is free software; you can redistribute it and/or > + modify it under the terms of the GNU Lesser General Public > + License as published by the Free Software Foundation; either > + version 2.1 of the License, or (at your option) any later version. > + > + The GNU C Library is distributed in the hope that it will be useful, > + but WITHOUT ANY WARRANTY; without even the implied warranty of > + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU > + Lesser General Public License for more details. > + > + You should have received a copy of the GNU Lesser General Public > + License along with the GNU C Library; if not, write to the Free > + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA > + 02111-1307 USA. */ > + > +#ifndef _BITS_PTHREADTYPES_ARCH_H > +#define _BITS_PTHREADTYPES_ARCH_H 1 > + > +#include <endian.h> > + > +#if __riscv_xlen == 64 > +# define __SIZEOF_PTHREAD_ATTR_T 56 > +# define __SIZEOF_PTHREAD_MUTEX_T 40 > +# define __SIZEOF_PTHREAD_MUTEXATTR_T 4 > +# define __SIZEOF_PTHREAD_COND_T 48 > +# define __SIZEOF_PTHREAD_CONDATTR_T 4 > +# define __SIZEOF_PTHREAD_RWLOCK_T 56 > +# define __SIZEOF_PTHREAD_RWLOCKATTR_T 8 > +# define __SIZEOF_PTHREAD_BARRIER_T 32 > +# define __SIZEOF_PTHREAD_BARRIERATTR_T 4 > +#else > +# define __SIZEOF_PTHREAD_ATTR_T 36 > +# define __SIZEOF_PTHREAD_MUTEX_T 24 > +# define __SIZEOF_PTHREAD_MUTEXATTR_T 4 > +# define __SIZEOF_PTHREAD_COND_T 48 > +# define __SIZEOF_PTHREAD_CONDATTR_T 4 > +# define __SIZEOF_PTHREAD_RWLOCK_T 32 > +# define __SIZEOF_PTHREAD_RWLOCKATTR_T 8 > +# define __SIZEOF_PTHREAD_BARRIER_T 20 > +# define __SIZEOF_PTHREAD_BARRIERATTR_T 4 > +#endif > + > +#define __PTHREAD_COMPAT_PADDING_MID > +#define __PTHREAD_COMPAT_PADDING_END > +#define __PTHREAD_MUTEX_LOCK_ELISION 0 > + > +#define __LOCK_ALIGNMENT > +#define __ONCE_ALIGNMENT > + > +struct __pthread_rwlock_arch_t > +{ > +# if __riscv_xlen == 64 > + unsigned int __readers; > + unsigned int __writers; > + unsigned int __wrphase_futex; > + unsigned int __writers_futex; > + int __cur_writer; > + int __shared; > + int __pad3; > + int __pad4; > + unsigned long int __pad1; > + unsigned long int __pad2; > + /* FLAGS must stay at this position in the structure to maintain > + binary compatibility. */ That's not the case, I assume, given that your port is new? > + unsigned int __flags; > +# else > + unsigned int __readers; > + unsigned int __writers; > + unsigned int __wrphase_futex; > + unsigned int __writers_futex; > + int __cur_writer; > + int __pad3; > +#if __BYTE_ORDER == __BIG_ENDIAN > + unsigned char __pad1; > + unsigned char __pad2; > + unsigned char __shared; > + /* FLAGS must stay at this position in the structure to maintain > + binary compatibility. */ > + unsigned char __flags; > +#else > + /* FLAGS must stay at this position in the structure to maintain > + binary compatibility. */ > + unsigned char __flags; > + unsigned char __shared; > + unsigned char __pad1; > + unsigned char __pad2; > +#endif > + int __writer; > +# endif > +}; > + > +#define __PTHREAD_RWLOCK_ELISION_EXTRA 0 > + > +#endif /* bits/pthreadtypes.h */ > diff --git a/sysdeps/riscv/nptl/pthread_spin_lock.c b/sysdeps/riscv/nptl/pthread_spin_lock.c > new file mode 100644 > index 0000000000..dd5ffae97f > --- /dev/null > +++ b/sysdeps/riscv/nptl/pthread_spin_lock.c > @@ -0,0 +1,43 @@ > +/* Copyright (C) 2005 Free Software Foundation, Inc. 
> + This file is part of the GNU C Library. > + > + The GNU C Library is free software; you can redistribute it and/or > + modify it under the terms of the GNU Lesser General Public > + License as published by the Free Software Foundation; either > + version 2.1 of the License, or (at your option) any later version. > + > + The GNU C Library is distributed in the hope that it will be useful, > + but WITHOUT ANY WARRANTY; without even the implied warranty of > + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU > + Lesser General Public License for more details. > + > + You should have received a copy of the GNU Lesser General Public > + License along with the GNU C Library; if not, write to the Free > + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA > + 02111-1307 USA. */ > + > +#ifdef __riscv_atomic > + > +#include <errno.h> > + > +int pthread_spin_lock(pthread_spinlock_t* lock) > +{ > + int tmp1, tmp2; > + > + asm volatile ("\n\ > + 1:lw %0, 0(%2)\n\ > + li %1, %3\n\ > + bnez %0, 1b\n\ > + amoswap.w.aq %0, %1, 0(%2)\n\ > + bnez %0, 1b" > + : "=&r"(tmp1), "=&r"(tmp2) : "r"(lock), "i"(EBUSY) > + ); Why do you need a custom spinlock? Why is the generic version not good enough for you? We try to have as little custom synchronization code as possible. > + return tmp1; > +} > + > +#else /* __riscv_atomic */ > + > +#include <sysdeps/../nptl/pthread_spin_lock.c> > + > +#endif /* !__riscv_atomic */ > diff --git a/sysdeps/riscv/nptl/pthread_spin_trylock.c b/sysdeps/riscv/nptl/pthread_spin_trylock.c > new file mode 100644 > index 0000000000..9bd5d80fd6 > --- /dev/null > +++ b/sysdeps/riscv/nptl/pthread_spin_trylock.c > @@ -0,0 +1,43 @@ > +/* Copyright (C) 2005 Free Software Foundation, Inc. > + This file is part of the GNU C Library. > + > + The GNU C Library is free software; you can redistribute it and/or > + modify it under the terms of the GNU Lesser General Public > + License as published by the Free Software Foundation; either > + version 2.1 of the License, or (at your option) any later version. > + > + The GNU C Library is distributed in the hope that it will be useful, > + but WITHOUT ANY WARRANTY; without even the implied warranty of > + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU > + Lesser General Public License for more details. > + > + You should have received a copy of the GNU Lesser General Public > + License along with the GNU C Library; if not, write to the Free > + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA > + 02111-1307 USA. */ > + > +#ifdef __riscv_atomic > + > +#include <errno.h> > + > +int pthread_spin_trylock(pthread_spinlock_t* lock) > +{ > + int tmp1, tmp2; > + > + asm volatile ("\n\ > + lw %0, 0(%2)\n\ > + li %1, %3\n\ > + bnez %0, 1f\n\ > + amoswap.w.aq %0, %1, 0(%2)\n\ > + 1:" > + : "=&r"(tmp1), "=&r"(tmp2) : "r"(lock), "i"(EBUSY) > + ); Likewise. > + > + return tmp1; > +} > + > +#else /* __riscv_atomic */ > + > +#include <sysdeps/../nptl/pthread_spin_trylock.c> > + > +#endif /* !__riscv_atomic */ > diff --git a/sysdeps/unix/sysv/linux/riscv/atomic-machine.h b/sysdeps/unix/sysv/linux/riscv/atomic-machine.h > new file mode 100644 > index 0000000000..5ce6f8da40 > --- /dev/null > +++ b/sysdeps/unix/sysv/linux/riscv/atomic-machine.h > @@ -0,0 +1,53 @@ > +/* Low-level functions for atomic operations. RISC-V version. > + Copyright (C) 2014-2017 Free Software Foundation, Inc. > + This file is part of the GNU C Library. 
> + > + The GNU C Library is free software; you can redistribute it and/or > + modify it under the terms of the GNU Lesser General Public > + License as published by the Free Software Foundation; either > + version 2.1 of the License, or (at your option) any later version. > + > + The GNU C Library is distributed in the hope that it will be useful, > + but WITHOUT ANY WARRANTY; without even the implied warranty of > + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU > + Lesser General Public License for more details. > + > + You should have received a copy of the GNU Lesser General Public > + License along with the GNU C Library; if not, write to the Free > + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA > + 02111-1307 USA. */ > + > +#ifndef _LINUX_RISCV_BITS_ATOMIC_H > +#define _LINUX_RISCV_BITS_ATOMIC_H 1 > + > +#include_next <atomic-machine.h> See above. > + > +#ifndef __riscv_atomic > + > +#include <sys/syscall.h> > +#include <sysdep.h> > + > +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ > + (abort (), (__typeof (*mem)) 0) > + > +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ > + (abort (), (__typeof (*mem)) 0) > + > +/* The only basic operation needed is compare and exchange. */ > +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ > + ({ \ > + INTERNAL_SYSCALL_DECL (__err); \ > + (__typeof (*mem)) INTERNAL_SYSCALL (sysriscv, __err, 4, \ > + RISCV_ATOMIC_CMPXCHG, mem, oldval, newval); \ > + }) > + > +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ > + ({ \ > + INTERNAL_SYSCALL_DECL (__err); \ > + (__typeof (*mem)) INTERNAL_SYSCALL (sysriscv, __err, 4, \ > + RISCV_ATOMIC_CMPXCHG64, mem, oldval, newval); \ > + }) > + You should add a comment explaining what your syscall does exactly, in particular why you don't need to use it for atomic stores.
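[As a point of reference for the aarch64-style suggestion above: the old-style glibc macros can be expressed directly in terms of the GCC __atomic built-ins. A minimal sketch (illustrative only, not the actual aarch64 file; macro spellings follow glibc's old-style atomics):]

/* Sketch only: map the old-style glibc atomics onto the GCC __atomic
   built-ins, as the aarch64 port does.  With the A extension these
   compile to lr/sc loops and amo* instructions carrying .aq/.rl.  */

#define atomic_compare_and_exchange_val_acq(mem, newval, oldval)       \
  ({ __typeof (*(mem)) __oldval = (oldval);                            \
     __atomic_compare_exchange_n ((mem), &__oldval, (newval), 0,       \
                                  __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); \
     __oldval; /* the value actually observed in *mem */ })

#define atomic_exchange_acq(mem, value) \
  __atomic_exchange_n ((mem), (value), __ATOMIC_ACQUIRE)

#define atomic_exchange_and_add_acq(mem, value) \
  __atomic_fetch_add ((mem), (value), __ATOMIC_ACQUIRE)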
On 06/20/2017 10:20 AM, Torvald Riegel wrote: > On Wed, 2017-06-14 at 11:30 -0700, Palmer Dabbelt wrote: >> This patch implements various atomic and locking routines on RISC-V, >> either via the A extension (when present) or via a Linux system call >> that does a compare-and-exchange. This contains both the library >> routines and the syscall wrapper. > > In the overview email you seemed to say that you only support the HW > variants that have atomics. If that's so, why don't you just check that > this is the case and produce a compile-time error if it isn't? > We do support variants without atomic instructions, although that is not the highest priority now. The current implementation uses a Linux syscall to emulate atomic operations (like Xtensa, ColdFire). Ultimately, we plan to replace this with an operation in the VDSO (like ARM) to improve performance. >> diff --git a/sysdeps/riscv/atomic-machine.h b/sysdeps/riscv/atomic-machine.h >> new file mode 100644 >> index 0000000000..c88dc1ce33 >> --- /dev/null >> +++ b/sysdeps/riscv/atomic-machine.h >> @@ -0,0 +1,132 @@ >> +/* Low-level functions for atomic operations. RISC-V version. >> + Copyright (C) 2011-2016 Free Software Foundation, Inc. >> + >> + Contributed by Andrew Waterman (andrew@sifive.com). >> + >> + This file is part of the GNU C Library. >> + >> + The GNU C Library is free software; you can redistribute it and/or >> + modify it under the terms of the GNU Lesser General Public >> + License as published by the Free Software Foundation; either >> + version 2.1 of the License, or (at your option) any later version. >> + >> + The GNU C Library is distributed in the hope that it will be useful, >> + but WITHOUT ANY WARRANTY; without even the implied warranty of >> + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU >> + Lesser General Public License for more details. >> + >> + You should have received a copy of the GNU Lesser General Public >> + License along with the GNU C Library; if not, write to the Free >> + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA >> + 02111-1307 USA. */ >> + >> +#ifndef _RISCV_BITS_ATOMIC_H >> +#define _RISCV_BITS_ATOMIC_H 1 >> + >> +#include <inttypes.h> >> + >> +typedef int32_t atomic32_t; >> +typedef uint32_t uatomic32_t; >> +typedef int_fast32_t atomic_fast32_t; >> +typedef uint_fast32_t uatomic_fast32_t; >> + >> +typedef int64_t atomic64_t; >> +typedef uint64_t uatomic64_t; >> +typedef int_fast64_t atomic_fast64_t; >> +typedef uint_fast64_t uatomic_fast64_t; > > I believe the *fast* types aren't used anymore (yes we should clean that > up). > >> +typedef intptr_t atomicptr_t; >> +typedef uintptr_t uatomicptr_t; >> +typedef intmax_t atomic_max_t; >> +typedef uintmax_t uatomic_max_t; >> + >> +#define atomic_full_barrier() __sync_synchronize() >> + >> +#ifdef __riscv_atomic >> + >> +#define __HAVE_64B_ATOMICS (__riscv_xlen >= 64) >> +#define USE_ATOMIC_COMPILER_BUILTINS 1 > > If the built-ins do what you want them to do, I suggest that you > implement the old-style glibc atomics using the compiler built-ins (see > aarch64 for example). > >> +/* MIPS uses swap on machines that have it, and uses CAS on machines that > > Refer to something else than MIPS I'd suggest, or clarify the link. > >> + * don't. This, we use amoswap when the A extension is enabled, and fall back >> + * to the atomic system call when the A extension is disabled. */ > > That's not what the code does. > >> +#ifdef __riscv_atomic > > You are already in a__riscv_atomic #ifdef (see above). 
> > If you assume that your HW is of the variant with atomics, then you > don't need to fall back to the kernel helper, or do you? > Indeed, you are correct. The helper is not necessary if hardware supports atomics. >> +# define ATOMIC_EXCHANGE_USES_CAS 0 >> +#else >> +# define ATOMIC_EXCHANGE_USES_CAS 1 > > The code for the old-style atomics doesn't seem to define an exchange; > do the compiler builtins have one? > Is that not atomic_exchange_acq and atomic_exchange_rel, or do I misunderstand you? >> +#endif >> + >> +#define asm_amo(which, ordering, mem, value) ({ \ >> + typeof(*mem) __tmp; \ >> + if (sizeof(__tmp) == 4) \ >> + asm volatile (which ".w" ordering "\t%0, %z2, %1" \ >> + : "=r"(__tmp), "+A"(*(mem)) \ >> + : "rJ"(value)); \ >> + else if (sizeof(__tmp) == 8) \ >> + asm volatile (which ".d" ordering "\t%0, %z2, %1" \ >> + : "=r"(__tmp), "+A"(*(mem)) \ >> + : "rJ"(value)); \ >> + else \ >> + abort(); \ >> + __tmp; }) >> + >> +/* Atomic compare and exchange. */ >> + >> +#define atomic_cas(ordering, mem, newval, oldval) ({ \ >> + typeof(*mem) __tmp; \ >> + int __tmp2; \ >> + if (sizeof(__tmp) == 4) \ >> + asm volatile ("1: lr.w" ordering "\t%0, %2\n" \ >> + "bne\t%0, %z4, 1f\n" \ >> + "sc.w" ordering "\t%1, %z3, %2\n" \ >> + "bnez\t%1, 1b\n" \ >> + "1:" \ >> + : "=&r"(__tmp), "=&r"(__tmp2), "+A"(*(mem)) \ >> + : "rJ"(newval), "rJ"(oldval)); \ >> + else if (sizeof(__tmp) == 8) \ >> + asm volatile ("1: lr.d" ordering "\t%0, %2\n" \ >> + "bne\t%0, %z4, 1f\n" \ >> + "sc.d" ordering "\t%1, %z3, %2\n" \ >> + "bnez\t%1, 1b\n" \ >> + "1:" \ >> + : "=&r"(__tmp), "=&r"(__tmp2), "+A"(*(mem)) \ >> + : "rJ"(newval), "rJ"(oldval)); \ >> + else \ >> + abort(); \ >> + __tmp; }) >> + >> +#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ >> + atomic_cas(".aq", mem, newval, oldval) >> + >> +#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \ >> + atomic_cas(".rl", mem, newval, oldval) >> + >> +/* Atomic exchange (without compare). */ >> + >> +#define atomic_exchange_acq(mem, value) asm_amo("amoswap", ".aq", mem, value) >> +#define atomic_exchange_rel(mem, value) asm_amo("amoswap", ".rl", mem, value) >> + >> + >> +/* Atomically add value and return the previous (unincremented) value. */ >> + >> +#define atomic_exchange_and_add(mem, value) asm_amo("amoadd", "", mem, value) > > The semantics of these old-style atomics aren't really well defined in > glibc (but we're moving to C11-like atomics anyway). The default is for > atomic_exchange_and_add to be the same as atomic_exchange_and_add_acq, > see for example include/atomic.h: > > #ifndef atomic_exchange_and_add_acq > # ifdef atomic_exchange_and_add > # define atomic_exchange_and_add_acq(mem, value) \ > atomic_exchange_and_add (mem, value) > > I suggest you keep it that way, instead of making them have relaxed MO. > > This will go away once we've completed the transition to C11 atomics. > >> + >> +#define atomic_max(mem, value) asm_amo("amomaxu", "", mem, value) >> +#define atomic_min(mem, value) asm_amo("amominu", "", mem, value) >> + >> +#define atomic_bit_test_set(mem, bit) \ >> + ({ typeof(*mem) __mask = (typeof(*mem))1 << (bit); \ >> + asm_amo("amoor", "", mem, __mask) & __mask; }) > > Likewise. > >> +#define catomic_exchange_and_add(mem, value) \ >> + atomic_exchange_and_add(mem, value) >> +#define catomic_max(mem, value) atomic_max(mem, value) >> + >> +#else /* __riscv_atomic */ >> + >> +#define __HAVE_64B_ATOMICS 0 >> +#define USE_ATOMIC_COMPILER_BUILTINS 0 > > You need to provide at least a CAS to make this work. 
Having this file > be included from the linux-specific variant is confusing, I think. Do > you really need to support glibc not running on a Linux kernel? > Otherwise, perhaps just use the linux-specific atomic-machine.h? > I'm not aware of a need to support glibc on anything but Linux, but I don't think we want to preclude it unnecessarily, either. Though we can use a Linux-specific file until there is a need otherwise. >> + >> +#endif /* !__riscv_atomic */ >> + >> +#endif /* bits/atomic.h */ >> diff --git a/sysdeps/riscv/nptl/bits/pthreadtypes-arch.h b/sysdeps/riscv/nptl/bits/pthreadtypes-arch.h >> new file mode 100644 >> index 0000000000..4473b0891a >> --- /dev/null >> +++ b/sysdeps/riscv/nptl/bits/pthreadtypes-arch.h >> @@ -0,0 +1,98 @@ >> +/* Machine-specific pthread type layouts. RISC-V version. >> + Copyright (C) 2011-2014, 2017 Free Software Foundation, Inc. >> + This file is part of the GNU C Library. >> + >> + The GNU C Library is free software; you can redistribute it and/or >> + modify it under the terms of the GNU Lesser General Public >> + License as published by the Free Software Foundation; either >> + version 2.1 of the License, or (at your option) any later version. >> + >> + The GNU C Library is distributed in the hope that it will be useful, >> + but WITHOUT ANY WARRANTY; without even the implied warranty of >> + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU >> + Lesser General Public License for more details. >> + >> + You should have received a copy of the GNU Lesser General Public >> + License along with the GNU C Library; if not, write to the Free >> + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA >> + 02111-1307 USA. */ >> + >> +#ifndef _BITS_PTHREADTYPES_ARCH_H >> +#define _BITS_PTHREADTYPES_ARCH_H 1 >> + >> +#include <endian.h> >> + >> +#if __riscv_xlen == 64 >> +# define __SIZEOF_PTHREAD_ATTR_T 56 >> +# define __SIZEOF_PTHREAD_MUTEX_T 40 >> +# define __SIZEOF_PTHREAD_MUTEXATTR_T 4 >> +# define __SIZEOF_PTHREAD_COND_T 48 >> +# define __SIZEOF_PTHREAD_CONDATTR_T 4 >> +# define __SIZEOF_PTHREAD_RWLOCK_T 56 >> +# define __SIZEOF_PTHREAD_RWLOCKATTR_T 8 >> +# define __SIZEOF_PTHREAD_BARRIER_T 32 >> +# define __SIZEOF_PTHREAD_BARRIERATTR_T 4 >> +#else >> +# define __SIZEOF_PTHREAD_ATTR_T 36 >> +# define __SIZEOF_PTHREAD_MUTEX_T 24 >> +# define __SIZEOF_PTHREAD_MUTEXATTR_T 4 >> +# define __SIZEOF_PTHREAD_COND_T 48 >> +# define __SIZEOF_PTHREAD_CONDATTR_T 4 >> +# define __SIZEOF_PTHREAD_RWLOCK_T 32 >> +# define __SIZEOF_PTHREAD_RWLOCKATTR_T 8 >> +# define __SIZEOF_PTHREAD_BARRIER_T 20 >> +# define __SIZEOF_PTHREAD_BARRIERATTR_T 4 >> +#endif >> + >> +#define __PTHREAD_COMPAT_PADDING_MID >> +#define __PTHREAD_COMPAT_PADDING_END >> +#define __PTHREAD_MUTEX_LOCK_ELISION 0 >> + >> +#define __LOCK_ALIGNMENT >> +#define __ONCE_ALIGNMENT >> + >> +struct __pthread_rwlock_arch_t >> +{ >> +# if __riscv_xlen == 64 >> + unsigned int __readers; >> + unsigned int __writers; >> + unsigned int __wrphase_futex; >> + unsigned int __writers_futex; >> + int __cur_writer; >> + int __shared; >> + int __pad3; >> + int __pad4; >> + unsigned long int __pad1; >> + unsigned long int __pad2; >> + /* FLAGS must stay at this position in the structure to maintain >> + binary compatibility. */ > > That's not the case, I assume, given that your port is new? > Correct, that should not be necessary. 
>> + unsigned int __flags; >> +# else >> + unsigned int __readers; >> + unsigned int __writers; >> + unsigned int __wrphase_futex; >> + unsigned int __writers_futex; >> + int __cur_writer; >> + int __pad3; >> +#if __BYTE_ORDER == __BIG_ENDIAN >> + unsigned char __pad1; >> + unsigned char __pad2; >> + unsigned char __shared; >> + /* FLAGS must stay at this position in the structure to maintain >> + binary compatibility. */ >> + unsigned char __flags; >> +#else >> + /* FLAGS must stay at this position in the structure to maintain >> + binary compatibility. */ >> + unsigned char __flags; >> + unsigned char __shared; >> + unsigned char __pad1; >> + unsigned char __pad2; >> +#endif >> + int __writer; >> +# endif >> +}; >> + >> +#define __PTHREAD_RWLOCK_ELISION_EXTRA 0 >> + >> +#endif /* bits/pthreadtypes.h */ > >> diff --git a/sysdeps/riscv/nptl/pthread_spin_lock.c b/sysdeps/riscv/nptl/pthread_spin_lock.c >> new file mode 100644 >> index 0000000000..dd5ffae97f >> --- /dev/null >> +++ b/sysdeps/riscv/nptl/pthread_spin_lock.c >> @@ -0,0 +1,43 @@ >> +/* Copyright (C) 2005 Free Software Foundation, Inc. >> + This file is part of the GNU C Library. >> + >> + The GNU C Library is free software; you can redistribute it and/or >> + modify it under the terms of the GNU Lesser General Public >> + License as published by the Free Software Foundation; either >> + version 2.1 of the License, or (at your option) any later version. >> + >> + The GNU C Library is distributed in the hope that it will be useful, >> + but WITHOUT ANY WARRANTY; without even the implied warranty of >> + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU >> + Lesser General Public License for more details. >> + >> + You should have received a copy of the GNU Lesser General Public >> + License along with the GNU C Library; if not, write to the Free >> + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA >> + 02111-1307 USA. */ >> + >> +#ifdef __riscv_atomic >> + >> +#include <errno.h> >> + >> +int pthread_spin_lock(pthread_spinlock_t* lock) >> +{ >> + int tmp1, tmp2; >> + >> + asm volatile ("\n\ >> + 1:lw %0, 0(%2)\n\ >> + li %1, %3\n\ >> + bnez %0, 1b\n\ >> + amoswap.w.aq %0, %1, 0(%2)\n\ >> + bnez %0, 1b" >> + : "=&r"(tmp1), "=&r"(tmp2) : "r"(lock), "i"(EBUSY) >> + ); > > Why do you need a custom spinlock? Why is the generic version not good > enough for you? > We try to have as little custom synchronization code as possible. > The generic spinlock should be fine. >> + return tmp1; >> +} >> + >> +#else /* __riscv_atomic */ >> + >> +#include <sysdeps/../nptl/pthread_spin_lock.c> >> + >> +#endif /* !__riscv_atomic */ >> diff --git a/sysdeps/riscv/nptl/pthread_spin_trylock.c b/sysdeps/riscv/nptl/pthread_spin_trylock.c >> new file mode 100644 >> index 0000000000..9bd5d80fd6 >> --- /dev/null >> +++ b/sysdeps/riscv/nptl/pthread_spin_trylock.c >> @@ -0,0 +1,43 @@ >> +/* Copyright (C) 2005 Free Software Foundation, Inc. >> + This file is part of the GNU C Library. >> + >> + The GNU C Library is free software; you can redistribute it and/or >> + modify it under the terms of the GNU Lesser General Public >> + License as published by the Free Software Foundation; either >> + version 2.1 of the License, or (at your option) any later version. >> + >> + The GNU C Library is distributed in the hope that it will be useful, >> + but WITHOUT ANY WARRANTY; without even the implied warranty of >> + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU >> + Lesser General Public License for more details. 
>> + >> + You should have received a copy of the GNU Lesser General Public >> + License along with the GNU C Library; if not, write to the Free >> + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA >> + 02111-1307 USA. */ >> + >> +#ifdef __riscv_atomic >> + >> +#include <errno.h> >> + >> +int pthread_spin_trylock(pthread_spinlock_t* lock) >> +{ >> + int tmp1, tmp2; >> + >> + asm volatile ("\n\ >> + lw %0, 0(%2)\n\ >> + li %1, %3\n\ >> + bnez %0, 1f\n\ >> + amoswap.w.aq %0, %1, 0(%2)\n\ >> + 1:" >> + : "=&r"(tmp1), "=&r"(tmp2) : "r"(lock), "i"(EBUSY) >> + ); > > Likewise. > >> + >> + return tmp1; >> +} >> + >> +#else /* __riscv_atomic */ >> + >> +#include <sysdeps/../nptl/pthread_spin_trylock.c> >> + >> +#endif /* !__riscv_atomic */ >> diff --git a/sysdeps/unix/sysv/linux/riscv/atomic-machine.h b/sysdeps/unix/sysv/linux/riscv/atomic-machine.h >> new file mode 100644 >> index 0000000000..5ce6f8da40 >> --- /dev/null >> +++ b/sysdeps/unix/sysv/linux/riscv/atomic-machine.h >> @@ -0,0 +1,53 @@ >> +/* Low-level functions for atomic operations. RISC-V version. >> + Copyright (C) 2014-2017 Free Software Foundation, Inc. >> + This file is part of the GNU C Library. >> + >> + The GNU C Library is free software; you can redistribute it and/or >> + modify it under the terms of the GNU Lesser General Public >> + License as published by the Free Software Foundation; either >> + version 2.1 of the License, or (at your option) any later version. >> + >> + The GNU C Library is distributed in the hope that it will be useful, >> + but WITHOUT ANY WARRANTY; without even the implied warranty of >> + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU >> + Lesser General Public License for more details. >> + >> + You should have received a copy of the GNU Lesser General Public >> + License along with the GNU C Library; if not, write to the Free >> + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA >> + 02111-1307 USA. */ >> + >> +#ifndef _LINUX_RISCV_BITS_ATOMIC_H >> +#define _LINUX_RISCV_BITS_ATOMIC_H 1 >> + >> +#include_next <atomic-machine.h> > > See above. > >> + >> +#ifndef __riscv_atomic >> + >> +#include <sys/syscall.h> >> +#include <sysdep.h> >> + >> +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ >> + (abort (), (__typeof (*mem)) 0) >> + >> +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ >> + (abort (), (__typeof (*mem)) 0) >> + >> +/* The only basic operation needed is compare and exchange. */ >> +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ >> + ({ \ >> + INTERNAL_SYSCALL_DECL (__err); \ >> + (__typeof (*mem)) INTERNAL_SYSCALL (sysriscv, __err, 4, \ >> + RISCV_ATOMIC_CMPXCHG, mem, oldval, newval); \ >> + }) >> + >> +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ >> + ({ \ >> + INTERNAL_SYSCALL_DECL (__err); \ >> + (__typeof (*mem)) INTERNAL_SYSCALL (sysriscv, __err, 4, \ >> + RISCV_ATOMIC_CMPXCHG64, mem, oldval, newval); \ >> + }) >> + > > You should add a comment explaining what your syscall does exactly, in > particular why you don't need to use it for atomic stores. > Understood about explaining the syscall. Regarding atomic stores, this comment in sysdeps/generic/atomic-machine.h suggests they are not necessary: "The only basic operation needed is compare and exchange." Separately, RISC-V guarantees naturally aligned stores are atomic in all cases (even without atomic instructions), although that may not be sufficient here.
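[Regarding the spinlock question answered above ("The generic spinlock should be fine"): once atomic_exchange_acq maps to amoswap.w.aq, nothing RISC-V-specific is needed, since the generic NPTL lock is essentially a test-and-test-and-set loop of this shape (a simplified sketch, not the exact glibc source):]

int
pthread_spin_lock (pthread_spinlock_t *lock)
{
  /* Grab the lock with an acquire exchange; while it is held, spin on
     plain loads so the cache line is not hammered with AMOs.  */
  while (atomic_exchange_acq (lock, 1) != 0)
    while (*(volatile int *) lock != 0)
      continue;
  return 0;
}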
On Thu, 2017-06-22 at 19:42 -0400, Darius Rad wrote: > On 06/20/2017 10:20 AM, Torvald Riegel wrote: > > On Wed, 2017-06-14 at 11:30 -0700, Palmer Dabbelt wrote: > >> This patch implements various atomic and locking routines on RISC-V, > >> either via the A extension (when present) or via a Linux system call > >> that does a compare-and-exchange. This contains both the library > >> routines and the syscall wrapper. > > > > In the overview email you seemed to say that you only support the HW > > variants that have atomics. If that's so, why don't you just check that > > this is the case and produce a compile-time error if it isn't? > > > > We do support variants without atomic instructions, although that is not > the highest priority now. The current implementation uses a Linux > syscall to emulate atomic operations (like Xtensa, ColdFire). > Ultimately, we plan to replace this with an operation in the VDSO (like > ARM) to improve performance. Thanks for providing some background. However, elsewhere in the thread for the whole patch set, you seemed to say that you'll only officially support target configurations that do have atomics (IIRC, that was a discussion with Joseph about the target triples). These officially supported ones are the configurations we'll test with build-many-glibcs.py. I think that it would be better to only have code in master that will be compiled by some build through build-many-glibcs.py (ie, that there's no dead code from that build coverage perspective). Does this clarify my question / concern? > >> +# define ATOMIC_EXCHANGE_USES_CAS 0 > >> +#else > >> +# define ATOMIC_EXCHANGE_USES_CAS 1 > > > > The code for the old-style atomics doesn't seem to define an exchange; > > do the compiler builtins have one? > > > > Is that not atomic_exchange_acq and atomic_exchange_rel, or do I > misunderstand you? You're right, you do make amoswap available. The compiler builtins should use it to. > > You need to provide at least a CAS to make this work. Having this file > > be included from the linux-specific variant is confusing, I think. Do > > you really need to support glibc not running on a Linux kernel? > > Otherwise, perhaps just use the linux-specific atomic-machine.h? > > > > I'm not aware of a need to support glibc on anything but Linux, but I > don't think we want to preclude it unnecessarily, either. Though we can > use a Linux-specific file until there is a need otherwise. The latter sounds better. But if it turns out that you officially support only target configurations with native atomics, then I'd suggest to just include that, and then you won't need the syscall fallback. > >> +#ifndef __riscv_atomic > >> + > >> +#include <sys/syscall.h> > >> +#include <sysdep.h> > >> + > >> +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ > >> + (abort (), (__typeof (*mem)) 0) > >> + > >> +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ > >> + (abort (), (__typeof (*mem)) 0) > >> + > >> +/* The only basic operation needed is compare and exchange. 
*/ > >> +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ > >> + ({ \ > >> + INTERNAL_SYSCALL_DECL (__err); \ > >> + (__typeof (*mem)) INTERNAL_SYSCALL (sysriscv, __err, 4, \ > >> + RISCV_ATOMIC_CMPXCHG, mem, oldval, newval); \ > >> + }) > >> + > >> +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ > >> + ({ \ > >> + INTERNAL_SYSCALL_DECL (__err); \ > >> + (__typeof (*mem)) INTERNAL_SYSCALL (sysriscv, __err, 4, \ > >> + RISCV_ATOMIC_CMPXCHG64, mem, oldval, newval); \ > >> + }) > >> + > > > > You should add a comment explaining what your syscall does exactly, in > > particular why you don't need to use it for atomic stores. > > > > Understood about explaining the syscall. > > Regarding atomic stores, this comment in > sysdeps/generic/atomic-machine.h suggests they are not necessary: "The > only basic operation needed is compare and exchange." That is true under the assumption that the CAS really is atomic wrt to atomic stores and loads (maybe we should clarify that...). If the syscall cannot be interrupted by stores, then you're good (eg, because there's just a single HW thread and your syscall cannot be interrupted, or because you stop all other threads in the syscall).
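[Looping back to the memory-order point from the first review: since include/atomic.h defaults atomic_exchange_and_add_acq to atomic_exchange_and_add, the simplest way to keep that convention on RISC-V is to encode acquire ordering in the base macros themselves, e.g. (a sketch reusing the patch's asm_amo helper):]

#define atomic_exchange_and_add(mem, value) asm_amo ("amoadd", ".aq", mem, value)
#define atomic_max(mem, value) asm_amo ("amomaxu", ".aq", mem, value)
#define atomic_min(mem, value) asm_amo ("amominu", ".aq", mem, value)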
On Fri, 23 Jun 2017, Torvald Riegel wrote:

> Thanks for providing some background. However, elsewhere in the thread
> for the whole patch set, you seemed to say that you'll only officially
> support target configurations that do have atomics (IIRC, that was a
> discussion with Joseph about the target triples). These officially
> supported ones are the configurations we'll test with
> build-many-glibcs.py. I think that it would be better to only have code
> in master that will be compiled by some build through
> build-many-glibcs.py (ie, that there's no dead code from that build
> coverage perspective).

I'm pretty sure lots of existing glibc architectures have code that's not
covered by build-many-glibcs.py at present, even if there are configurations
that could be added (e.g. building for another CPU variant) that would cover
building that code.  That doesn't mean it's a good thing - more
configurations to cover such code would be a good idea - but the present
minimum for build-many-glibcs.py coverage is that at least each different
ABI supported by glibcs is covered.
On 06/23/2017 05:41 AM, Torvald Riegel wrote: > On Thu, 2017-06-22 at 19:42 -0400, Darius Rad wrote: >> On 06/20/2017 10:20 AM, Torvald Riegel wrote: >>> On Wed, 2017-06-14 at 11:30 -0700, Palmer Dabbelt wrote: >>>> This patch implements various atomic and locking routines on RISC-V, >>>> either via the A extension (when present) or via a Linux system call >>>> that does a compare-and-exchange. This contains both the library >>>> routines and the syscall wrapper. >>> >>> In the overview email you seemed to say that you only support the HW >>> variants that have atomics. If that's so, why don't you just check that >>> this is the case and produce a compile-time error if it isn't? >>> >> >> We do support variants without atomic instructions, although that is not >> the highest priority now. The current implementation uses a Linux >> syscall to emulate atomic operations (like Xtensa, ColdFire). >> Ultimately, we plan to replace this with an operation in the VDSO (like >> ARM) to improve performance. > > Thanks for providing some background. However, elsewhere in the thread > for the whole patch set, you seemed to say that you'll only officially > support target configurations that do have atomics (IIRC, that was a > discussion with Joseph about the target triples). These officially > supported ones are the configurations we'll test with > build-many-glibcs.py. I think that it would be better to only have code > in master that will be compiled by some build through > build-many-glibcs.py (ie, that there's no dead code from that build > coverage perspective). > > Does this clarify my question / concern? > Yes, thank you. We'll ensure that build-many-glibcs.py is consistent with the features in the patch. >>>> +# define ATOMIC_EXCHANGE_USES_CAS 0 >>>> +#else >>>> +# define ATOMIC_EXCHANGE_USES_CAS 1 >>> >>> The code for the old-style atomics doesn't seem to define an exchange; >>> do the compiler builtins have one? >>> >> >> Is that not atomic_exchange_acq and atomic_exchange_rel, or do I >> misunderstand you? > > You're right, you do make amoswap available. The compiler builtins > should use it to. > >>> You need to provide at least a CAS to make this work. Having this file >>> be included from the linux-specific variant is confusing, I think. Do >>> you really need to support glibc not running on a Linux kernel? >>> Otherwise, perhaps just use the linux-specific atomic-machine.h? >>> >> >> I'm not aware of a need to support glibc on anything but Linux, but I >> don't think we want to preclude it unnecessarily, either. Though we can >> use a Linux-specific file until there is a need otherwise. > > The latter sounds better. But if it turns out that you officially > support only target configurations with native atomics, then I'd suggest > to just include that, and then you won't need the syscall fallback. > Understood. We'll use a single file, and locate it in the Linux area only if the atomic syscall is used. >>>> +#ifndef __riscv_atomic >>>> + >>>> +#include <sys/syscall.h> >>>> +#include <sysdep.h> >>>> + >>>> +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ >>>> + (abort (), (__typeof (*mem)) 0) >>>> + >>>> +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ >>>> + (abort (), (__typeof (*mem)) 0) >>>> + >>>> +/* The only basic operation needed is compare and exchange. 
*/ >>>> +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ >>>> + ({ \ >>>> + INTERNAL_SYSCALL_DECL (__err); \ >>>> + (__typeof (*mem)) INTERNAL_SYSCALL (sysriscv, __err, 4, \ >>>> + RISCV_ATOMIC_CMPXCHG, mem, oldval, newval); \ >>>> + }) >>>> + >>>> +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ >>>> + ({ \ >>>> + INTERNAL_SYSCALL_DECL (__err); \ >>>> + (__typeof (*mem)) INTERNAL_SYSCALL (sysriscv, __err, 4, \ >>>> + RISCV_ATOMIC_CMPXCHG64, mem, oldval, newval); \ >>>> + }) >>>> + >>> >>> You should add a comment explaining what your syscall does exactly, in >>> particular why you don't need to use it for atomic stores. >>> >> >> Understood about explaining the syscall. >> >> Regarding atomic stores, this comment in >> sysdeps/generic/atomic-machine.h suggests they are not necessary: "The >> only basic operation needed is compare and exchange." > > That is true under the assumption that the CAS really is atomic wrt to > atomic stores and loads (maybe we should clarify that...). If the > syscall cannot be interrupted by stores, then you're good (eg, because > there's just a single HW thread and your syscall cannot be interrupted, > or because you stop all other threads in the syscall). > Good to know. We should be safe because the CAS syscall is atomic with respect to loads and stores, due to the first reason you suggest (single processor, uninterruptable).
On Fri, 23 Jun 2017 02:41:29 PDT (-0700), triegel@redhat.com wrote: > On Thu, 2017-06-22 at 19:42 -0400, Darius Rad wrote: >> On 06/20/2017 10:20 AM, Torvald Riegel wrote: >> > On Wed, 2017-06-14 at 11:30 -0700, Palmer Dabbelt wrote: >> >> This patch implements various atomic and locking routines on RISC-V, >> >> either via the A extension (when present) or via a Linux system call >> >> that does a compare-and-exchange. This contains both the library >> >> routines and the syscall wrapper. >> > >> > In the overview email you seemed to say that you only support the HW >> > variants that have atomics. If that's so, why don't you just check that >> > this is the case and produce a compile-time error if it isn't? >> > >> >> We do support variants without atomic instructions, although that is not >> the highest priority now. The current implementation uses a Linux >> syscall to emulate atomic operations (like Xtensa, ColdFire). >> Ultimately, we plan to replace this with an operation in the VDSO (like >> ARM) to improve performance. > > Thanks for providing some background. However, elsewhere in the thread > for the whole patch set, you seemed to say that you'll only officially > support target configurations that do have atomics (IIRC, that was a > discussion with Joseph about the target triples). These officially > supported ones are the configurations we'll test with > build-many-glibcs.py. I think that it would be better to only have code > in master that will be compiled by some build through > build-many-glibcs.py (ie, that there's no dead code from that build > coverage perspective). > > Does this clarify my question / concern? Sorry, I guess I was a bit confused when answering the original question: we only expect users to build standard Linux systems with the following configurations * -march=rv32imac -mabi=ilp32 * -march=rv32imafdc -mabi=ilp32d * -march=rv64imac -mabi=lp64 * -march=rv64imafdc -mabi=lp64d but we want to support the full RISC-V ISA, as we've gotten all the other targets working and want to ensure they don't break. Thus we'll add many configurations to build-many-glibcs.py, including at least * -march=rv32i -mabi=ilp32 * -march=rv32imafdc -mabi=ilp32 * -march=rv32imafdc -mabi=ilp32f * -march=rv64i -mabi=lp64 * -march=rv64imafdc -mabi=lp64 * -march=rv64imafdc -mabi=lp64f which should serve to test all the code: the rv{32,64}i targets test just the base ISA (which will trigger the atomic and soft-float code), and the floating-point ISA with soft-float ABI tests that difference. >> >> +# define ATOMIC_EXCHANGE_USES_CAS 0 >> >> +#else >> >> +# define ATOMIC_EXCHANGE_USES_CAS 1 >> > >> > The code for the old-style atomics doesn't seem to define an exchange; >> > do the compiler builtins have one? >> > >> >> Is that not atomic_exchange_acq and atomic_exchange_rel, or do I >> misunderstand you? > > You're right, you do make amoswap available. The compiler builtins > should use it to. > >> > You need to provide at least a CAS to make this work. Having this file >> > be included from the linux-specific variant is confusing, I think. Do >> > you really need to support glibc not running on a Linux kernel? >> > Otherwise, perhaps just use the linux-specific atomic-machine.h? >> > >> >> I'm not aware of a need to support glibc on anything but Linux, but I >> don't think we want to preclude it unnecessarily, either. Though we can >> use a Linux-specific file until there is a need otherwise. > > The latter sounds better. 
But if it turns out that you officially > support only target configurations with native atomics, then I'd suggest > to just include that, and then you won't need the syscall fallback. > >> >> +#ifndef __riscv_atomic >> >> + >> >> +#include <sys/syscall.h> >> >> +#include <sysdep.h> >> >> + >> >> +#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ >> >> + (abort (), (__typeof (*mem)) 0) >> >> + >> >> +#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ >> >> + (abort (), (__typeof (*mem)) 0) >> >> + >> >> +/* The only basic operation needed is compare and exchange. */ >> >> +#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ >> >> + ({ \ >> >> + INTERNAL_SYSCALL_DECL (__err); \ >> >> + (__typeof (*mem)) INTERNAL_SYSCALL (sysriscv, __err, 4, \ >> >> + RISCV_ATOMIC_CMPXCHG, mem, oldval, newval); \ >> >> + }) >> >> + >> >> +#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ >> >> + ({ \ >> >> + INTERNAL_SYSCALL_DECL (__err); \ >> >> + (__typeof (*mem)) INTERNAL_SYSCALL (sysriscv, __err, 4, \ >> >> + RISCV_ATOMIC_CMPXCHG64, mem, oldval, newval); \ >> >> + }) >> >> + >> > >> > You should add a comment explaining what your syscall does exactly, in >> > particular why you don't need to use it for atomic stores. >> > >> >> Understood about explaining the syscall. >> >> Regarding atomic stores, this comment in >> sysdeps/generic/atomic-machine.h suggests they are not necessary: "The >> only basic operation needed is compare and exchange." > > That is true under the assumption that the CAS really is atomic wrt to > atomic stores and loads (maybe we should clarify that...). If the > syscall cannot be interrupted by stores, then you're good (eg, because > there's just a single HW thread and your syscall cannot be interrupted, > or because you stop all other threads in the syscall). There's two cases this syscall is used: * The kernel is built without A support. The kernel requires A support in order to enable SMP, so this means it's a uniprocessor system. In that case we just call preempt_{disable,enable} to get the atomicity, which isn't a big deal because we know we're on a uniprocessor. * The kernel is built with A support, but userspace is built without A support. In this case we might be on an SMP system, but can just perform the atomic operation in the kernel directly.
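[Putting Palmer's two cases together, the comment Torvald asked for could read roughly as follows above the syscall-based macros (wording illustrative):]

/* The sysriscv (RISCV_ATOMIC_CMPXCHG / RISCV_ATOMIC_CMPXCHG64) system
   call performs a compare-and-exchange on behalf of userspace.  On
   kernels built without the A extension the system is uniprocessor,
   and the kernel makes the load/compare/store sequence atomic by
   disabling preemption around it; on kernels built with the A
   extension it performs the operation with a native atomic
   instruction.  In both cases the CAS cannot be interleaved with other
   threads' loads and stores, so ordinary aligned stores remain atomic
   with respect to it and no syscall is needed for atomic stores.  */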
diff --git a/sysdeps/riscv/atomic-machine.h b/sysdeps/riscv/atomic-machine.h new file mode 100644 index 0000000000..c88dc1ce33 --- /dev/null +++ b/sysdeps/riscv/atomic-machine.h @@ -0,0 +1,132 @@ +/* Low-level functions for atomic operations. RISC-V version. + Copyright (C) 2011-2016 Free Software Foundation, Inc. + + Contributed by Andrew Waterman (andrew@sifive.com). + + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, write to the Free + Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + 02111-1307 USA. */ + +#ifndef _RISCV_BITS_ATOMIC_H +#define _RISCV_BITS_ATOMIC_H 1 + +#include <inttypes.h> + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef int64_t atomic64_t; +typedef uint64_t uatomic64_t; +typedef int_fast64_t atomic_fast64_t; +typedef uint_fast64_t uatomic_fast64_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define atomic_full_barrier() __sync_synchronize() + +#ifdef __riscv_atomic + +#define __HAVE_64B_ATOMICS (__riscv_xlen >= 64) +#define USE_ATOMIC_COMPILER_BUILTINS 1 + +/* MIPS uses swap on machines that have it, and uses CAS on machines that + * don't. This, we use amoswap when the A extension is enabled, and fall back + * to the atomic system call when the A extension is disabled. */ +#ifdef __riscv_atomic +# define ATOMIC_EXCHANGE_USES_CAS 0 +#else +# define ATOMIC_EXCHANGE_USES_CAS 1 +#endif + +#define asm_amo(which, ordering, mem, value) ({ \ + typeof(*mem) __tmp; \ + if (sizeof(__tmp) == 4) \ + asm volatile (which ".w" ordering "\t%0, %z2, %1" \ + : "=r"(__tmp), "+A"(*(mem)) \ + : "rJ"(value)); \ + else if (sizeof(__tmp) == 8) \ + asm volatile (which ".d" ordering "\t%0, %z2, %1" \ + : "=r"(__tmp), "+A"(*(mem)) \ + : "rJ"(value)); \ + else \ + abort(); \ + __tmp; }) + +/* Atomic compare and exchange. */ + +#define atomic_cas(ordering, mem, newval, oldval) ({ \ + typeof(*mem) __tmp; \ + int __tmp2; \ + if (sizeof(__tmp) == 4) \ + asm volatile ("1: lr.w" ordering "\t%0, %2\n" \ + "bne\t%0, %z4, 1f\n" \ + "sc.w" ordering "\t%1, %z3, %2\n" \ + "bnez\t%1, 1b\n" \ + "1:" \ + : "=&r"(__tmp), "=&r"(__tmp2), "+A"(*(mem)) \ + : "rJ"(newval), "rJ"(oldval)); \ + else if (sizeof(__tmp) == 8) \ + asm volatile ("1: lr.d" ordering "\t%0, %2\n" \ + "bne\t%0, %z4, 1f\n" \ + "sc.d" ordering "\t%1, %z3, %2\n" \ + "bnez\t%1, 1b\n" \ + "1:" \ + : "=&r"(__tmp), "=&r"(__tmp2), "+A"(*(mem)) \ + : "rJ"(newval), "rJ"(oldval)); \ + else \ + abort(); \ + __tmp; }) + +#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \ + atomic_cas(".aq", mem, newval, oldval) + +#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \ + atomic_cas(".rl", mem, newval, oldval) + +/* Atomic exchange (without compare). 
*/
+
+#define atomic_exchange_acq(mem, value) asm_amo("amoswap", ".aq", mem, value)
+#define atomic_exchange_rel(mem, value) asm_amo("amoswap", ".rl", mem, value)
+
+
+/* Atomically add value and return the previous (unincremented) value.  */
+
+#define atomic_exchange_and_add(mem, value) asm_amo("amoadd", "", mem, value)
+
+#define atomic_max(mem, value) asm_amo("amomaxu", "", mem, value)
+#define atomic_min(mem, value) asm_amo("amominu", "", mem, value)
+
+#define atomic_bit_test_set(mem, bit) \
+  ({ typeof(*mem) __mask = (typeof(*mem))1 << (bit); \
+     asm_amo("amoor", "", mem, __mask) & __mask; })
+
+#define catomic_exchange_and_add(mem, value) \
+  atomic_exchange_and_add(mem, value)
+#define catomic_max(mem, value) atomic_max(mem, value)
+
+#else /* __riscv_atomic */
+
+#define __HAVE_64B_ATOMICS 0
+#define USE_ATOMIC_COMPILER_BUILTINS 0
+
+#endif /* !__riscv_atomic */
+
+#endif /* bits/atomic.h */
diff --git a/sysdeps/riscv/nptl/bits/pthreadtypes-arch.h b/sysdeps/riscv/nptl/bits/pthreadtypes-arch.h
new file mode 100644
index 0000000000..4473b0891a
--- /dev/null
+++ b/sysdeps/riscv/nptl/bits/pthreadtypes-arch.h
@@ -0,0 +1,98 @@
+/* Machine-specific pthread type layouts.  RISC-V version.
+   Copyright (C) 2011-2014, 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#ifndef _BITS_PTHREADTYPES_ARCH_H
+#define _BITS_PTHREADTYPES_ARCH_H 1
+
+#include <endian.h>
+
+#if __riscv_xlen == 64
+# define __SIZEOF_PTHREAD_ATTR_T 56
+# define __SIZEOF_PTHREAD_MUTEX_T 40
+# define __SIZEOF_PTHREAD_MUTEXATTR_T 4
+# define __SIZEOF_PTHREAD_COND_T 48
+# define __SIZEOF_PTHREAD_CONDATTR_T 4
+# define __SIZEOF_PTHREAD_RWLOCK_T 56
+# define __SIZEOF_PTHREAD_RWLOCKATTR_T 8
+# define __SIZEOF_PTHREAD_BARRIER_T 32
+# define __SIZEOF_PTHREAD_BARRIERATTR_T 4
+#else
+# define __SIZEOF_PTHREAD_ATTR_T 36
+# define __SIZEOF_PTHREAD_MUTEX_T 24
+# define __SIZEOF_PTHREAD_MUTEXATTR_T 4
+# define __SIZEOF_PTHREAD_COND_T 48
+# define __SIZEOF_PTHREAD_CONDATTR_T 4
+# define __SIZEOF_PTHREAD_RWLOCK_T 32
+# define __SIZEOF_PTHREAD_RWLOCKATTR_T 8
+# define __SIZEOF_PTHREAD_BARRIER_T 20
+# define __SIZEOF_PTHREAD_BARRIERATTR_T 4
+#endif
+
+#define __PTHREAD_COMPAT_PADDING_MID
+#define __PTHREAD_COMPAT_PADDING_END
+#define __PTHREAD_MUTEX_LOCK_ELISION 0
+
+#define __LOCK_ALIGNMENT
+#define __ONCE_ALIGNMENT
+
+struct __pthread_rwlock_arch_t
+{
+# if __riscv_xlen == 64
+  unsigned int __readers;
+  unsigned int __writers;
+  unsigned int __wrphase_futex;
+  unsigned int __writers_futex;
+  int __cur_writer;
+  int __shared;
+  int __pad3;
+  int __pad4;
+  unsigned long int __pad1;
+  unsigned long int __pad2;
+  /* FLAGS must stay at this position in the structure to maintain
+     binary compatibility.  */
+  unsigned int __flags;
+# else
+  unsigned int __readers;
+  unsigned int __writers;
+  unsigned int __wrphase_futex;
+  unsigned int __writers_futex;
+  int __cur_writer;
+  int __pad3;
+#if __BYTE_ORDER == __BIG_ENDIAN
+  unsigned char __pad1;
+  unsigned char __pad2;
+  unsigned char __shared;
+  /* FLAGS must stay at this position in the structure to maintain
+     binary compatibility.  */
+  unsigned char __flags;
+#else
+  /* FLAGS must stay at this position in the structure to maintain
+     binary compatibility.  */
+  unsigned char __flags;
+  unsigned char __shared;
+  unsigned char __pad1;
+  unsigned char __pad2;
+#endif
+  int __writer;
+# endif
+};
+
+#define __PTHREAD_RWLOCK_ELISION_EXTRA 0
+
+#endif /* bits/pthreadtypes.h */
diff --git a/sysdeps/riscv/nptl/bits/semaphore.h b/sysdeps/riscv/nptl/bits/semaphore.h
new file mode 100644
index 0000000000..003c9c2bcc
--- /dev/null
+++ b/sysdeps/riscv/nptl/bits/semaphore.h
@@ -0,0 +1,33 @@
+/* Copyright (C) 2002, 2005, 2007, 2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#ifndef _SEMAPHORE_H
+# error "Never use <bits/semaphore.h> directly; include <semaphore.h> instead."
+#endif
+
+#define __SIZEOF_SEM_T (4 * __SIZEOF_POINTER__)
+
+/* Value returned if `sem_open' failed.  */
+#define SEM_FAILED ((sem_t *) 0)
+
+
+typedef union
+{
+  char __size[__SIZEOF_SEM_T];
+  long int __align;
+} sem_t;
diff --git a/sysdeps/riscv/nptl/libc-lowlevellock.c b/sysdeps/riscv/nptl/libc-lowlevellock.c
new file mode 100644
index 0000000000..0ecd41e630
--- /dev/null
+++ b/sysdeps/riscv/nptl/libc-lowlevellock.c
@@ -0,0 +1,8 @@
+/* This kludge works around a libpthread static linking problem:
+   https://sourceware.org/bugzilla/show_bug.cgi?id=15648  */
+
+#ifndef SHARED
+# define __lll_lock_wait_private weak_function __lll_lock_wait_private
+#endif
+
+#include <lowlevellock.c>
diff --git a/sysdeps/riscv/nptl/pthread_spin_lock.c b/sysdeps/riscv/nptl/pthread_spin_lock.c
new file mode 100644
index 0000000000..dd5ffae97f
--- /dev/null
+++ b/sysdeps/riscv/nptl/pthread_spin_lock.c
@@ -0,0 +1,43 @@
+/* Copyright (C) 2005 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#ifdef __riscv_atomic
+
+#include <errno.h>
+
+int pthread_spin_lock(pthread_spinlock_t* lock)
+{
+  int tmp1, tmp2;
+
+  asm volatile ("\n\
+	1:lw           %0, 0(%2)\n\
+	  li           %1, %3\n\
+	  bnez         %0, 1b\n\
+	  amoswap.w.aq %0, %1, 0(%2)\n\
+	  bnez         %0, 1b"
+	: "=&r"(tmp1), "=&r"(tmp2) : "r"(lock), "i"(EBUSY)
+  );
+
+  return tmp1;
+}
+
+#else /* __riscv_atomic */
+
+#include <sysdeps/../nptl/pthread_spin_lock.c>
+
+#endif /* !__riscv_atomic */
diff --git a/sysdeps/riscv/nptl/pthread_spin_trylock.c b/sysdeps/riscv/nptl/pthread_spin_trylock.c
new file mode 100644
index 0000000000..9bd5d80fd6
--- /dev/null
+++ b/sysdeps/riscv/nptl/pthread_spin_trylock.c
@@ -0,0 +1,43 @@
+/* Copyright (C) 2005 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#ifdef __riscv_atomic
+
+#include <errno.h>
+
+int pthread_spin_trylock(pthread_spinlock_t* lock)
+{
+  int tmp1, tmp2;
+
+  asm volatile ("\n\
+	  lw           %0, 0(%2)\n\
+	  li           %1, %3\n\
+	  bnez         %0, 1f\n\
+	  amoswap.w.aq %0, %1, 0(%2)\n\
+	1:"
+	: "=&r"(tmp1), "=&r"(tmp2) : "r"(lock), "i"(EBUSY)
+  );
+
+  return tmp1;
+}
+
+#else /* __riscv_atomic */
+
+#include <sysdeps/../nptl/pthread_spin_trylock.c>
+
+#endif /* !__riscv_atomic */
diff --git a/sysdeps/unix/sysv/linux/riscv/atomic-machine.h b/sysdeps/unix/sysv/linux/riscv/atomic-machine.h
new file mode 100644
index 0000000000..5ce6f8da40
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/riscv/atomic-machine.h
@@ -0,0 +1,53 @@
+/* Low-level functions for atomic operations.  RISC-V version.
+   Copyright (C) 2014-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#ifndef _LINUX_RISCV_BITS_ATOMIC_H
+#define _LINUX_RISCV_BITS_ATOMIC_H 1
+
+#include_next <atomic-machine.h>
+
+#ifndef __riscv_atomic
+
+#include <sys/syscall.h>
+#include <sysdep.h>
+
+#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
+  (abort (), (__typeof (*mem)) 0)
+
+#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
+  (abort (), (__typeof (*mem)) 0)
+
+/* The only basic operation needed is compare and exchange.  */
+#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
+  ({ \
+    INTERNAL_SYSCALL_DECL (__err); \
+    (__typeof (*mem)) INTERNAL_SYSCALL (sysriscv, __err, 4, \
+      RISCV_ATOMIC_CMPXCHG, mem, oldval, newval); \
+  })
+
+#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+  ({ \
+    INTERNAL_SYSCALL_DECL (__err); \
+    (__typeof (*mem)) INTERNAL_SYSCALL (sysriscv, __err, 4, \
+      RISCV_ATOMIC_CMPXCHG64, mem, oldval, newval); \
+  })
+
+#endif /* __riscv_atomic */
+
+#endif /* bits/atomic.h */
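
For readers skimming the assembly: the amoswap.w.aq-based spin-lock code above
can be restated in plain C with GCC's __atomic builtins, which the compiler
lowers to the same AMO instructions when the A extension is available.  The
sketch below is illustrative only, it is not part of the patch, and the
sketch_* function names are placeholders.

  /* Illustrative sketch (not part of the patch): C restatement of the
     inline-assembly spin lock above.  A held lock contains EBUSY, so the
     old value returned by the exchange doubles as the return code.  */
  #include <errno.h>
  #include <pthread.h>

  int
  sketch_spin_lock (pthread_spinlock_t *lock)
  {
    for (;;)
      {
        /* Spin on plain loads first (the "1: lw ... bnez ... 1b" loop),
           so the AMO is only attempted once the lock looks free.  */
        while (__atomic_load_n (lock, __ATOMIC_RELAXED) != 0)
          ;
        /* amoswap.w.aq: swap EBUSY in with acquire ordering; an old value
           of zero means the lock was free and is now ours.  */
        if (__atomic_exchange_n (lock, EBUSY, __ATOMIC_ACQUIRE) == 0)
          return 0;
      }
  }

  int
  sketch_spin_trylock (pthread_spinlock_t *lock)
  {
    /* Single attempt: if the lock already looks taken, report EBUSY;
       otherwise the exchange returns 0 on success, or EBUSY if another
       thread won the race in between.  */
    if (__atomic_load_n (lock, __ATOMIC_RELAXED) != 0)
      return EBUSY;
    return __atomic_exchange_n (lock, EBUSY, __ATOMIC_ACQUIRE);
  }

Without the A extension, the Linux atomic-machine.h above provides only the
32- and 64-bit compare-and-exchange operations, each implemented as a sysriscv
system call (RISCV_ATOMIC_CMPXCHG / RISCV_ATOMIC_CMPXCHG64), so everything
else has to be derived from that CAS.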