| Message ID | a1176e19e627dd6a1b8d24c6c457a8ab874b7d12.1659430931.git.christophe.leroy@csgroup.eu (mailing list archive) |
|---|---|
| State | Accepted |
| Series | [v1,1/3] powerpc: Fix eh field when calling lwarx on PPC32 |
Hi!

On Tue, Aug 02, 2022 at 11:02:36AM +0200, Christophe Leroy wrote:
> Commit 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of
> PPC_LWARX/LDARX macros") properly handled the eh field of lwarx
> in asm/bitops.h but failed to clear it for PPC32 in
> asm/simple_spinlock.h
>
> So, do as in arch_atomic_try_cmpxchg_lock(), set it to 1 if PPC64
> but set it to 0 if PPC32. For that use IS_ENABLED(CONFIG_PPC64) which
> returns 1 when CONFIG_PPC64 is set and 0 otherwise.
>
> Reported-by: Pali Rohár <pali@kernel.org>

Reviewed-by: Segher Boessenkool <segher@kernel.crashing.org>

> +	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
>
>  	token = LOCK_TOKEN;
>  	__asm__ __volatile__(
> -"1:	lwarx		%0,0,%2,1\n\
> +"1:	lwarx		%0,0,%2,%3\n\
>  	cmpwi		0,%0,0\n\
>  	bne-		2f\n\
>  	stwcx.		%1,0,%2\n\
> @@ -59,7 +60,7 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
>  	PPC_ACQUIRE_BARRIER
>  "2:"
>  	: "=&r" (tmp)
> -	: "r" (token), "r" (&lock->slock)
> +	: "r" (token), "r" (&lock->slock), "i" (eh)
>  	: "cr0", "memory");

That should work, yes. But please note that "n" is preferred if a number is
required (like here), rather than some other constant as allowed by "i".

Thanks!


Segher
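For context on Segher's remark: with GCC/Clang inline asm, "i" accepts any constant operand, including symbolic addresses that are only resolved at link time, whereas "n" accepts only integer constants with a known numeric value, which is what an instruction field such as lwarx's EH hint needs. Below is a minimal, hypothetical sketch of an "n"-constrained operand (powerpc-only; the names and the fixed hint value are made up for illustration and are not part of the patch):

/*
 * Hypothetical demo of the "n" constraint: the operand must be an
 * integer constant with a known numeric value, so it can be encoded
 * directly into the instruction's EH field.
 */
#define DEMO_EH	0	/* kept at 0 so the form is also valid on 32-bit CPUs */

static inline unsigned int demo_load_reserve(unsigned int *p)
{
	unsigned int val;

	__asm__ __volatile__(
		"lwarx	%0,0,%1,%2\n"	/* %2 is emitted as the EH field */
		: "=&r" (val)
		: "r" (p), "n" (DEMO_EH)
		: "memory");

	return val;	/* reservation is left set; real users pair this with stwcx. */
}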
On Tuesday 02 August 2022 11:02:36 Christophe Leroy wrote:
> Commit 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of
> PPC_LWARX/LDARX macros") properly handled the eh field of lwarx
> in asm/bitops.h but failed to clear it for PPC32 in
> asm/simple_spinlock.h
>
> So, do as in arch_atomic_try_cmpxchg_lock(), set it to 1 if PPC64
> but set it to 0 if PPC32. For that use IS_ENABLED(CONFIG_PPC64) which
> returns 1 when CONFIG_PPC64 is set and 0 otherwise.
>
> Reported-by: Pali Rohár <pali@kernel.org>
> Fixes: 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of PPC_LWARX/LDARX macros")
> Cc: stable@vger.kernel.org
> Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>

This fix works perfectly. Thanks!

Tested-by: Pali Rohár <pali@kernel.org>

> ---
>  arch/powerpc/include/asm/simple_spinlock.h | 15 +++++++++------
>  1 file changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
> index 7ae6aeef8464..5095c636a680 100644
> --- a/arch/powerpc/include/asm/simple_spinlock.h
> +++ b/arch/powerpc/include/asm/simple_spinlock.h
> @@ -48,10 +48,11 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
>  static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
>  {
>  	unsigned long tmp, token;
> +	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
>
>  	token = LOCK_TOKEN;
>  	__asm__ __volatile__(
> -"1:	lwarx		%0,0,%2,1\n\
> +"1:	lwarx		%0,0,%2,%3\n\
>  	cmpwi		0,%0,0\n\
>  	bne-		2f\n\
>  	stwcx.		%1,0,%2\n\
> @@ -59,7 +60,7 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
>  	PPC_ACQUIRE_BARRIER
>  "2:"
>  	: "=&r" (tmp)
> -	: "r" (token), "r" (&lock->slock)
> +	: "r" (token), "r" (&lock->slock), "i" (eh)
>  	: "cr0", "memory");
>
>  	return tmp;
> @@ -156,9 +157,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
>  static inline long __arch_read_trylock(arch_rwlock_t *rw)
>  {
>  	long tmp;
> +	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
>
>  	__asm__ __volatile__(
> -"1:	lwarx		%0,0,%1,1\n"
> +"1:	lwarx		%0,0,%1,%2\n"
>  	__DO_SIGN_EXTEND
>  "	addic.		%0,%0,1\n\
>  	ble-		2f\n"
> @@ -166,7 +168,7 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
>  	bne-		1b\n"
>  	PPC_ACQUIRE_BARRIER
>  "2:"	: "=&r" (tmp)
> -	: "r" (&rw->lock)
> +	: "r" (&rw->lock), "i" (eh)
>  	: "cr0", "xer", "memory");
>
>  	return tmp;
> @@ -179,17 +181,18 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
>  static inline long __arch_write_trylock(arch_rwlock_t *rw)
>  {
>  	long tmp, token;
> +	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
>
>  	token = WRLOCK_TOKEN;
>  	__asm__ __volatile__(
> -"1:	lwarx		%0,0,%2,1\n\
> +"1:	lwarx		%0,0,%2,%3\n\
>  	cmpwi		0,%0,0\n\
>  	bne-		2f\n"
>  "	stwcx.		%1,0,%2\n\
>  	bne-		1b\n"
>  	PPC_ACQUIRE_BARRIER
>  "2:"	: "=&r" (tmp)
> -	: "r" (token), "r" (&rw->lock)
> +	: "r" (token), "r" (&rw->lock), "i" (eh)
>  	: "cr0", "memory");
>
>  	return tmp;
> --
> 2.36.1
>
On Tue, 2 Aug 2022 11:02:36 +0200, Christophe Leroy wrote:
> Commit 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of
> PPC_LWARX/LDARX macros") properly handled the eh field of lwarx
> in asm/bitops.h but failed to clear it for PPC32 in
> asm/simple_spinlock.h
>
> So, do as in arch_atomic_try_cmpxchg_lock(), set it to 1 if PPC64
> but set it to 0 if PPC32. For that use IS_ENABLED(CONFIG_PPC64) which
> returns 1 when CONFIG_PPC64 is set and 0 otherwise.
>
> [...]

Applied to powerpc/fixes.

[1/3] powerpc: Fix eh field when calling lwarx on PPC32
      https://git.kernel.org/powerpc/c/18db466a9a306406dab3b134014d9f6ed642471c
[2/3] powerpc: Don't hide eh field of lwarx behind a macro
      https://git.kernel.org/powerpc/c/eb5a33ea31190c189ca4a59de4687b0877662c06
[3/3] powerpc: Make eh value more explicit when using lwarx
      https://git.kernel.org/powerpc/c/5cccf7a5215d12027e55e247907817631b413c28

cheers
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 7ae6aeef8464..5095c636a680 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -48,10 +48,11 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned long tmp, token;
+	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
 
 	token = LOCK_TOKEN;
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%2,1\n\
+"1:	lwarx		%0,0,%2,%3\n\
 	cmpwi		0,%0,0\n\
 	bne-		2f\n\
 	stwcx.		%1,0,%2\n\
@@ -59,7 +60,7 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
 	PPC_ACQUIRE_BARRIER
 "2:"
 	: "=&r" (tmp)
-	: "r" (token), "r" (&lock->slock)
+	: "r" (token), "r" (&lock->slock), "i" (eh)
 	: "cr0", "memory");
 
 	return tmp;
@@ -156,9 +157,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 static inline long __arch_read_trylock(arch_rwlock_t *rw)
 {
 	long tmp;
+	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
 
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%1,1\n"
+"1:	lwarx		%0,0,%1,%2\n"
 	__DO_SIGN_EXTEND
 "	addic.		%0,%0,1\n\
 	ble-		2f\n"
@@ -166,7 +168,7 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
 	bne-		1b\n"
 	PPC_ACQUIRE_BARRIER
 "2:"	: "=&r" (tmp)
-	: "r" (&rw->lock)
+	: "r" (&rw->lock), "i" (eh)
 	: "cr0", "xer", "memory");
 
 	return tmp;
@@ -179,17 +181,18 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
 static inline long __arch_write_trylock(arch_rwlock_t *rw)
 {
 	long tmp, token;
+	unsigned int eh = IS_ENABLED(CONFIG_PPC64);
 
 	token = WRLOCK_TOKEN;
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%2,1\n\
+"1:	lwarx		%0,0,%2,%3\n\
 	cmpwi		0,%0,0\n\
 	bne-		2f\n"
 "	stwcx.		%1,0,%2\n\
 	bne-		1b\n"
 	PPC_ACQUIRE_BARRIER
 "2:"	: "=&r" (tmp)
-	: "r" (token), "r" (&rw->lock)
+	: "r" (token), "r" (&rw->lock), "i" (eh)
 	: "cr0", "memory");
 
 	return tmp;
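As a stand-alone illustration of the pattern the patch applies, the sketch below picks the EH hint per configuration and hands it to lwarx as a constant operand. It is hypothetical and simplified: __powerpc64__ stands in for the kernel's CONFIG_PPC64 test, and LOCK_TOKEN / PPC_ACQUIRE_BARRIER are omitted, so it is not a usable spinlock, only a picture of how the "%3" operand reaches the instruction:

/*
 * Simplified, hypothetical sketch of the fixed pattern (powerpc-only).
 * The EH hint is a compile-time constant: 1 on 64-bit builds, 0 on
 * 32-bit builds where the hint must stay clear.
 */
#ifdef __powerpc64__
#define DEMO_EH	1
#else
#define DEMO_EH	0
#endif

static inline unsigned int demo_trylock(unsigned int *lock)
{
	unsigned int old;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2,%3\n"	/* load-reserve; %3 becomes the EH field */
"	cmpwi	0,%0,0\n"	/* already taken? */
"	bne-	2f\n"
"	stwcx.	%1,0,%2\n"	/* try to store the lock value */
"	bne-	1b\n"		/* lost the reservation, retry */
"2:"
	: "=&r" (old)
	: "r" (1), "r" (lock), "i" (DEMO_EH)
	: "cr0", "memory");

	return old;	/* 0 means the lock was free (no acquire barrier in this sketch) */
}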
Commit 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of
PPC_LWARX/LDARX macros") properly handled the eh field of lwarx
in asm/bitops.h but failed to clear it for PPC32 in
asm/simple_spinlock.h

So, do as in arch_atomic_try_cmpxchg_lock(), set it to 1 if PPC64
but set it to 0 if PPC32. For that use IS_ENABLED(CONFIG_PPC64) which
returns 1 when CONFIG_PPC64 is set and 0 otherwise.

Reported-by: Pali Rohár <pali@kernel.org>
Fixes: 9401f4e46cf6 ("powerpc: Use lwarx/ldarx directly instead of PPC_LWARX/LDARX macros")
Cc: stable@vger.kernel.org
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
---
 arch/powerpc/include/asm/simple_spinlock.h | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
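The commit message relies on IS_ENABLED(CONFIG_PPC64) collapsing to the constant 1 or 0 at compile time. Below is a simplified, stand-alone approximation of that preprocessor trick; it is not the kernel's exact definition from include/linux/kconfig.h (which also handles =m options), and MY_IS_ENABLED and the helper names are made up:

/*
 * Boolean Kconfig options are defined as 1 in the generated autoconf.h
 * and left undefined otherwise.  The indirection below turns "defined
 * as 1" into 1 and "not defined" into 0, entirely in the preprocessor.
 */
#define ARG_PLACEHOLDER_1		0,
#define take_second_arg(ignored, val, ...)	val
#define is_defined_step2(arg1_or_junk)	take_second_arg(arg1_or_junk 1, 0)
#define is_defined_step1(val)		is_defined_step2(ARG_PLACEHOLDER_##val)
#define MY_IS_ENABLED(option)		is_defined_step1(option)

static const unsigned int eh = MY_IS_ENABLED(CONFIG_PPC64);	/* 1 on 64-bit builds, 0 otherwise */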