
[RFC,v2,6/7] powerpc: atomic: Make atomic{,64}_xchg and xchg a full barrier

Message ID 1442418575-12297-7-git-send-email-boqun.feng@gmail.com (mailing list archive)
State Superseded, archived

Commit Message

Boqun Feng Sept. 16, 2015, 3:49 p.m. UTC
According to memory-barriers.txt, xchg and its atomic{,64}_ versions
need to imply a full barrier; however, they are currently only
RELEASE+ACQUIRE, which is not a full barrier.
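
To illustrate the difference, consider a store-buffering style litmus
test in the style of memory-barriers.txt (an editorial sketch with
made-up variables x, y and s; not part of the original patch):

	CPU 0				CPU 1
	=====				=====
	WRITE_ONCE(x, 1);		WRITE_ONCE(y, 1);
	r0 = xchg(&s, 1);		smp_mb();
	r1 = READ_ONCE(y);		r2 = READ_ONCE(x);

A fully ordered xchg() forbids the outcome (r1 == 0 && r2 == 0). With
only lwsync before and isync after the ll/sc sequence, the store to x
is not ordered against the later load from y (lwsync does not order
store->load), so that outcome remains observable on PowerPC.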

So remove the definition of xchg(), and let __atomic_op_fence() build
the full-barrier versions of these operations.
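
For reference, once the private definition is gone, include/linux/atomic.h
builds xchg() from xchg_relaxed() via the generic fallback, which at the
time looked roughly like this (architectures may override it):

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})

Bracketing the relaxed operation with full barriers restores the
full-barrier semantics that RELEASE+ACQUIRE alone cannot provide.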

Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
---
 arch/powerpc/include/asm/cmpxchg.h | 64 --------------------------------------
 1 file changed, 64 deletions(-)

Comments

Peter Zijlstra Oct. 1, 2015, 12:28 p.m. UTC | #1
On Wed, Sep 16, 2015 at 11:49:34PM +0800, Boqun Feng wrote:
> According to memory-barriers.txt, xchg and its atomic{,64}_ versions
> need to imply a full barrier; however, they are currently only
> RELEASE+ACQUIRE, which is not a full barrier.
> 
> So remove the definition of xchg(), and let __atomic_op_fence() build
> the full-barrier versions of these operations.

Do you want to do a patch for -stable fixing the current implementation?
Boqun Feng Oct. 1, 2015, 11:19 p.m. UTC | #2
Hi Peter,

Please forgive me for the format of my reply. I'm travelling,
and replying from my phone.

On Oct 1, 2015, at 7:28 PM, "Peter Zijlstra" <peterz@infradead.org> wrote:
>
> On Wed, Sep 16, 2015 at 11:49:34PM +0800, Boqun Feng wrote:
> > According to memory-barriers.txt, xchg and its atomic{,64}_ versions
> > need to imply a full barrier; however, they are currently only
> > RELEASE+ACQUIRE, which is not a full barrier.
> >
> > So remove the definition of xchg(), and let __atomic_op_fence() build
> > the full-barrier versions of these operations.
>
> Do you want to do a patch for -stable fixing the current implementation?

Good idea! I didn't think of this before, and I'd love to do the patch,
but the thing is that I'm not able to use my laptop until Oct 10th.
I will send the patch once I'm back.
Does that work for you?

Regards,
Boqun
Peter Zijlstra Oct. 2, 2015, 5:25 a.m. UTC | #3
On Fri, Oct 02, 2015 at 07:19:04AM +0800, Boqun Feng wrote:
> Hi Peter,
> 
> Please forgive me for the format of my reply. I'm travelling,
> and replying from my phone.
> 
> On Oct 1, 2015, at 7:28 PM, "Peter Zijlstra" <peterz@infradead.org> wrote:
> >
> > On Wed, Sep 16, 2015 at 11:49:34PM +0800, Boqun Feng wrote:
> > > According to memory-barriers.txt, xchg and its atomic{,64}_ versions
> > > need to imply a full barrier; however, they are currently only
> > > RELEASE+ACQUIRE, which is not a full barrier.
> > >
> > > So remove the definition of xchg(), and let __atomic_op_fence() build
> > > the full-barrier versions of these operations.
> >
> > Do you want to do a patch for -stable fixing the current implementation?
> 
> Good idea! I didn't think of this before, and I'd love to do the patch,
> but the thing is that I'm not able to use my laptop until Oct 10th.
> I will send the patch once I'm back.
> Does that work for you?

Sure, no hurry.
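
For -stable, a minimal fix could keep the existing implementation but
strengthen its barriers, e.g. by switching to the PPC_ATOMIC_ENTRY_BARRIER/
PPC_ATOMIC_EXIT_BARRIER pair already used by the value-returning atomics.
A sketch of that direction for the u32 case (illustrative only, not the
patch that was eventually posted):

static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER	/* was PPC_RELEASE_BARRIER (lwsync) */
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER		/* was PPC_ACQUIRE_BARRIER (isync) */
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

The trailing sync supplies the store->load ordering that the isync-based
acquire barrier cannot.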

Patch

diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index f40f295..9f0379a 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -12,31 +12,7 @@ 
  * Changes the memory location '*ptr' to be val and returns
  * the previous value stored there.
  */
-static __always_inline unsigned long
-__xchg_u32(volatile void *p, unsigned long val)
-{
-	unsigned long prev;
 
-	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
-"1:	lwarx	%0,0,%2 \n"
-	PPC405_ERR77(0,%2)
-"	stwcx.	%3,0,%2 \n\
-	bne-	1b"
-	PPC_ACQUIRE_BARRIER
-	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
-	: "r" (p), "r" (val)
-	: "cc", "memory");
-
-	return prev;
-}
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- */
 static __always_inline unsigned long
 __xchg_u32_local(volatile void *p, unsigned long val)
 {
@@ -82,25 +58,6 @@  __xchg_u32_relaxed(u32 *p, unsigned long val)
 
 #ifdef CONFIG_PPC64
 static __always_inline unsigned long
-__xchg_u64(volatile void *p, unsigned long val)
-{
-	unsigned long prev;
-
-	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
-"1:	ldarx	%0,0,%2 \n"
-	PPC405_ERR77(0,%2)
-"	stdcx.	%3,0,%2 \n\
-	bne-	1b"
-	PPC_ACQUIRE_BARRIER
-	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
-	: "r" (p), "r" (val)
-	: "cc", "memory");
-
-	return prev;
-}
-
-static __always_inline unsigned long
 __xchg_u64_local(volatile void *p, unsigned long val)
 {
 	unsigned long prev;
@@ -142,21 +99,6 @@  __xchg_u64_relaxed(u64 *p, unsigned long val)
 extern void __xchg_called_with_bad_pointer(void);
 
 static __always_inline unsigned long
-__xchg(volatile void *ptr, unsigned long x, unsigned int size)
-{
-	switch (size) {
-	case 4:
-		return __xchg_u32(ptr, x);
-#ifdef CONFIG_PPC64
-	case 8:
-		return __xchg_u64(ptr, x);
-#endif
-	}
-	__xchg_called_with_bad_pointer();
-	return x;
-}
-
-static __always_inline unsigned long
 __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
 {
 	switch (size) {
@@ -185,12 +127,6 @@  __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
 	__xchg_called_with_bad_pointer();
 	return x;
 }
-#define xchg(ptr,x)							     \
-  ({									     \
-     __typeof__(*(ptr)) _x_ = (x);					     \
-     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
-  })
-
 #define xchg_local(ptr,x)						     \
   ({									     \
      __typeof__(*(ptr)) _x_ = (x);					     \