Commit a45c8a06 authored by Boqun Feng, committed by Greg Kroah-Hartman

powerpc: Make {cmp}xchg* and their atomic_ versions fully ordered

commit 81d7a329 upstream.

According to memory-barriers.txt, xchg*, cmpxchg* and their atomic_
versions all need to be fully ordered; however, they are currently only
RELEASE+ACQUIRE, which is not fully ordered.
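
To see why RELEASE+ACQUIRE is weaker than full ordering, consider a
store-buffering style test (an illustrative sketch, not part of this
patch; x, y and s are hypothetical shared variables, all initially zero):

	CPU 0				CPU 1
	-----				-----
	WRITE_ONCE(x, 1);		WRITE_ONCE(y, 1);
	r0 = xchg(&s, 1);		smp_mb();
	r1 = READ_ONCE(y);		r2 = READ_ONCE(x);

If xchg() were fully ordered, the outcome (r1 == 0 && r2 == 0) would be
forbidden. With only lwsync before and isync after the lwarx/stwcx.
loop, nothing orders CPU 0's earlier store to x against its later load
from y, so that outcome can be observed.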

So also replace PPC_RELEASE_BARRIER and PPC_ACQUIRE_BARRIER with
PPC_ATOMIC_ENTRY_BARRIER and PPC_ATOMIC_EXIT_BARRIER, respectively, in
__{cmp,}xchg_{u32,u64}() to guarantee fully ordered semantics of
atomic{,64}_{cmp,}xchg() and {cmp,}xchg(), as a complement to commit
b97021f8 ("powerpc: Fix atomic_xxx_return barrier semantics").

This patch depends on the patch "powerpc: Make value-returning atomics
fully ordered" for the PPC_ATOMIC_ENTRY_BARRIER definition; a simplified
sketch of the barrier macros involved appears below.
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 2af28251
@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%3,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
 	: "r" (p), "r" (val)
 	: "cc", "memory");
@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val)
 	unsigned long prev;
 
 	__asm__ __volatile__(
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%2 \n"
 	PPC405_ERR77(0,%2)
 "	stdcx.	%3,0,%2 \n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
 	: "r" (p), "r" (val)
 	: "cc", "memory");
@@ -152,14 +152,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 	unsigned int prev;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
 	cmpw	0,%0,%3\n\
 	bne-	2f\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%4,0,%2\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	"\n\
 2:"
 	: "=&r" (prev), "+m" (*p)
@@ -198,13 +198,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 	unsigned long prev;
 
 	__asm__ __volatile__ (
-	PPC_RELEASE_BARRIER
+	PPC_ATOMIC_ENTRY_BARRIER
 "1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
 	cmpd	0,%0,%3\n\
 	bne-	2f\n\
 	stdcx.	%4,0,%2\n\
 	bne-	1b"
-	PPC_ACQUIRE_BARRIER
+	PPC_ATOMIC_EXIT_BARRIER
 	"\n\
 2:"
 	: "=&r" (prev), "+m" (*p)