Commit 221a5a6b authored by David S. Miller

[SPARC64]: Add missing membars for xchg() and cmpxchg().

Signed-off-by: David S. Miller <davem@davemloft.net>
parent b4232896
...@@ -229,6 +229,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \ ...@@ -229,6 +229,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
static __inline__ unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val) static __inline__ unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{ {
__asm__ __volatile__( __asm__ __volatile__(
" membar #StoreLoad | #LoadLoad\n"
" mov %0, %%g5\n" " mov %0, %%g5\n"
"1: lduw [%2], %%g7\n" "1: lduw [%2], %%g7\n"
" cas [%2], %%g7, %0\n" " cas [%2], %%g7, %0\n"
...@@ -245,6 +246,7 @@ static __inline__ unsigned long xchg32(__volatile__ unsigned int *m, unsigned in ...@@ -245,6 +246,7 @@ static __inline__ unsigned long xchg32(__volatile__ unsigned int *m, unsigned in
static __inline__ unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val) static __inline__ unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{ {
__asm__ __volatile__( __asm__ __volatile__(
" membar #StoreLoad | #LoadLoad\n"
" mov %0, %%g5\n" " mov %0, %%g5\n"
"1: ldx [%2], %%g7\n" "1: ldx [%2], %%g7\n"
" casx [%2], %%g7, %0\n" " casx [%2], %%g7, %0\n"
...@@ -289,7 +291,8 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret ...@@ -289,7 +291,8 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret
static __inline__ unsigned long static __inline__ unsigned long
__cmpxchg_u32(volatile int *m, int old, int new) __cmpxchg_u32(volatile int *m, int old, int new)
{ {
__asm__ __volatile__("cas [%2], %3, %0\n\t" __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
"cas [%2], %3, %0\n\t"
"membar #StoreLoad | #StoreStore" "membar #StoreLoad | #StoreStore"
: "=&r" (new) : "=&r" (new)
: "0" (new), "r" (m), "r" (old) : "0" (new), "r" (m), "r" (old)
...@@ -301,7 +304,8 @@ __cmpxchg_u32(volatile int *m, int old, int new) ...@@ -301,7 +304,8 @@ __cmpxchg_u32(volatile int *m, int old, int new)
static __inline__ unsigned long static __inline__ unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{ {
__asm__ __volatile__("casx [%2], %3, %0\n\t" __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
"casx [%2], %3, %0\n\t"
"membar #StoreLoad | #StoreStore" "membar #StoreLoad | #StoreStore"
: "=&r" (new) : "=&r" (new)
: "0" (new), "r" (m), "r" (old) : "0" (new), "r" (m), "r" (old)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment