Commit 85fdbe1b authored by Gunnar Larisch, committed by Linus Torvalds

Add cmpxchg_local to ppc

Add a CPU-local version of cmpxchg for ppc.

Implements __cmpxchg_u32_local and uses it for the 32-bit cmpxchg_local.
The non-NMI-safe __cmpxchg_local_generic is used for 1-, 2- and 8-byte
cmpxchg_local.
Signed-off-by: Gunnar Larisch <gl@denx.de>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Kumar Gala <galak@gate.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent df80c8c5
@@ -209,12 +209,34 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
	return prev;
}
static inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned int old,
	unsigned int new)
{
	unsigned int prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	cmpw	0,%0,%3 \n\
	bne	2f \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2 \n\
	bne-	1b\n"
	"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
@@ -228,7 +250,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
	return old;
}
#define cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
@@ -236,6 +258,31 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
		(unsigned long)_n_, sizeof(*(ptr))); \
})
#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
	unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */
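
For context, here is a minimal usage sketch of the primitive this patch adds. It is not part of the patch, and the variable and function names are hypothetical. A per-CPU event counter is bumped with cmpxchg_local(): the update only has to be atomic with respect to other code running on the same CPU (for example an interrupt handler), so the cheaper CPU-local variant is sufficient and no cross-CPU ordering is required.

/* Hypothetical usage example -- not part of this patch. */
#include <linux/percpu.h>
#include <asm/system.h>

static DEFINE_PER_CPU(unsigned long, hyp_event_count);

static void hyp_count_event(void)
{
	/* get_cpu_var() disables preemption, so we stay on this CPU. */
	unsigned long *cnt = &get_cpu_var(hyp_event_count);
	unsigned long old;

	do {
		old = *cnt;
		/* Retry if an interrupt on this CPU updated the counter. */
	} while (cmpxchg_local(cnt, old, old + 1) != old);

	put_cpu_var(hyp_event_count);
}

On 32-bit ppc, sizeof(unsigned long) is 4, so this compiles down to the new __cmpxchg_u32_local() above; 1-, 2- and 8-byte operands fall back to __cmpxchg_local_generic().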