Commit 1c854a6c authored by Andreas Larsson, committed by Jiri Slaby

sparc32: Implement xchg and atomic_xchg using ATOMIC_HASH locks

[ Upstream commit 1a17fdc4 ]

Atomicity between xchg and cmpxchg cannot be guaranteed when xchg is
implemented with a swap and cmpxchg is implemented with locks.
Without this, e.g. mcs_spin_lock and mcs_spin_unlock are broken.
Signed-off-by: Andreas Larsson <andreas@gaisler.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
parent a80e35d9
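Background on the fix: sparc32 has no compare-and-swap instruction, so cmpxchg() is emulated with a small array of spinlocks indexed by the target address (ATOMIC_HASH). As long as xchg() was the hardware swap instruction, it could modify a word while a cmpxchg() on that same word held the hash lock, so the two were not atomic with respect to each other. The sketch below is a minimal userspace model of the hash-lock scheme, not kernel code: pthread mutexes stand in for the kernel spinlocks, and the names HASH_SIZE, addr_lock(), model_xchg() and model_cmpxchg() are illustrative assumptions. It only shows why routing both primitives through the same per-address lock makes them serialize against each other, which is what the diff below does in the kernel.

/* Userspace model of the sparc32 ATOMIC_HASH scheme (illustrative only).
 * A small table of locks is indexed by a hash of the target address;
 * xchg and cmpxchg on the same word always take the same lock, so they
 * serialize against each other.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define HASH_SIZE 4	/* assumed size; the kernel uses ATOMIC_HASH_SIZE */

static pthread_mutex_t hash_lock[HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* Pick a lock based on the address, like ATOMIC_HASH(ptr). */
static pthread_mutex_t *addr_lock(volatile uint32_t *ptr)
{
	return &hash_lock[((uintptr_t)ptr >> 2) % HASH_SIZE];
}

/* Lock-based exchange: the counterpart of __xchg_u32() in the patch. */
static uint32_t model_xchg(volatile uint32_t *ptr, uint32_t new)
{
	pthread_mutex_t *l = addr_lock(ptr);
	uint32_t prev;

	pthread_mutex_lock(l);
	prev = *ptr;
	*ptr = new;
	pthread_mutex_unlock(l);
	return prev;
}

/* Lock-based compare-and-exchange, as __cmpxchg_u32() already did. */
static uint32_t model_cmpxchg(volatile uint32_t *ptr, uint32_t old, uint32_t new)
{
	pthread_mutex_t *l = addr_lock(ptr);
	uint32_t prev;

	pthread_mutex_lock(l);
	prev = *ptr;
	if (prev == old)
		*ptr = new;
	pthread_mutex_unlock(l);
	return prev;
}

int main(void)
{
	volatile uint32_t word = 0;

	model_xchg(&word, 1);				/* word: 0 -> 1 */
	printf("%u\n", (unsigned)model_cmpxchg(&word, 1, 2));	/* prints 1, word -> 2 */
	return 0;
}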
arch/sparc/include/asm/atomic_32.h
@@ -21,7 +21,7 @@
 extern int __atomic_add_return(int, atomic_t *);
 extern int atomic_cmpxchg(atomic_t *, int, int);
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+extern int atomic_xchg(atomic_t *, int);
 extern int __atomic_add_unless(atomic_t *, int, int);
 extern void atomic_set(atomic_t *, int);
arch/sparc/include/asm/cmpxchg_32.h
@@ -11,22 +11,14 @@
 #ifndef __ARCH_SPARC_CMPXCHG__
 #define __ARCH_SPARC_CMPXCHG__
 
-static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
-{
-	__asm__ __volatile__("swap [%2], %0"
-			     : "=&r" (val)
-			     : "0" (val), "r" (m)
-			     : "memory");
-	return val;
-}
+extern unsigned long __xchg_u32(volatile u32 *m, u32 new);
 
 extern void __xchg_called_with_bad_pointer(void);
 
 static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
 	switch (size) {
 	case 4:
-		return xchg_u32(ptr, x);
+		return __xchg_u32(ptr, x);
 	}
 	__xchg_called_with_bad_pointer();
 	return x;
arch/sparc/lib/atomic32.c
@@ -40,6 +40,19 @@ int __atomic_add_return(int i, atomic_t *v)
 }
 EXPORT_SYMBOL(__atomic_add_return);
 
+int atomic_xchg(atomic_t *v, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	ret = v->counter;
+	v->counter = new;
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	return ret;
+}
+EXPORT_SYMBOL(atomic_xchg);
+
 int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
@@ -132,3 +145,17 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
 	return (unsigned long)prev;
 }
 EXPORT_SYMBOL(__cmpxchg_u32);
+
+unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
+{
+	unsigned long flags;
+	u32 prev;
+
+	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+	prev = *ptr;
+	*ptr = new;
+	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+	return (unsigned long)prev;
+}
+EXPORT_SYMBOL(__xchg_u32);
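The mcs_spin_lock()/mcs_spin_unlock() breakage mentioned in the commit message comes from exactly this kind of mixing: an MCS-style queued lock enqueues waiters with an exchange on a tail pointer and releases with a compare-and-exchange on the same pointer, so the two operations must serialize. The following is a simplified userspace sketch of that pattern, assuming GCC __atomic builtins in place of the kernel's xchg()/cmpxchg(); it illustrates the protocol only and is not the kernel's mcs_spinlock implementation.

/* Simplified MCS-style queued lock (illustrative sketch). */
#include <stddef.h>

struct mcs_node {
	struct mcs_node *volatile next;
	volatile int locked;
};

/* Tail of the waiter queue; NULL means the lock is free. */
static struct mcs_node *lock_tail;

static void mcs_lock(struct mcs_node *node)
{
	struct mcs_node *prev;

	node->next = NULL;
	node->locked = 0;

	/* Atomically append ourselves to the queue (kernel: xchg()). */
	prev = __atomic_exchange_n(&lock_tail, node, __ATOMIC_ACQ_REL);
	if (prev) {
		prev->next = node;
		while (!node->locked)
			;	/* spin until our predecessor hands the lock over */
	}
}

static void mcs_unlock(struct mcs_node *node)
{
	if (!node->next) {
		struct mcs_node *expected = node;

		/* No visible successor: try to free the lock by swinging the
		 * tail back to NULL (kernel: cmpxchg()).  This races with the
		 * exchange in mcs_lock(); if the two are not atomic with
		 * respect to each other, a waiter can be enqueued and never
		 * woken -- the breakage the patch fixes on sparc32. */
		if (__atomic_compare_exchange_n(&lock_tail, &expected, NULL, 0,
						__ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE))
			return;

		while (!node->next)
			;	/* a successor is linking itself in; wait for it */
	}
	node->next->locked = 1;	/* pass the lock to the next waiter */
}

int main(void)
{
	struct mcs_node me;

	mcs_lock(&me);		/* uncontended: tail was NULL, we own the lock */
	mcs_unlock(&me);	/* uncontended: the compare-and-exchange frees it */
	return 0;
}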