Commit 7b61fcda authored by Roman Zippel, committed by Linus Torvalds

[PATCH] m68k: fix cmpxchg compile errors if CONFIG_RMW_INSNS=n

We require that all archs implement atomic_cmpxchg(), for the generic
version of atomic_add_unless().
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent b707dbe6
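For context, the generic atomic_add_unless() of this era is a retry loop built on atomic_cmpxchg(), which is why the primitive must exist on every architecture even when CONFIG_RMW_INSNS=n. A minimal sketch of that pattern, written as a function rather than the kernel's macro for readability:

/*
 * Sketch of the generic atomic_add_unless() pattern (2.6.16-era kernels
 * define it per-arch as a macro; this function form is for illustration).
 * Adds @a to @v unless @v was @u; returns non-zero if the add happened.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;	/* raced with another update; retry with observed value */
	return c != u;
}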
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -55,6 +55,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
 }
 
 #ifdef CONFIG_RMW_INSNS
+
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	int t, tmp;
@@ -82,7 +83,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 			: "g" (i), "2" (atomic_read(v)));
 	return t;
 }
+
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 #else /* !CONFIG_RMW_INSNS */
+
 static inline int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long flags;
@@ -110,6 +116,32 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 	return t;
 }
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	unsigned long flags;
+	int prev;
+
+	local_irq_save(flags);
+	prev = atomic_read(v);
+	if (prev == old)
+		atomic_set(v, new);
+	local_irq_restore(flags);
+	return prev;
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+	unsigned long flags;
+	int prev;
+
+	local_irq_save(flags);
+	prev = atomic_read(v);
+	atomic_set(v, new);
+	local_irq_restore(flags);
+	return prev;
+}
+
 #endif /* !CONFIG_RMW_INSNS */
 
 #define atomic_dec_return(v)	atomic_sub_return(1, (v))
 #define atomic_inc_return(v)	atomic_add_return(1, (v))
@@ -139,9 +171,6 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
 }
 
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 #define atomic_add_unless(v, a, u)	\
 ({	\
 	int c, old;	\
...
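To make the contract the new fallback has to preserve concrete: atomic_cmpxchg() always returns the value it observed, and stores the new value only when that observation matches the caller's expected old value. A hypothetical, single-threaded user-space model of just those semantics (not kernel code, no SMP or IRQ protection):

#include <stdio.h>

/* Toy model of the cmpxchg contract; illustration only. */
typedef struct { int counter; } atomic_t;

static int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int prev = v->counter;		/* always report what was observed */

	if (prev == old)
		v->counter = new;	/* store only on an exact match */
	return prev;
}

int main(void)
{
	atomic_t v = { .counter = 5 };

	printf("%d\n", atomic_cmpxchg(&v, 5, 7));	/* prints 5; v.counter is now 7 */
	printf("%d\n", atomic_cmpxchg(&v, 5, 9));	/* prints 7; no store, v unchanged */
	return 0;
}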