Commit 304a0d69 authored by Peter Zijlstra, committed by Thomas Gleixner

sparc: Provide atomic_{or,xor,and}

Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 658aa514
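For context, the mapping from the old mask helpers to the new logic ops is mechanical: atomic_set_mask(m, v) becomes atomic_or(m, v) and atomic_clear_mask(m, v) becomes atomic_and(~m, v). A hypothetical caller (not part of this commit; the names below are made up for illustration) would migrate like this:

/* Hypothetical caller, for illustration only -- not part of this commit. */
#include <linux/atomic.h>

static atomic_t irq_flags = ATOMIC_INIT(0);

static void update_flags(void)
{
	/* was: atomic_set_mask(0x0f, &irq_flags); */
	atomic_or(0x0f, &irq_flags);

	/* was: atomic_clear_mask(0x03, &irq_flags); */
	atomic_and(~0x03, &irq_flags);

	/* newly possible: toggle bits atomically */
	atomic_xor(0x05, &irq_flags);
}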
@@ -17,10 +17,14 @@
 #include <asm/barrier.h>
 #include <asm-generic/atomic64.h>
 
+#define CONFIG_ARCH_HAS_ATOMIC_OR
 
 #define ATOMIC_INIT(i)	{ (i) }
 
 int atomic_add_return(int, atomic_t *);
+void atomic_and(int, atomic_t *);
+void atomic_or(int, atomic_t *);
+void atomic_xor(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
 int atomic_xchg(atomic_t *, int);
 int __atomic_add_unless(atomic_t *, int, int);
...
@@ -33,6 +33,12 @@ long atomic64_##op##_return(long, atomic64_t *);
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
...
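In this header, ATOMIC_OP() only emits prototypes; the definitions come from the assembly hunks further down. A rough sketch of what ATOMIC_OP(and) expands to (reconstructed for illustration, not copied from the tree):

/* Approximate expansion of ATOMIC_OP(and) -- a sketch, assuming the macro
 * declares both the 32-bit and 64-bit variants as out-of-line functions.
 */
void atomic_and(int, atomic_t *);
void atomic64_and(long, atomic64_t *);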
@@ -27,22 +27,38 @@ static DEFINE_SPINLOCK(dummy);
 
 #endif /* SMP */
 
-#define ATOMIC_OP(op, cop) \
+#define ATOMIC_OP_RETURN(op, c_op) \
 int atomic_##op##_return(int i, atomic_t *v) \
 { \
 	int ret; \
 	unsigned long flags; \
 	spin_lock_irqsave(ATOMIC_HASH(v), flags); \
 	 \
-	ret = (v->counter cop i); \
+	ret = (v->counter c_op i); \
 	 \
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
 	return ret; \
 } \
 EXPORT_SYMBOL(atomic_##op##_return);
 
-ATOMIC_OP(add, +=)
+#define ATOMIC_OP(op, c_op) \
+void atomic_##op(int i, atomic_t *v) \
+{ \
+	unsigned long flags; \
+	spin_lock_irqsave(ATOMIC_HASH(v), flags); \
+	 \
+	v->counter c_op i; \
+	 \
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
+} \
+EXPORT_SYMBOL(atomic_##op);
 
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
+
+#undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
 int atomic_xchg(atomic_t *v, int new)
...
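To make the new macro concrete, here is roughly what ATOMIC_OP(or, |=) above expands to on sparc32 (an illustrative expansion, not an extra hunk). Since sparc32 has no suitable atomic instruction for this, the operation runs under one of the hashed spinlocks selected by ATOMIC_HASH(v):

/* Illustrative expansion of ATOMIC_OP(or, |=) -- the real code is generated
 * by the macro above; atomicity comes from the hashed spinlock.
 */
void atomic_or(int i, atomic_t *v)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);
	v->counter |= i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_or);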
@@ -47,6 +47,9 @@ ENDPROC(atomic_##op##_return);
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
@@ -84,6 +87,9 @@ ENDPROC(atomic64_##op##_return);
 ATOMIC64_OPS(add)
 ATOMIC64_OPS(sub)
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
...
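At the C level, each routine generated by these sparc64 assembly macros behaves like a compare-and-swap retry loop. A sketch of the idea (a conceptual model with a made-up name, not the actual implementation, which is the assembly above):

/* Conceptual model of the generated sparc64 atomic_or -- a sketch only.
 * Re-read the counter and retry the cmpxchg until no other CPU changed
 * the value between the read and the swap.
 */
static inline void atomic_or_model(int mask, atomic_t *v)
{
	int old, new;

	do {
		old = atomic_read(v);
		new = old | mask;
	} while (atomic_cmpxchg(v, old, new) != old);
}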
@@ -111,6 +111,9 @@ EXPORT_SYMBOL(atomic64_##op##_return);
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
...
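The EXPORT_SYMBOL entries added by this last hunk make the new out-of-line routines available to modules as well. A hypothetical module snippet (not part of this commit; all names below are invented for illustration):

/* Hypothetical module usage -- names are made up for illustration. */
#include <linux/module.h>
#include <linux/atomic.h>

static atomic_t demo_mask = ATOMIC_INIT(0xff);

static int __init demo_init(void)
{
	atomic_and(0x0f, &demo_mask);	/* keep only the low nibble */
	atomic_xor(0x01, &demo_mask);	/* flip bit 0 */
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");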