Commit 212d3be1 authored by Peter Zijlstra, committed by Thomas Gleixner

alpha: Provide atomic_{or,xor,and}

Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some architectures.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 56d1defe
...@@ -29,13 +29,13 @@ ...@@ -29,13 +29,13 @@
* branch back to restart the operation. * branch back to restart the operation.
*/ */
#define ATOMIC_OP(op) \ #define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \ static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \ { \
unsigned long temp; \ unsigned long temp; \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: ldl_l %0,%1\n" \ "1: ldl_l %0,%1\n" \
" " #op "l %0,%2,%0\n" \ " " #asm_op " %0,%2,%0\n" \
" stl_c %0,%1\n" \ " stl_c %0,%1\n" \
" beq %0,2f\n" \ " beq %0,2f\n" \
".subsection 2\n" \ ".subsection 2\n" \
...@@ -45,15 +45,15 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ ...@@ -45,15 +45,15 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
:"Ir" (i), "m" (v->counter)); \ :"Ir" (i), "m" (v->counter)); \
} \ } \
#define ATOMIC_OP_RETURN(op) \ #define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \ static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
long temp, result; \ long temp, result; \
smp_mb(); \ smp_mb(); \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: ldl_l %0,%1\n" \ "1: ldl_l %0,%1\n" \
" " #op "l %0,%3,%2\n" \ " " #asm_op " %0,%3,%2\n" \
" " #op "l %0,%3,%0\n" \ " " #asm_op " %0,%3,%0\n" \
" stl_c %0,%1\n" \ " stl_c %0,%1\n" \
" beq %0,2f\n" \ " beq %0,2f\n" \
".subsection 2\n" \ ".subsection 2\n" \
...@@ -65,13 +65,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ...@@ -65,13 +65,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return result; \ return result; \
} }
#define ATOMIC64_OP(op) \ #define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \ { \
unsigned long temp; \ unsigned long temp; \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: ldq_l %0,%1\n" \ "1: ldq_l %0,%1\n" \
" " #op "q %0,%2,%0\n" \ " " #asm_op " %0,%2,%0\n" \
" stq_c %0,%1\n" \ " stq_c %0,%1\n" \
" beq %0,2f\n" \ " beq %0,2f\n" \
".subsection 2\n" \ ".subsection 2\n" \
...@@ -81,15 +81,15 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ ...@@ -81,15 +81,15 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
:"Ir" (i), "m" (v->counter)); \ :"Ir" (i), "m" (v->counter)); \
} \ } \
#define ATOMIC64_OP_RETURN(op) \ #define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
{ \ { \
long temp, result; \ long temp, result; \
smp_mb(); \ smp_mb(); \
__asm__ __volatile__( \ __asm__ __volatile__( \
"1: ldq_l %0,%1\n" \ "1: ldq_l %0,%1\n" \
" " #op "q %0,%3,%2\n" \ " " #asm_op " %0,%3,%2\n" \
" " #op "q %0,%3,%0\n" \ " " #asm_op " %0,%3,%0\n" \
" stq_c %0,%1\n" \ " stq_c %0,%1\n" \
" beq %0,2f\n" \ " beq %0,2f\n" \
".subsection 2\n" \ ".subsection 2\n" \
...@@ -101,15 +101,28 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ ...@@ -101,15 +101,28 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
return result; \ return result; \
} }
#define ATOMIC_OPS(opg) \ #define ATOMIC_OPS(op) \
ATOMIC_OP(opg) \ ATOMIC_OP(op, op##l) \
ATOMIC_OP_RETURN(opg) \ ATOMIC_OP_RETURN(op, op##l) \
ATOMIC64_OP(opg) \ ATOMIC64_OP(op, op##q) \
ATOMIC64_OP_RETURN(opg) ATOMIC64_OP_RETURN(op, op##q)
ATOMIC_OPS(add) ATOMIC_OPS(add)
ATOMIC_OPS(sub) ATOMIC_OPS(sub)
#define CONFIG_ARCH_HAS_ATOMIC_OR
#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot
ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, bis)
ATOMIC_OP(xor, xor)
ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, bis)
ATOMIC64_OP(xor, xor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP #undef ATOMIC64_OP
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment