Commit b93c7b8c authored by Peter Zijlstra, committed by Ingo Molnar

locking,arch,alpha: Fold atomic_ops

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: linux-alpha@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135851.832107183@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f6b4ecee
@@ -29,145 +29,92 @@
  * branch back to restart the operation.
  */
 
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-        unsigned long temp;
-        __asm__ __volatile__(
-        "1:     ldl_l %0,%1\n"
-        "       addl %0,%2,%0\n"
-        "       stl_c %0,%1\n"
-        "       beq %0,2f\n"
-        ".subsection 2\n"
-        "2:     br 1b\n"
-        ".previous"
-        :"=&r" (temp), "=m" (v->counter)
-        :"Ir" (i), "m" (v->counter));
-}
-
-static __inline__ void atomic64_add(long i, atomic64_t * v)
-{
-        unsigned long temp;
-        __asm__ __volatile__(
-        "1:     ldq_l %0,%1\n"
-        "       addq %0,%2,%0\n"
-        "       stq_c %0,%1\n"
-        "       beq %0,2f\n"
-        ".subsection 2\n"
-        "2:     br 1b\n"
-        ".previous"
-        :"=&r" (temp), "=m" (v->counter)
-        :"Ir" (i), "m" (v->counter));
-}
-
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-        unsigned long temp;
-        __asm__ __volatile__(
-        "1:     ldl_l %0,%1\n"
-        "       subl %0,%2,%0\n"
-        "       stl_c %0,%1\n"
-        "       beq %0,2f\n"
-        ".subsection 2\n"
-        "2:     br 1b\n"
-        ".previous"
-        :"=&r" (temp), "=m" (v->counter)
-        :"Ir" (i), "m" (v->counter));
-}
-
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
-{
-        unsigned long temp;
-        __asm__ __volatile__(
-        "1:     ldq_l %0,%1\n"
-        "       subq %0,%2,%0\n"
-        "       stq_c %0,%1\n"
-        "       beq %0,2f\n"
-        ".subsection 2\n"
-        "2:     br 1b\n"
-        ".previous"
-        :"=&r" (temp), "=m" (v->counter)
-        :"Ir" (i), "m" (v->counter));
-}
-
-/*
- * Same as above, but return the result value
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        long temp, result;
-        smp_mb();
-        __asm__ __volatile__(
-        "1:     ldl_l %0,%1\n"
-        "       addl %0,%3,%2\n"
-        "       addl %0,%3,%0\n"
-        "       stl_c %0,%1\n"
-        "       beq %0,2f\n"
-        ".subsection 2\n"
-        "2:     br 1b\n"
-        ".previous"
-        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
-        :"Ir" (i), "m" (v->counter) : "memory");
-        smp_mb();
-        return result;
-}
-
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
-{
-        long temp, result;
-        smp_mb();
-        __asm__ __volatile__(
-        "1:     ldq_l %0,%1\n"
-        "       addq %0,%3,%2\n"
-        "       addq %0,%3,%0\n"
-        "       stq_c %0,%1\n"
-        "       beq %0,2f\n"
-        ".subsection 2\n"
-        "2:     br 1b\n"
-        ".previous"
-        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
-        :"Ir" (i), "m" (v->counter) : "memory");
-        smp_mb();
-        return result;
-}
-
-static __inline__ long atomic_sub_return(int i, atomic_t * v)
-{
-        long temp, result;
-        smp_mb();
-        __asm__ __volatile__(
-        "1:     ldl_l %0,%1\n"
-        "       subl %0,%3,%2\n"
-        "       subl %0,%3,%0\n"
-        "       stl_c %0,%1\n"
-        "       beq %0,2f\n"
-        ".subsection 2\n"
-        "2:     br 1b\n"
-        ".previous"
-        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
-        :"Ir" (i), "m" (v->counter) : "memory");
-        smp_mb();
-        return result;
-}
-
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
-{
-        long temp, result;
-        smp_mb();
-        __asm__ __volatile__(
-        "1:     ldq_l %0,%1\n"
-        "       subq %0,%3,%2\n"
-        "       subq %0,%3,%0\n"
-        "       stq_c %0,%1\n"
-        "       beq %0,2f\n"
-        ".subsection 2\n"
-        "2:     br 1b\n"
-        ".previous"
-        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
-        :"Ir" (i), "m" (v->counter) : "memory");
-        smp_mb();
-        return result;
-}
+#define ATOMIC_OP(op)                                                  \
+static __inline__ void atomic_##op(int i, atomic_t * v)                \
+{                                                                      \
+        unsigned long temp;                                            \
+        __asm__ __volatile__(                                          \
+        "1:     ldl_l %0,%1\n"                                         \
+        "       " #op "l %0,%2,%0\n"                                   \
+        "       stl_c %0,%1\n"                                         \
+        "       beq %0,2f\n"                                           \
+        ".subsection 2\n"                                              \
+        "2:     br 1b\n"                                               \
+        ".previous"                                                    \
+        :"=&r" (temp), "=m" (v->counter)                               \
+        :"Ir" (i), "m" (v->counter));                                  \
+}                                                                      \
+
+#define ATOMIC_OP_RETURN(op)                                           \
+static inline int atomic_##op##_return(int i, atomic_t *v)             \
+{                                                                      \
+        long temp, result;                                             \
+        smp_mb();                                                      \
+        __asm__ __volatile__(                                          \
+        "1:     ldl_l %0,%1\n"                                         \
+        "       " #op "l %0,%3,%2\n"                                   \
+        "       " #op "l %0,%3,%0\n"                                   \
+        "       stl_c %0,%1\n"                                         \
+        "       beq %0,2f\n"                                           \
+        ".subsection 2\n"                                              \
+        "2:     br 1b\n"                                               \
+        ".previous"                                                    \
+        :"=&r" (temp), "=m" (v->counter), "=&r" (result)               \
+        :"Ir" (i), "m" (v->counter) : "memory");                       \
+        smp_mb();                                                      \
+        return result;                                                 \
+}
+
+#define ATOMIC64_OP(op)                                                \
+static __inline__ void atomic64_##op(long i, atomic64_t * v)           \
+{                                                                      \
+        unsigned long temp;                                            \
+        __asm__ __volatile__(                                          \
+        "1:     ldq_l %0,%1\n"                                         \
+        "       " #op "q %0,%2,%0\n"                                   \
+        "       stq_c %0,%1\n"                                         \
+        "       beq %0,2f\n"                                           \
+        ".subsection 2\n"                                              \
+        "2:     br 1b\n"                                               \
+        ".previous"                                                    \
+        :"=&r" (temp), "=m" (v->counter)                               \
+        :"Ir" (i), "m" (v->counter));                                  \
+}                                                                      \
+
+#define ATOMIC64_OP_RETURN(op)                                         \
+static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)  \
+{                                                                      \
+        long temp, result;                                             \
+        smp_mb();                                                      \
+        __asm__ __volatile__(                                          \
+        "1:     ldq_l %0,%1\n"                                         \
+        "       " #op "q %0,%3,%2\n"                                   \
+        "       " #op "q %0,%3,%0\n"                                   \
+        "       stq_c %0,%1\n"                                         \
+        "       beq %0,2f\n"                                           \
+        ".subsection 2\n"                                              \
+        "2:     br 1b\n"                                               \
+        ".previous"                                                    \
+        :"=&r" (temp), "=m" (v->counter), "=&r" (result)               \
+        :"Ir" (i), "m" (v->counter) : "memory");                       \
+        smp_mb();                                                      \
+        return result;                                                 \
+}
+
+#define ATOMIC_OPS(opg)                                                \
+        ATOMIC_OP(opg)                                                 \
+        ATOMIC_OP_RETURN(opg)                                          \
+        ATOMIC64_OP(opg)                                               \
+        ATOMIC64_OP_RETURN(opg)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
...
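The folding pattern itself is easy to demonstrate outside the kernel tree. Below is a minimal, self-contained C sketch of the same CPP technique; it is an illustration under stated assumptions, not the kernel code: a plain struct stands in for the kernel's atomic_t, and the GCC __atomic_fetch_* builtins stand in for Alpha's ldl_l/stl_c retry loop.

#include <stdio.h>

typedef struct { int counter; } atomic_t;  /* stand-in, not the kernel type */

/*
 * One macro body generates each function; only the operation name
 * differs, just as only "addl" vs "subl" differs in the patch above.
 * __atomic_fetch_##op pastes to the GCC builtins __atomic_fetch_add
 * and __atomic_fetch_sub, standing in for the LL/SC assembly loop.
 */
#define ATOMIC_OP(op)                                           \
static inline void atomic_##op(int i, atomic_t *v)              \
{                                                               \
        __atomic_fetch_##op(&v->counter, i, __ATOMIC_RELAXED);  \
}

ATOMIC_OP(add)          /* expands to atomic_add() */
ATOMIC_OP(sub)          /* expands to atomic_sub() */
#undef ATOMIC_OP

int main(void)
{
        atomic_t v = { 0 };
        atomic_add(5, &v);
        atomic_sub(2, &v);
        printf("%d\n", v.counter);      /* prints 3 */
        return 0;
}

With this shape, supporting a new operation costs one line per macro family (e.g. ATOMIC_OP(and) in the sketch, since an __atomic_fetch_and builtin also exists), which is what the changelog means by preparing for easy addition of new ops.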