Commit ef31563e authored by Peter Zijlstra, committed by Ingo Molnar

locking,arch,mips: Fold atomic_ops

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Maciej W. Rozycki <macro@codesourcery.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Link: http://lkml.kernel.org/r/20140508135852.521548500@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent d6dfe250
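
The diff below applies one folding pattern twice (once for the 32-bit and once for the 64-bit ops): each open-coded atomic_add/atomic_sub pair and its *_return variants is replaced by a CPP macro that is instantiated once per operation. As a rough illustration of that pattern, here is a minimal, hypothetical user-space sketch -- it is NOT the kernel code from this patch and it is not actually atomic; only the ATOMIC_OP / atomic_##op naming mirrors the patch, everything else is illustrative:

/*
 * Illustrative sketch only -- not the kernel implementation and not
 * actually atomic.  One macro stamps out a family of nearly identical
 * functions; only the C operator (c_op) varies, which is the shape the
 * patch gives the MIPS atomic ops.
 */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        v->counter c_op i;      /* the kernel uses LL/SC or irqs-off here */ \
} \
 \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        v->counter c_op i; \
        return v->counter; \
}

ATOMIC_OP(add, +=)      /* generates atomic_add() and atomic_add_return() */
ATOMIC_OP(sub, -=)      /* generates atomic_sub() and atomic_sub_return() */

#undef ATOMIC_OP

int main(void)
{
        atomic_t v = { .counter = 40 };

        atomic_add(5, &v);
        printf("%d\n", atomic_sub_return(3, &v));       /* prints 42 */
        return 0;
}

Compiling and running this prints 42; the kernel version differs only in that the generated function bodies contain the LL/SC or interrupts-disabled sequences shown in the diff below.
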
@@ -40,195 +40,103 @@
  */
 #define atomic_set(v, i)        ((v)->counter = (i))
 
-/*
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-        if (kernel_uses_llsc && R10000_LLSC_WAR) {
-                int temp;
-
-                __asm__ __volatile__(
-                "    .set    arch=r4000                 \n"
-                "1:  ll      %0, %1    # atomic_add     \n"
-                "    addu    %0, %2                     \n"
-                "    sc      %0, %1                     \n"
-                "    beqzl   %0, 1b                     \n"
-                "    .set    mips0                      \n"
-                : "=&r" (temp), "+m" (v->counter)
-                : "Ir" (i));
-        } else if (kernel_uses_llsc) {
-                int temp;
-
-                do {
-                        __asm__ __volatile__(
-                        "    .set    arch=r4000         \n"
-                        "    ll      %0, %1  # atomic_add \n"
-                        "    addu    %0, %2             \n"
-                        "    sc      %0, %1             \n"
-                        "    .set    mips0              \n"
-                        : "=&r" (temp), "+m" (v->counter)
-                        : "Ir" (i));
-                } while (unlikely(!temp));
-        } else {
-                unsigned long flags;
-
-                raw_local_irq_save(flags);
-                v->counter += i;
-                raw_local_irq_restore(flags);
-        }
-}
-
-/*
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-        if (kernel_uses_llsc && R10000_LLSC_WAR) {
-                int temp;
-
-                __asm__ __volatile__(
-                "    .set    arch=r4000                 \n"
-                "1:  ll      %0, %1    # atomic_sub     \n"
-                "    subu    %0, %2                     \n"
-                "    sc      %0, %1                     \n"
-                "    beqzl   %0, 1b                     \n"
-                "    .set    mips0                      \n"
-                : "=&r" (temp), "+m" (v->counter)
-                : "Ir" (i));
-        } else if (kernel_uses_llsc) {
-                int temp;
-
-                do {
-                        __asm__ __volatile__(
-                        "    .set    arch=r4000         \n"
-                        "    ll      %0, %1  # atomic_sub \n"
-                        "    subu    %0, %2             \n"
-                        "    sc      %0, %1             \n"
-                        "    .set    mips0              \n"
-                        : "=&r" (temp), "+m" (v->counter)
-                        : "Ir" (i));
-                } while (unlikely(!temp));
-        } else {
-                unsigned long flags;
-
-                raw_local_irq_save(flags);
-                v->counter -= i;
-                raw_local_irq_restore(flags);
-        }
-}
-
-/*
- * Same as above, but return the result value
- */
-static __inline__ int atomic_add_return(int i, atomic_t * v)
-{
-        int result;
-
-        smp_mb__before_llsc();
-
-        if (kernel_uses_llsc && R10000_LLSC_WAR) {
-                int temp;
-
-                __asm__ __volatile__(
-                "    .set    arch=r4000                 \n"
-                "1:  ll      %1, %2    # atomic_add_return \n"
-                "    addu    %0, %1, %3                 \n"
-                "    sc      %0, %2                     \n"
-                "    beqzl   %0, 1b                     \n"
-                "    addu    %0, %1, %3                 \n"
-                "    .set    mips0                      \n"
-                : "=&r" (result), "=&r" (temp), "+m" (v->counter)
-                : "Ir" (i));
-        } else if (kernel_uses_llsc) {
-                int temp;
-
-                do {
-                        __asm__ __volatile__(
-                        "    .set    arch=r4000         \n"
-                        "    ll      %1, %2  # atomic_add_return \n"
-                        "    addu    %0, %1, %3         \n"
-                        "    sc      %0, %2             \n"
-                        "    .set    mips0              \n"
-                        : "=&r" (result), "=&r" (temp), "+m" (v->counter)
-                        : "Ir" (i));
-                } while (unlikely(!result));
-
-                result = temp + i;
-        } else {
-                unsigned long flags;
-
-                raw_local_irq_save(flags);
-                result = v->counter;
-                result += i;
-                v->counter = result;
-                raw_local_irq_restore(flags);
-        }
-
-        smp_llsc_mb();
-
-        return result;
-}
-
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
-{
-        int result;
-
-        smp_mb__before_llsc();
-
-        if (kernel_uses_llsc && R10000_LLSC_WAR) {
-                int temp;
-
-                __asm__ __volatile__(
-                "    .set    arch=r4000                 \n"
-                "1:  ll      %1, %2    # atomic_sub_return \n"
-                "    subu    %0, %1, %3                 \n"
-                "    sc      %0, %2                     \n"
-                "    beqzl   %0, 1b                     \n"
-                "    subu    %0, %1, %3                 \n"
-                "    .set    mips0                      \n"
-                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
-                : "Ir" (i), "m" (v->counter)
-                : "memory");
-
-                result = temp - i;
-        } else if (kernel_uses_llsc) {
-                int temp;
-
-                do {
-                        __asm__ __volatile__(
-                        "    .set    arch=r4000         \n"
-                        "    ll      %1, %2  # atomic_sub_return \n"
-                        "    subu    %0, %1, %3         \n"
-                        "    sc      %0, %2             \n"
-                        "    .set    mips0              \n"
-                        : "=&r" (result), "=&r" (temp), "+m" (v->counter)
-                        : "Ir" (i));
-                } while (unlikely(!result));
-
-                result = temp - i;
-        } else {
-                unsigned long flags;
-
-                raw_local_irq_save(flags);
-                result = v->counter;
-                result -= i;
-                v->counter = result;
-                raw_local_irq_restore(flags);
-        }
-
-        smp_llsc_mb();
-
-        return result;
-}
+#define ATOMIC_OP(op, c_op, asm_op) \
+static __inline__ void atomic_##op(int i, atomic_t * v) \
+{ \
+        if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+                int temp; \
+ \
+                __asm__ __volatile__( \
+                "    .set    arch=r4000                 \n" \
+                "1:  ll      %0, %1    # atomic_" #op " \n" \
+                "    " #asm_op " %0, %2                 \n" \
+                "    sc      %0, %1                     \n" \
+                "    beqzl   %0, 1b                     \n" \
+                "    .set    mips0                      \n" \
+                : "=&r" (temp), "+m" (v->counter) \
+                : "Ir" (i)); \
+        } else if (kernel_uses_llsc) { \
+                int temp; \
+ \
+                do { \
+                        __asm__ __volatile__( \
+                        "    .set    arch=r4000         \n" \
+                        "    ll      %0, %1  # atomic_" #op "\n" \
+                        "    " #asm_op " %0, %2         \n" \
+                        "    sc      %0, %1             \n" \
+                        "    .set    mips0              \n" \
+                        : "=&r" (temp), "+m" (v->counter) \
+                        : "Ir" (i)); \
+                } while (unlikely(!temp)); \
+        } else { \
+                unsigned long flags; \
+ \
+                raw_local_irq_save(flags); \
+                v->counter c_op i; \
+                raw_local_irq_restore(flags); \
+        } \
+}
+
+#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+{ \
+        int result; \
+ \
+        smp_mb__before_llsc(); \
+ \
+        if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+                int temp; \
+ \
+                __asm__ __volatile__( \
+                "    .set    arch=r4000                 \n" \
+                "1:  ll      %1, %2    # atomic_" #op "_return \n" \
+                "    " #asm_op " %0, %1, %3             \n" \
+                "    sc      %0, %2                     \n" \
+                "    beqzl   %0, 1b                     \n" \
+                "    addu    %0, %1, %3                 \n" \
+                "    .set    mips0                      \n" \
+                : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+                : "Ir" (i)); \
+        } else if (kernel_uses_llsc) { \
+                int temp; \
+ \
+                do { \
+                        __asm__ __volatile__( \
+                        "    .set    arch=r4000         \n" \
+                        "    ll      %1, %2  # atomic_" #op "_return \n" \
+                        "    " #asm_op " %0, %1, %3     \n" \
+                        "    sc      %0, %2             \n" \
+                        "    .set    mips0              \n" \
+                        : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+                        : "Ir" (i)); \
+                } while (unlikely(!result)); \
+ \
+                result = temp + i; \
+        } else { \
+                unsigned long flags; \
+ \
+                raw_local_irq_save(flags); \
+                result = v->counter; \
+                result c_op i; \
+                v->counter = result; \
+                raw_local_irq_restore(flags); \
+        } \
+ \
+        smp_llsc_mb(); \
+ \
+        return result; \
+}
+
+#define ATOMIC_OPS(op, c_op, asm_op) \
+        ATOMIC_OP(op, c_op, asm_op) \
+        ATOMIC_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC_OPS(add, +=, addu)
+ATOMIC_OPS(sub, -=, subu)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 /*
  * atomic_sub_if_positive - conditionally subtract integer from atomic variable
@@ -407,195 +315,104 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  */
 #define atomic64_set(v, i)      ((v)->counter = (i))
 
-/*
- * atomic64_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic64_add(long i, atomic64_t * v)
-{
-        if (kernel_uses_llsc && R10000_LLSC_WAR) {
-                long temp;
-
-                __asm__ __volatile__(
-                "    .set    arch=r4000                 \n"
-                "1:  lld     %0, %1    # atomic64_add   \n"
-                "    daddu   %0, %2                     \n"
-                "    scd     %0, %1                     \n"
-                "    beqzl   %0, 1b                     \n"
-                "    .set    mips0                      \n"
-                : "=&r" (temp), "+m" (v->counter)
-                : "Ir" (i));
-        } else if (kernel_uses_llsc) {
-                long temp;
-
-                do {
-                        __asm__ __volatile__(
-                        "    .set    arch=r4000         \n"
-                        "    lld     %0, %1  # atomic64_add \n"
-                        "    daddu   %0, %2             \n"
-                        "    scd     %0, %1             \n"
-                        "    .set    mips0              \n"
-                        : "=&r" (temp), "+m" (v->counter)
-                        : "Ir" (i));
-                } while (unlikely(!temp));
-        } else {
-                unsigned long flags;
-
-                raw_local_irq_save(flags);
-                v->counter += i;
-                raw_local_irq_restore(flags);
-        }
-}
-
-/*
- * atomic64_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
-{
-        if (kernel_uses_llsc && R10000_LLSC_WAR) {
-                long temp;
-
-                __asm__ __volatile__(
-                "    .set    arch=r4000                 \n"
-                "1:  lld     %0, %1    # atomic64_sub   \n"
-                "    dsubu   %0, %2                     \n"
-                "    scd     %0, %1                     \n"
-                "    beqzl   %0, 1b                     \n"
-                "    .set    mips0                      \n"
-                : "=&r" (temp), "+m" (v->counter)
-                : "Ir" (i));
-        } else if (kernel_uses_llsc) {
-                long temp;
-
-                do {
-                        __asm__ __volatile__(
-                        "    .set    arch=r4000         \n"
-                        "    lld     %0, %1  # atomic64_sub \n"
-                        "    dsubu   %0, %2             \n"
-                        "    scd     %0, %1             \n"
-                        "    .set    mips0              \n"
-                        : "=&r" (temp), "+m" (v->counter)
-                        : "Ir" (i));
-                } while (unlikely(!temp));
-        } else {
-                unsigned long flags;
-
-                raw_local_irq_save(flags);
-                v->counter -= i;
-                raw_local_irq_restore(flags);
-        }
-}
-
-/*
- * Same as above, but return the result value
- */
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
-{
-        long result;
-
-        smp_mb__before_llsc();
-
-        if (kernel_uses_llsc && R10000_LLSC_WAR) {
-                long temp;
-
-                __asm__ __volatile__(
-                "    .set    arch=r4000                 \n"
-                "1:  lld     %1, %2    # atomic64_add_return \n"
-                "    daddu   %0, %1, %3                 \n"
-                "    scd     %0, %2                     \n"
-                "    beqzl   %0, 1b                     \n"
-                "    daddu   %0, %1, %3                 \n"
-                "    .set    mips0                      \n"
-                : "=&r" (result), "=&r" (temp), "+m" (v->counter)
-                : "Ir" (i));
-        } else if (kernel_uses_llsc) {
-                long temp;
-
-                do {
-                        __asm__ __volatile__(
-                        "    .set    arch=r4000         \n"
-                        "    lld     %1, %2  # atomic64_add_return \n"
-                        "    daddu   %0, %1, %3         \n"
-                        "    scd     %0, %2             \n"
-                        "    .set    mips0              \n"
-                        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
-                        : "Ir" (i), "m" (v->counter)
-                        : "memory");
-                } while (unlikely(!result));
-
-                result = temp + i;
-        } else {
-                unsigned long flags;
-
-                raw_local_irq_save(flags);
-                result = v->counter;
-                result += i;
-                v->counter = result;
-                raw_local_irq_restore(flags);
-        }
-
-        smp_llsc_mb();
-
-        return result;
-}
-
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
-{
-        long result;
-
-        smp_mb__before_llsc();
-
-        if (kernel_uses_llsc && R10000_LLSC_WAR) {
-                long temp;
-
-                __asm__ __volatile__(
-                "    .set    arch=r4000                 \n"
-                "1:  lld     %1, %2    # atomic64_sub_return \n"
-                "    dsubu   %0, %1, %3                 \n"
-                "    scd     %0, %2                     \n"
-                "    beqzl   %0, 1b                     \n"
-                "    dsubu   %0, %1, %3                 \n"
-                "    .set    mips0                      \n"
-                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
-                : "Ir" (i), "m" (v->counter)
-                : "memory");
-        } else if (kernel_uses_llsc) {
-                long temp;
-
-                do {
-                        __asm__ __volatile__(
-                        "    .set    arch=r4000         \n"
-                        "    lld     %1, %2  # atomic64_sub_return \n"
-                        "    dsubu   %0, %1, %3         \n"
-                        "    scd     %0, %2             \n"
-                        "    .set    mips0              \n"
-                        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
-                        : "Ir" (i), "m" (v->counter)
-                        : "memory");
-                } while (unlikely(!result));
-
-                result = temp - i;
-        } else {
-                unsigned long flags;
-
-                raw_local_irq_save(flags);
-                result = v->counter;
-                result -= i;
-                v->counter = result;
-                raw_local_irq_restore(flags);
-        }
-
-        smp_llsc_mb();
-
-        return result;
-}
+#define ATOMIC64_OP(op, c_op, asm_op) \
+static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+{ \
+        if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+                long temp; \
+ \
+                __asm__ __volatile__( \
+                "    .set    arch=r4000                 \n" \
+                "1:  lld     %0, %1    # atomic64_" #op " \n" \
+                "    " #asm_op " %0, %2                 \n" \
+                "    scd     %0, %1                     \n" \
+                "    beqzl   %0, 1b                     \n" \
+                "    .set    mips0                      \n" \
+                : "=&r" (temp), "+m" (v->counter) \
+                : "Ir" (i)); \
+        } else if (kernel_uses_llsc) { \
+                long temp; \
+ \
+                do { \
+                        __asm__ __volatile__( \
+                        "    .set    arch=r4000         \n" \
+                        "    lld     %0, %1  # atomic64_" #op "\n" \
+                        "    " #asm_op " %0, %2         \n" \
+                        "    scd     %0, %1             \n" \
+                        "    .set    mips0              \n" \
+                        : "=&r" (temp), "+m" (v->counter) \
+                        : "Ir" (i)); \
+                } while (unlikely(!temp)); \
+        } else { \
+                unsigned long flags; \
+ \
+                raw_local_irq_save(flags); \
+                v->counter c_op i; \
+                raw_local_irq_restore(flags); \
+        } \
+}
+
+#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+{ \
+        long result; \
+ \
+        smp_mb__before_llsc(); \
+ \
+        if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+                long temp; \
+ \
+                __asm__ __volatile__( \
+                "    .set    arch=r4000                 \n" \
+                "1:  lld     %1, %2    # atomic64_" #op "_return \n" \
+                "    " #asm_op " %0, %1, %3             \n" \
+                "    scd     %0, %2                     \n" \
+                "    beqzl   %0, 1b                     \n" \
+                "    " #asm_op " %0, %1, %3             \n" \
+                "    .set    mips0                      \n" \
+                : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+                : "Ir" (i)); \
+        } else if (kernel_uses_llsc) { \
+                long temp; \
+ \
+                do { \
+                        __asm__ __volatile__( \
+                        "    .set    arch=r4000         \n" \
+                        "    lld     %1, %2  # atomic64_" #op "_return \n" \
+                        "    " #asm_op " %0, %1, %3     \n" \
+                        "    scd     %0, %2             \n" \
+                        "    .set    mips0              \n" \
+                        : "=&r" (result), "=&r" (temp), "=m" (v->counter) \
+                        : "Ir" (i), "m" (v->counter) \
+                        : "memory"); \
+                } while (unlikely(!result)); \
+ \
+                result = temp + i; \
+        } else { \
+                unsigned long flags; \
+ \
+                raw_local_irq_save(flags); \
+                result = v->counter; \
+                result c_op i; \
+                v->counter = result; \
+                raw_local_irq_restore(flags); \
+        } \
+ \
+        smp_llsc_mb(); \
+ \
+        return result; \
+}
+
+#define ATOMIC64_OPS(op, c_op, asm_op) \
+        ATOMIC64_OP(op, c_op, asm_op) \
+        ATOMIC64_OP_RETURN(op, c_op, asm_op)
+
+ATOMIC64_OPS(add, +=, daddu)
+ATOMIC64_OPS(sub, -=, dsubu)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 /*
  * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
...
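
A note on the mechanism, with a hypothetical user-space sketch (not part of the patch): the macros rely on stringification. "#asm_op" expands to the literal text "addu", "subu", "daddu" or "dsubu", and C's adjacent-string-literal concatenation splices it into the asm template, which is why a single template can emit any of the instructions:

/*
 * Hypothetical demonstration only (not kernel code): how "#asm_op"
 * pastes an instruction mnemonic into the asm template string.
 * Adjacent string literals are concatenated by the compiler, so the
 * stringified macro argument becomes part of one template.
 */
#include <stdio.h>

#define SHOW_TEMPLATE(op, asm_op) \
static const char *atomic_##op##_template(void) \
{ \
        return "1:  ll     %0, %1   # atomic_" #op "\n" \
               "    " #asm_op "  %0, %2            \n" \
               "    sc     %0, %1                  \n" \
               "    beqzl  %0, 1b                  \n"; \
}

SHOW_TEMPLATE(add, addu)        /* generated template contains "addu" */
SHOW_TEMPLATE(sub, subu)        /* generated template contains "subu" */

#undef SHOW_TEMPLATE

int main(void)
{
        fputs(atomic_add_template(), stdout);
        fputs(atomic_sub_template(), stdout);
        return 0;
}

ATOMIC_OPS(add, +=, addu) in the patch works the same way, additionally token-pasting op into the generated function names via ## and substituting c_op into the interrupts-disabled fallback.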