Commit 92ba1f53 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

locking,arch,arm64: Fold atomic_ops

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Requires the asm_op due to eor.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chen Gang <gang.chen@asianux.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20140508135851.995123148@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent aee9a554
@@ -43,69 +43,51 @@
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
unsigned long tmp;
int result;
asm volatile("// atomic_add\n"
"1: ldxr %w0, %2\n"
" add %w0, %w0, %w3\n"
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i));
}
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;
asm volatile("// atomic_add_return\n"
"1: ldxr %w0, %2\n"
" add %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
: "memory");
smp_mb();
return result;
}
static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long tmp;
int result;
asm volatile("// atomic_sub\n" #define ATOMIC_OP(op, asm_op) \
"1: ldxr %w0, %2\n" static inline void atomic_##op(int i, atomic_t *v) \
" sub %w0, %w0, %w3\n" { \
" stxr %w1, %w0, %2\n" unsigned long tmp; \
" cbnz %w1, 1b" int result; \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); asm volatile("// atomic_" #op "\n" \
"1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" stxr %w1, %w0, %2\n" \
" cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
\
asm volatile("// atomic_" #op "_return\n" \
"1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" stlxr %w1, %w0, %2\n" \
" cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i) \
: "memory"); \
\
smp_mb(); \
return result; \
} }
static inline int atomic_sub_return(int i, atomic_t *v) #define ATOMIC_OPS(op, asm_op) \
{ ATOMIC_OP(op, asm_op) \
unsigned long tmp; ATOMIC_OP_RETURN(op, asm_op)
int result;
asm volatile("// atomic_sub_return\n" ATOMIC_OPS(add, add)
"1: ldxr %w0, %2\n" ATOMIC_OPS(sub, sub)
" sub %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
: "memory");
smp_mb(); #undef ATOMIC_OPS
return result; #undef ATOMIC_OP_RETURN
} #undef ATOMIC_OP
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
@@ -160,69 +142,50 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))
static inline void atomic64_add(u64 i, atomic64_t *v) #define ATOMIC64_OP(op, asm_op) \
{ static inline void atomic64_##op(long i, atomic64_t *v) \
long result; { \
unsigned long tmp; long result; \
unsigned long tmp; \
asm volatile("// atomic64_add\n" \
"1: ldxr %0, %2\n" asm volatile("// atomic64_" #op "\n" \
" add %0, %0, %3\n" "1: ldxr %0, %2\n" \
" stxr %w1, %0, %2\n" " " #asm_op " %0, %0, %3\n" \
" cbnz %w1, 1b" " stxr %w1, %0, %2\n" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) " cbnz %w1, 1b" \
: "Ir" (i)); : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
#define ATOMIC64_OP_RETURN(op, asm_op) \
static inline long atomic64_##op##_return(long i, atomic64_t *v) \
{ \
long result; \
unsigned long tmp; \
\
asm volatile("// atomic64_" #op "_return\n" \
"1: ldxr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" stlxr %w1, %0, %2\n" \
" cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i) \
: "memory"); \
\
smp_mb(); \
return result; \
} }
static inline long atomic64_add_return(long i, atomic64_t *v) #define ATOMIC64_OPS(op, asm_op) \
{ ATOMIC64_OP(op, asm_op) \
long result; ATOMIC64_OP_RETURN(op, asm_op)
unsigned long tmp;
asm volatile("// atomic64_add_return\n" ATOMIC64_OPS(add, add)
"1: ldxr %0, %2\n" ATOMIC64_OPS(sub, sub)
" add %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
: "memory");
smp_mb(); #undef ATOMIC64_OPS
return result; #undef ATOMIC64_OP_RETURN
} #undef ATOMIC64_OP
static inline void atomic64_sub(u64 i, atomic64_t *v)
{
long result;
unsigned long tmp;
asm volatile("// atomic64_sub\n"
"1: ldxr %0, %2\n"
" sub %0, %0, %3\n"
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i));
}
static inline long atomic64_sub_return(long i, atomic64_t *v)
{
long result;
unsigned long tmp;
asm volatile("// atomic64_sub_return\n"
"1: ldxr %0, %2\n"
" sub %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
: "memory");
smp_mb();
return result;
}
static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment