Commit e39d88ea authored by Peter Zijlstra, committed by Ingo Molnar

locking/atomic, arch/m68k: Implement atomic_fetch_{add,sub,and,or,xor}()

Implement FETCH-OP atomic primitives. These are very similar to the
existing OP-RETURN primitives, except that they return the value of
the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops, where the state prior to modification cannot be reconstructed
from the result of the operation.
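
For illustration only (added for this writeup, not part of the commit message): a minimal sketch of why returning the pre-modification value matters for an irreversible operation such as clearing bits. The helper name test_and_clear_flag() is made up, and kernel types (atomic_t, bool) are assumed to be available; it is written against the generic atomic_fetch_and(int i, atomic_t *v) signature this series introduces.

/* Hypothetical helper: clear 'flag' and report whether it was set before. */
static inline bool test_and_clear_flag(int flag, atomic_t *v)
{
	/* atomic_fetch_and() returns the value *before* the AND is applied. */
	int old = atomic_fetch_and(~flag, v);

	return (old & flag) != 0;
}

/*
 * A RETURN-style primitive that handed back the *new* value would not
 * help here: once the AND has been applied, the cleared bits cannot be
 * reconstructed from the result, so the caller could never tell whether
 * 'flag' had been set.
 */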

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-m68k@lists.linux-m68k.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f6493705
@@ -38,6 +38,13 @@ static inline void atomic_##op(int i, atomic_t *v) \
#ifdef CONFIG_RMW_INSNS
/*
* Am I reading these CAS loops right in that %2 is the old value and the first
* iteration uses an uninitialized value?
*
* Would it not make sense to add: tmp = atomic_read(v); to avoid this?
*/
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
@@ -53,6 +60,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
return t; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
int t, tmp; \
\
__asm__ __volatile__( \
"1: movel %2,%1\n" \
" " #asm_op "l %3,%1\n" \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
: "g" (i), "2" (atomic_read(v))); \
return tmp; \
}
#else
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
@@ -68,20 +90,43 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
return t; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned long flags; \
int t; \
\
local_irq_save(flags); \
t = v->counter; \
v->counter c_op i; \
local_irq_restore(flags); \
\
return t; \
}
#endif /* CONFIG_RMW_INSNS */
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op)
ATOMIC_OP_RETURN(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, eor)
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_FETCH_OP(op, c_op, asm_op)
#define atomic_fetch_or atomic_fetch_or
ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
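
As an aside for readers unfamiliar with the m68k casl instruction: the CONFIG_RMW_INSNS variant above is a standard compare-and-swap loop. Below is a rough sketch of the same logic, written with the GCC __atomic builtins purely for illustration; the type and function names and the memory orderings are assumptions for the sketch, not what the kernel macro expands to.

#include <stdbool.h>

typedef struct { int counter; } atomic_sketch_t;	/* stand-in for atomic_t */

/* Sketch of atomic_fetch_add() as a CAS loop, mirroring the casl version. */
static inline int sketch_fetch_add(int i, atomic_sketch_t *v)
{
	/* Plays the role of %2 ("tmp"), seeded like the "2" (atomic_read(v)) constraint. */
	int old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
	int new;

	do {
		new = old + i;			/* "addl %3,%1" */
		/*
		 * "casl %2,%1,%0" + "jne 1b": if v->counter still equals
		 * 'old', store 'new' and stop; otherwise 'old' is refreshed
		 * with the current value and the loop retries.
		 */
	} while (!__atomic_compare_exchange_n(&v->counter, &old, new,
					      false, __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));

	return old;	/* value of the atomic variable before modification */
}

The !CONFIG_RMW_INSNS fallback in the same hunk reaches the same result the simple way: it reads v->counter into t, applies c_op with interrupts disabled, and returns t, which is sufficient on parts without casl.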