Commit 7d9794e7 authored by Peter Zijlstra, committed by Ingo Molnar

locking/atomic, arch/sh: Implement atomic_fetch_{add,sub,and,or,xor}()

Implement the FETCH-OP atomic primitives. These are very similar to
the existing OP-RETURN primitives, except that they return the value
of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops -- where the state prior to modification cannot otherwise be
reconstructed.
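
As an illustrative sketch (not part of this patch; "seen" and the bit
value are made-up names), a fetch-op lets a caller atomically set a
bit and still observe whether it was already set -- something an
op-return primitive cannot recover once the OR has been applied:

	static atomic_t seen = ATOMIC_INIT(0);

	/* the old value is returned, so the pre-OR state survives */
	if (atomic_fetch_or(0x1, &seen) & 0x1)
		return;	/* bit was already set by someone else */
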
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-sh@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 56fefbbc
arch/sh/include/asm/atomic-grb.h
@@ -43,16 +43,42 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return tmp;                                             \
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)                                     \
+static inline int atomic_fetch_##op(int i, atomic_t *v)         \
+{                                                               \
+	int res, tmp;                                           \
+                                                                \
+	__asm__ __volatile__ (                                  \
+		"   .align 2              \n\t"                 \
+		"   mova    1f,   r0      \n\t" /* r0 = end point */     \
+		"   mov     r15,  r1      \n\t" /* r1 = saved sp */      \
+		"   mov     #-6,  r15     \n\t" /* LOGIN: r15 = size */  \
+		"   mov.l   @%2,  %0      \n\t" /* load old value */     \
+		"   mov     %0,   %1      \n\t" /* save old value */     \
+		" " #op "   %3,   %0      \n\t" /* $op */                \
+		"   mov.l   %0,   @%2     \n\t" /* store new value */    \
+		"1: mov     r1,   r15     \n\t" /* LOGOUT */             \
+		: "=&r" (tmp), "=&r" (res), "+r" (v)            \
+		: "r" (i)                                       \
+		: "memory", "r0", "r1");                        \
+                                                                \
+	return res;                                             \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
arch/sh/include/asm/atomic-irq.h
@@ -33,15 +33,38 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return temp;                                            \
 }
 
-#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+#define ATOMIC_FETCH_OP(op, c_op)                               \
+static inline int atomic_fetch_##op(int i, atomic_t *v)         \
+{                                                               \
+	unsigned long temp, flags;                              \
+                                                                \
+	raw_local_irq_save(flags);                              \
+	temp = v->counter;                                      \
+	v->counter c_op i;                                      \
+	raw_local_irq_restore(flags);                           \
+                                                                \
+	return temp;                                            \
+}
+
+#define ATOMIC_OPS(op, c_op)                                    \
+	ATOMIC_OP(op, c_op)                                     \
+	ATOMIC_OP_RETURN(op, c_op)                              \
+	ATOMIC_FETCH_OP(op, c_op)
 
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
 
-ATOMIC_OP(and, &=)
-ATOMIC_OP(or, |=)
-ATOMIC_OP(xor, ^=)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op)                                    \
+	ATOMIC_OP(op, c_op)                                     \
+	ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &=)
+ATOMIC_OPS(or, |=)
+ATOMIC_OPS(xor, ^=)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
arch/sh/include/asm/atomic-llsc.h
@@ -48,15 +48,39 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return temp;                                            \
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)                                     \
+static inline int atomic_fetch_##op(int i, atomic_t *v)         \
+{                                                               \
+	unsigned long res, temp;                                \
+                                                                \
+	__asm__ __volatile__ (                                  \
+"1:	movli.l @%3, %0		! atomic_fetch_" #op "	\n"     \
+"	mov %0, %1				\n"             \
+"	" #op "	%2, %0				\n"             \
+"	movco.l	%0, @%3				\n"             \
+"	bf	1b				\n"             \
+"	synco					\n"             \
+	: "=&z" (temp), "=&r" (res)                             \
+	: "r" (i), "r" (&v->counter)                            \
+	: "t");                                                 \
+                                                                \
+	return res;                                             \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
arch/sh/include/asm/atomic.h
@@ -25,6 +25,8 @@
 #include <asm/atomic-irq.h>
 #endif
 
+#define atomic_fetch_or atomic_fetch_or
+
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 #define atomic_dec_return(v)		atomic_sub_return(1, (v))
 #define atomic_inc_return(v)		atomic_add_return(1, (v))