Commit 56fefbbc authored by Peter Zijlstra, committed by Ingo Molnar

locking/atomic, arch/s390: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()

Implement the FETCH-OP atomic primitives. These are very similar to
the existing OP-RETURN primitives, except that they return the value
of the atomic variable _before_ modification.

This is especially useful for irreversible operations -- such as
bitops (because it becomes impossible to reconstruct the state prior
to modification).
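
For example (an illustrative sketch against the generic atomic_t API,
not part of this patch):

	atomic_t v = ATOMIC_INIT(4);

	/* OP-RETURN: the result is the value _after_ the operation */
	int after = atomic_add_return(2, &v);	/* after == 6, v == 6 */

	/* FETCH-OP: the result is the value _before_ the operation */
	int before = atomic_fetch_add(2, &v);	/* before == 6, v == 8 */
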
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a28cc7bb
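
Both the 32-bit and 64-bit hunks below follow the same pattern: the
s390 __ATOMIC_LOOP()/__ATOMIC64_LOOP() helpers already yield the
counter value from before the operation, which is why
atomic_add_return() re-applies "+ i" to produce the new value while
the new atomic_fetch_add() can return the result unmodified.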
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -93,6 +93,11 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
 }
 
+static inline int atomic_fetch_add(int i, atomic_t *v)
+{
+	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
+}
+
 static inline void atomic_add(int i, atomic_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -114,22 +119,29 @@ static inline void atomic_add(int i, atomic_t *v)
 #define atomic_inc_and_test(_v)	(atomic_add_return(1, _v) == 0)
 #define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
 #define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
+#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)
 #define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
 #define atomic_dec(_v)			atomic_sub(1, _v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)	(atomic_sub_return(1, _v) == 0)
 
-#define ATOMIC_OP(op, OP)						\
+#define ATOMIC_OPS(op, OP)						\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
 	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
+}									\
+static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER);	\
 }
 
-ATOMIC_OP(and, AND)
-ATOMIC_OP(or, OR)
-ATOMIC_OP(xor, XOR)
+#define atomic_fetch_or atomic_fetch_or
+
+ATOMIC_OPS(and, AND)
+ATOMIC_OPS(or, OR)
+ATOMIC_OPS(xor, XOR)
 
-#undef ATOMIC_OP
+#undef ATOMIC_OPS
 
 #define atomic_xchg(v, new)		(xchg(&((v)->counter), new))
 
@@ -236,6 +248,11 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
 }
 
+static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+{
+	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
+}
+
 static inline void atomic64_add(long long i, atomic64_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -264,17 +281,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 	return old;
 }
 
-#define ATOMIC64_OP(op, OP)						\
+#define ATOMIC64_OPS(op, OP)						\
 static inline void atomic64_##op(long i, atomic64_t *v)		\
 {									\
 	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
+}									\
+static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
+{									\
+	return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
 }
 
-ATOMIC64_OP(and, AND)
-ATOMIC64_OP(or, OR)
-ATOMIC64_OP(xor, XOR)
+ATOMIC64_OPS(and, AND)
+ATOMIC64_OPS(or, OR)
+ATOMIC64_OPS(xor, XOR)
 
-#undef ATOMIC64_OP
+#undef ATOMIC64_OPS
 #undef __ATOMIC64_LOOP
 
 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
@@ -315,6 +336,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
 #define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long long)(_i), _v)
 #define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
 #define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
 #define atomic64_dec(_v)		atomic64_sub(1, _v)
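
Note: the "#define atomic_fetch_or atomic_fetch_or" line in the second
hunk exists because the generic <linux/atomic.h> of this era guards its
cmpxchg-based atomic_fetch_or() fallback with #ifndef atomic_fetch_or;
defining the macro tells it that s390 now supplies its own version.

As a sketch of why FETCH-OP helps with irreversible operations such as
OR (the old bit cannot be recovered from the OR'ed result), a
hypothetical helper built on the new primitive, not part of this patch:

	/* Illustrative only: report whether 'flag' was already set in *v. */
	static inline bool test_and_set_flag(atomic_t *v, int flag)
	{
		/* The returned old value tells us the prior state of 'flag'. */
		return (atomic_fetch_or(flag, v) & flag) != 0;
	}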