Commit 28aa2bda authored by Peter Zijlstra, committed by Ingo Molnar

locking/atomic: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()

Now that all the architectures have implemented support for these new
atomic primitives, add the generic infrastructure to expose and use
them.
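
As a rough illustration of the semantics being wired up here (this example
caller is not part of the patch; the names and values are made up), the new
fetch_<op>() primitives return the value the atomic variable held before the
operation, whereas the existing <op>_return() primitives return the value
after it:

	static atomic_t example_counter = ATOMIC_INIT(4);

	static void fetch_vs_return_example(void)
	{
		/* add_return() yields the updated value: 4 + 2 = 6 */
		int after = atomic_add_return(2, &example_counter);

		/* fetch_add() yields the prior value (6); the counter becomes 8 */
		int before = atomic_fetch_add(2, &example_counter);

		/* the _relaxed/_acquire/_release variants follow the same naming pattern */
		int old_bits = atomic_fetch_or_relaxed(0x1, &example_counter);

		(void)after; (void)before; (void)old_bits;
	}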
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e1213332
@@ -112,6 +112,40 @@ static __always_inline void atomic_long_dec(atomic_long_t *l)
 	ATOMIC_LONG_PFX(_dec)(v);
 }

+#define ATOMIC_LONG_FETCH_OP(op, mo) \
+static inline long \
+atomic_long_fetch_##op##mo(long i, atomic_long_t *l) \
+{ \
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+	return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v); \
+}
+
+ATOMIC_LONG_FETCH_OP(add, )
+ATOMIC_LONG_FETCH_OP(add, _relaxed)
+ATOMIC_LONG_FETCH_OP(add, _acquire)
+ATOMIC_LONG_FETCH_OP(add, _release)
+ATOMIC_LONG_FETCH_OP(sub, )
+ATOMIC_LONG_FETCH_OP(sub, _relaxed)
+ATOMIC_LONG_FETCH_OP(sub, _acquire)
+ATOMIC_LONG_FETCH_OP(sub, _release)
+ATOMIC_LONG_FETCH_OP(and, )
+ATOMIC_LONG_FETCH_OP(and, _relaxed)
+ATOMIC_LONG_FETCH_OP(and, _acquire)
+ATOMIC_LONG_FETCH_OP(and, _release)
+ATOMIC_LONG_FETCH_OP(andnot, )
+ATOMIC_LONG_FETCH_OP(andnot, _relaxed)
+ATOMIC_LONG_FETCH_OP(andnot, _acquire)
+ATOMIC_LONG_FETCH_OP(andnot, _release)
+ATOMIC_LONG_FETCH_OP(or, )
+ATOMIC_LONG_FETCH_OP(or, _relaxed)
+ATOMIC_LONG_FETCH_OP(or, _acquire)
+ATOMIC_LONG_FETCH_OP(or, _release)
+ATOMIC_LONG_FETCH_OP(xor, )
+ATOMIC_LONG_FETCH_OP(xor, _relaxed)
+ATOMIC_LONG_FETCH_OP(xor, _acquire)
+ATOMIC_LONG_FETCH_OP(xor, _release)
+
 #define ATOMIC_LONG_OP(op) \
 static __always_inline void \
 atomic_long_##op(long i, atomic_long_t *l) \
@@ -124,9 +158,9 @@ atomic_long_##op(long i, atomic_long_t *l) \
 ATOMIC_LONG_OP(add)
 ATOMIC_LONG_OP(sub)
 ATOMIC_LONG_OP(and)
+ATOMIC_LONG_OP(andnot)
 ATOMIC_LONG_OP(or)
 ATOMIC_LONG_OP(xor)
-ATOMIC_LONG_OP(andnot)

 #undef ATOMIC_LONG_OP
...
@@ -61,6 +61,18 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return c c_op i; \
 }

+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+	int c, old; \
+ \
+	c = v->counter; \
+	while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+		c = old; \
+ \
+	return c; \
+}
+
 #else

 #include <linux/irqflags.h>
@@ -88,6 +100,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 	return ret; \
 }

+#define ATOMIC_FETCH_OP(op, c_op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+	unsigned long flags; \
+	int ret; \
+ \
+	raw_local_irq_save(flags); \
+	ret = v->counter; \
+	v->counter = v->counter c_op i; \
+	raw_local_irq_restore(flags); \
+ \
+	return ret; \
+}
+
 #endif /* CONFIG_SMP */

 #ifndef atomic_add_return
@@ -98,6 +124,28 @@ ATOMIC_OP_RETURN(add, +)
 ATOMIC_OP_RETURN(sub, -)
 #endif

+#ifndef atomic_fetch_add
+ATOMIC_FETCH_OP(add, +)
+#endif
+
+#ifndef atomic_fetch_sub
+ATOMIC_FETCH_OP(sub, -)
+#endif
+
+#ifndef atomic_fetch_and
+ATOMIC_FETCH_OP(and, &)
+#endif
+
+#ifndef atomic_fetch_or
+#define atomic_fetch_or atomic_fetch_or
+ATOMIC_FETCH_OP(or, |)
+#endif
+
+#ifndef atomic_fetch_xor
+ATOMIC_FETCH_OP(xor, ^)
+#endif
+
 #ifndef atomic_and
 ATOMIC_OP(and, &)
 #endif
@@ -110,6 +158,7 @@ ATOMIC_OP(or, |)
 ATOMIC_OP(xor, ^)
 #endif

+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
...
@@ -27,16 +27,23 @@ extern void atomic64_##op(long long a, atomic64_t *v);
 #define ATOMIC64_OP_RETURN(op) \
 extern long long atomic64_##op##_return(long long a, atomic64_t *v);

-#define ATOMIC64_OPS(op)	ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+#define ATOMIC64_FETCH_OP(op) \
+extern long long atomic64_fetch_##op(long long a, atomic64_t *v);
+
+#define ATOMIC64_OPS(op)	ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)

 ATOMIC64_OPS(add)
 ATOMIC64_OPS(sub)

-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op)	ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op)
+
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)

 #undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
...
This diff is collapsed.
@@ -96,17 +96,41 @@ long long atomic64_##op##_return(long long a, atomic64_t *v) \
 } \
 EXPORT_SYMBOL(atomic64_##op##_return);

+#define ATOMIC64_FETCH_OP(op, c_op) \
+long long atomic64_fetch_##op(long long a, atomic64_t *v) \
+{ \
+	unsigned long flags; \
+	raw_spinlock_t *lock = lock_addr(v); \
+	long long val; \
+ \
+	raw_spin_lock_irqsave(lock, flags); \
+	val = v->counter; \
+	v->counter c_op a; \
+	raw_spin_unlock_irqrestore(lock, flags); \
+	return val; \
+} \
+EXPORT_SYMBOL(atomic64_fetch_##op);
+
 #define ATOMIC64_OPS(op, c_op) \
 	ATOMIC64_OP(op, c_op) \
-	ATOMIC64_OP_RETURN(op, c_op)
+	ATOMIC64_OP_RETURN(op, c_op) \
+	ATOMIC64_FETCH_OP(op, c_op)

 ATOMIC64_OPS(add, +=)
 ATOMIC64_OPS(sub, -=)
-ATOMIC64_OP(and, &=)
-ATOMIC64_OP(or, |=)
-ATOMIC64_OP(xor, ^=)

 #undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op) \
+	ATOMIC64_OP(op, c_op) \
+	ATOMIC64_OP_RETURN(op, c_op) \
+	ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &=)
+ATOMIC64_OPS(or, |=)
+ATOMIC64_OPS(xor, ^=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
...
@@ -53,11 +53,25 @@ do { \
 	BUG_ON(atomic##bit##_read(&v) != r); \
 } while (0)

+#define TEST_FETCH(bit, op, c_op, val) \
+do { \
+	atomic##bit##_set(&v, v0); \
+	r = v0; \
+	r c_op val; \
+	BUG_ON(atomic##bit##_##op(val, &v) != v0); \
+	BUG_ON(atomic##bit##_read(&v) != r); \
+} while (0)
+
 #define RETURN_FAMILY_TEST(bit, op, c_op, val) \
 do { \
 	FAMILY_TEST(TEST_RETURN, bit, op, c_op, val); \
 } while (0)

+#define FETCH_FAMILY_TEST(bit, op, c_op, val) \
+do { \
+	FAMILY_TEST(TEST_FETCH, bit, op, c_op, val); \
+} while (0)
+
 #define TEST_ARGS(bit, op, init, ret, expect, args...) \
 do { \
 	atomic##bit##_set(&v, init); \
@@ -114,6 +128,16 @@ static __init void test_atomic(void)
 	RETURN_FAMILY_TEST(, sub_return, -=, onestwos);
 	RETURN_FAMILY_TEST(, sub_return, -=, -one);

+	FETCH_FAMILY_TEST(, fetch_add, +=, onestwos);
+	FETCH_FAMILY_TEST(, fetch_add, +=, -one);
+	FETCH_FAMILY_TEST(, fetch_sub, -=, onestwos);
+	FETCH_FAMILY_TEST(, fetch_sub, -=, -one);
+
+	FETCH_FAMILY_TEST(, fetch_or, |=, v1);
+	FETCH_FAMILY_TEST(, fetch_and, &=, v1);
+	FETCH_FAMILY_TEST(, fetch_andnot, &= ~, v1);
+	FETCH_FAMILY_TEST(, fetch_xor, ^=, v1);
+
 	INC_RETURN_FAMILY_TEST(, v0);
 	DEC_RETURN_FAMILY_TEST(, v0);
@@ -154,6 +178,16 @@ static __init void test_atomic64(void)
 	RETURN_FAMILY_TEST(64, sub_return, -=, onestwos);
 	RETURN_FAMILY_TEST(64, sub_return, -=, -one);

+	FETCH_FAMILY_TEST(64, fetch_add, +=, onestwos);
+	FETCH_FAMILY_TEST(64, fetch_add, +=, -one);
+	FETCH_FAMILY_TEST(64, fetch_sub, -=, onestwos);
+	FETCH_FAMILY_TEST(64, fetch_sub, -=, -one);
+
+	FETCH_FAMILY_TEST(64, fetch_or, |=, v1);
+	FETCH_FAMILY_TEST(64, fetch_and, &=, v1);
+	FETCH_FAMILY_TEST(64, fetch_andnot, &= ~, v1);
+	FETCH_FAMILY_TEST(64, fetch_xor, ^=, v1);
+
 	INIT(v0);
 	atomic64_inc(&v);
 	r += one;
...