Commit ae8c35c8 authored by Peter Zijlstra, committed by Thomas Gleixner

s390: Provide atomic_{or,xor,and}

Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.
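
For example, a hypothetical caller (FLAGS_PENDING and s are illustrative
names, not part of this patch) would migrate as follows:

	/* before */
	atomic_set_mask(FLAGS_PENDING, &s->flags);	/* set bits   */
	atomic_clear_mask(FLAGS_PENDING, &s->flags);	/* clear bits */

	/* after */
	atomic_or(FLAGS_PENDING, &s->flags);
	atomic_and(~FLAGS_PENDING, &s->flags);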
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 2a3ed90f
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -27,6 +27,7 @@
 #define __ATOMIC_OR "lao"
 #define __ATOMIC_AND "lan"
 #define __ATOMIC_ADD "laa"
+#define __ATOMIC_XOR "lax"
 #define __ATOMIC_BARRIER "bcr 14,0\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
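
Note: the body of __ATOMIC_LOOP lies outside this hunk. In this branch of
the header (guarded by CONFIG_HAVE_MARCH_Z196_FEATURES), it is not a loop
at all: it emits the single interlocked-access instruction named by
op_string (lao, lan, laa, and now lax), which atomically applies the
operation to the word in memory and returns the old value. A compilable
sketch of the xor case (an approximation of the header's asm, not the
verbatim kernel code; atomic_xor_old_sketch is an illustrative name):

	static inline int atomic_xor_old_sketch(int i, int *counter)
	{
		int old;

		/* One interlocked instruction: *counter ^= i, and the
		 * previous contents of *counter land in `old`. */
		asm volatile(
			"	lax	%0,%2,%1\n"
			: "=d" (old), "+Q" (*counter)
			: "d" (i)
			: "cc", "memory");
		return old;
	}

The 64-bit hunks further down add the analogous laxg mnemonic.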
@@ -49,6 +50,7 @@
 #define __ATOMIC_OR "or"
 #define __ATOMIC_AND "nr"
 #define __ATOMIC_ADD "ar"
+#define __ATOMIC_XOR "xr"
 #define __ATOMIC_BARRIER "\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
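
Without the interlocked-access facility, the same macro expands to a
compare-and-swap retry loop: the plain register instructions above (or,
nr, ar, and now xr) compute the new value, and a cs instruction publishes
it, retrying on contention. A behavioral sketch in portable C, using GCC
__atomic builtins instead of the header's cs-based asm
(atomic_xor_cas_sketch is an illustrative name):

	static inline void atomic_xor_cas_sketch(int i, int *counter)
	{
		int old = __atomic_load_n(counter, __ATOMIC_RELAXED);

		/* Keep retrying until the CAS installs old ^ i; on
		 * failure the builtin refreshes `old` with the current
		 * value, much like the cs loop in the header. */
		while (!__atomic_compare_exchange_n(counter, &old, old ^ i,
						    false, __ATOMIC_RELAXED,
						    __ATOMIC_RELAXED))
			;
	}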
@@ -118,14 +120,26 @@ static inline void atomic_add(int i, atomic_t *v)
 #define atomic_dec_return(_v) atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
+#define ATOMIC_OP(op, OP) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \
+}
+
+ATOMIC_OP(and, AND)
+ATOMIC_OP(or, OR)
+ATOMIC_OP(xor, XOR)
+
+#undef ATOMIC_OP
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
+	atomic_and(~mask, v);
 }
 
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
+	atomic_or(mask, v);
 }
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
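
ATOMIC_OP is a plain code generator: each invocation stamps out one
complete function, so the three invocations above define atomic_and(),
atomic_or() and atomic_xor(), and the helper is then discarded with
#undef. Expanded by hand, ATOMIC_OP(xor, XOR) yields exactly:

	static inline void atomic_xor(int i, atomic_t *v)
	{
		__ATOMIC_LOOP(v, i, __ATOMIC_XOR, __ATOMIC_NO_BARRIER);
	}

The old mask helpers survive as one-line wrappers, now tagged
__deprecated so remaining callers get a compile-time nudge toward the
new ops.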
@@ -167,6 +181,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define __ATOMIC64_OR "laog"
 #define __ATOMIC64_AND "lang"
 #define __ATOMIC64_ADD "laag"
+#define __ATOMIC64_XOR "laxg"
 #define __ATOMIC64_BARRIER "bcr 14,0\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
@@ -189,6 +204,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define __ATOMIC64_OR "ogr"
 #define __ATOMIC64_AND "ngr"
 #define __ATOMIC64_ADD "agr"
+#define __ATOMIC64_XOR "xgr"
 #define __ATOMIC64_BARRIER "\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
@@ -247,16 +263,6 @@ static inline void atomic64_add(long long i, atomic64_t *v)
 	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
 }
 
-static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
-}
-
-static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
-}
-
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
 static inline long long atomic64_cmpxchg(atomic64_t *v,
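
Unlike the 32-bit side, the 64-bit mask helpers are deleted outright
rather than kept as deprecated wrappers. A caller would switch to the
new ops directly (hypothetical usage, not part of this patch):

	atomic64_and(~mask, v);	/* replaces atomic64_clear_mask(mask, v) */
	atomic64_or(mask, v);	/* replaces atomic64_set_mask(mask, v)   */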
@@ -270,6 +276,19 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 	return old;
 }
 
+#define ATOMIC64_OP(op, OP) \
+static inline void atomic64_##op(long i, atomic64_t *v) \
+{ \
+	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
+}
+
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC64_OP(and, AND)
+ATOMIC64_OP(or, OR)
+ATOMIC64_OP(xor, XOR)
+
+#undef ATOMIC64_OP
 #undef __ATOMIC64_LOOP
 
 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
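
CONFIG_ARCH_HAS_ATOMIC_OR signals to generic code that this architecture
now provides atomic_or() natively. The pre-existing fallback in
include/linux/atomic.h is guarded roughly like this (shown for context,
not part of this patch):

	#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
	static inline void atomic_or(int i, atomic_t *v)
	{
		atomic_set_mask(i, v);
	}
	#endif

Defining the symbol here suppresses that fallback; the marker is
transitional and goes away later in the series once every architecture
supplies the ops itself.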