Commit b53d6bed authored by Peter Zijlstra, committed by Ingo Molnar

locking/atomic: Remove linux/atomic.h:atomic_fetch_or()

Since all architectures now implement this natively, remove this dead code.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 28aa2bda
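For context before the per-architecture hunks: the generic header guards its fallback with #ifndef atomic_fetch_or, and an architecture that supplies its own atomic_fetch_or() defines `#define atomic_fetch_or atomic_fetch_or` to suppress that fallback. Every architecture now generates atomic_fetch_or() through its regular ATOMIC_FETCH_OP()/ATOMIC_OPS() machinery, so the per-architecture override defines (and, at the end of this diff, the fallback itself) can be deleted. As a rough illustration of that macro-generation pattern, here is a standalone userspace sketch; the names (my_atomic_t, MY_ATOMIC_FETCH_OP) are invented for the example, and GCC __atomic builtins stand in for the kernel's per-architecture implementations.

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;	/* stand-in for the kernel's atomic_t */

/* Generate my_atomic_fetch_<op>(): apply <op> atomically and return the old value. */
#define MY_ATOMIC_FETCH_OP(op, builtin)					\
static inline int my_atomic_fetch_##op(int i, my_atomic_t *v)		\
{									\
	return builtin(&v->counter, i, __ATOMIC_SEQ_CST);		\
}

MY_ATOMIC_FETCH_OP(or,  __atomic_fetch_or)
MY_ATOMIC_FETCH_OP(and, __atomic_fetch_and)
MY_ATOMIC_FETCH_OP(xor, __atomic_fetch_xor)

int main(void)
{
	my_atomic_t v = { 0x3 };
	int old = my_atomic_fetch_or(0xc, &v);

	printf("fetch_or returned 0x%x, counter is now 0x%x\n", old, v.counter);
	return 0;
}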
@@ -153,8 +153,6 @@ ATOMIC_OPS(sub)
 #define atomic_andnot atomic_andnot
 #define atomic64_andnot atomic64_andnot

-#define atomic_fetch_or atomic_fetch_or
-
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, asm) \
 	ATOMIC_OP(op, asm) \
...
@@ -189,8 +189,6 @@ ATOMIC_OPS(sub, -=, sub)
 #define atomic_andnot atomic_andnot

-#define atomic_fetch_or atomic_fetch_or
-
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, c_op, asm_op) \
 	ATOMIC_OP(op, c_op, asm_op) \
...
@@ -201,8 +201,6 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
 	return val; \
 }

-#define atomic_fetch_or atomic_fetch_or
-
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
...
@@ -128,8 +128,6 @@
 #define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,)
 #define atomic_andnot atomic_andnot

-#define atomic_fetch_or atomic_fetch_or
-
 /*
  * 64-bit atomic operations.
  */
...
@@ -66,8 +66,6 @@ ATOMIC_OP_RETURN(add, add, r)
 ATOMIC_FETCH_OP (sub, sub, rKs21)
 ATOMIC_FETCH_OP (add, add, r)

-#define atomic_fetch_or atomic_fetch_or
-
 #define ATOMIC_OPS(op, asm_op) \
 ATOMIC_OP_RETURN(op, asm_op, r) \
 static inline void atomic_##op(int i, atomic_t *v) \
...
@@ -74,8 +74,6 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)

-#define atomic_fetch_or atomic_fetch_or
-
 /*
  * 64-bit atomic ops
  */
...
@@ -54,8 +54,6 @@ static inline void atomic_##op(int i, atomic_t *v) \
 ATOMIC_OP_RETURN(add, +=)
 ATOMIC_OP_RETURN(sub, -=)

-#define atomic_fetch_or atomic_fetch_or
-
 #define ATOMIC_OPS(op, c_op) \
 	ATOMIC_OP(op, c_op) \
 	ATOMIC_FETCH_OP(op, c_op)
...
@@ -152,8 +152,6 @@ ATOMIC_OPS(sub)
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

-#define atomic_fetch_or atomic_fetch_or
-
 ATOMIC_OPS(and)
 ATOMIC_OPS(or)
 ATOMIC_OPS(xor)
...
@@ -121,8 +121,6 @@ ATOMIC_OPS(sub)
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

-#define atomic_fetch_or atomic_fetch_or
-
 ATOMIC_OPS(and)
 ATOMIC_OPS(or)
 ATOMIC_OPS(xor)
...
@@ -119,8 +119,6 @@ ATOMIC_OPS(sub, -=, sub)
 	ATOMIC_OP(op, c_op, asm_op) \
 	ATOMIC_FETCH_OP(op, c_op, asm_op)

-#define atomic_fetch_or atomic_fetch_or
-
 ATOMIC_OPS(and, &=, and)
 ATOMIC_OPS(or, |=, or)
 ATOMIC_OPS(xor, ^=, eor)
...
@@ -17,8 +17,6 @@
 #include <asm/atomic_lnkget.h>
 #endif

-#define atomic_fetch_or atomic_fetch_or
-
 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)

 #define atomic_dec_return(v) atomic_sub_return(1, (v))
...
@@ -194,8 +194,6 @@ ATOMIC_OPS(sub, -=, subu)
 	ATOMIC_OP(op, c_op, asm_op) \
 	ATOMIC_FETCH_OP(op, c_op, asm_op)

-#define atomic_fetch_or atomic_fetch_or
-
 ATOMIC_OPS(and, &=, and)
 ATOMIC_OPS(or, |=, or)
 ATOMIC_OPS(xor, ^=, xor)
...
@@ -113,8 +113,6 @@ ATOMIC_OPS(sub)
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

-#define atomic_fetch_or atomic_fetch_or
-
 ATOMIC_OPS(and)
 ATOMIC_OPS(or)
 ATOMIC_OPS(xor)
...
@@ -148,8 +148,6 @@ ATOMIC_OPS(sub, -=)
 	ATOMIC_OP(op, c_op) \
 	ATOMIC_FETCH_OP(op, c_op)

-#define atomic_fetch_or atomic_fetch_or
-
 ATOMIC_OPS(and, &=)
 ATOMIC_OPS(or, |=)
 ATOMIC_OPS(xor, ^=)
...
@@ -135,8 +135,6 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
 	return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER); \
 }

-#define atomic_fetch_or atomic_fetch_or
-
 ATOMIC_OPS(and, AND)
 ATOMIC_OPS(or, OR)
 ATOMIC_OPS(xor, XOR)
...
@@ -25,8 +25,6 @@
 #include <asm/atomic-irq.h>
 #endif

-#define atomic_fetch_or atomic_fetch_or
-
 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
 #define atomic_dec_return(v) atomic_sub_return(1, (v))
 #define atomic_inc_return(v) atomic_add_return(1, (v))
...
@@ -5,5 +5,4 @@
 #else
 #include <asm/atomic_32.h>
 #endif
-#define atomic_fetch_or atomic_fetch_or
 #endif
@@ -36,8 +36,6 @@ void atomic_set(atomic_t *, int);
 #define atomic_inc(v) ((void)atomic_add_return( 1, (v)))
 #define atomic_dec(v) ((void)atomic_add_return( -1, (v)))

-#define atomic_fetch_or atomic_fetch_or
-
 #define atomic_and(i, v) ((void)atomic_fetch_and((i), (v)))
 #define atomic_or(i, v) ((void)atomic_fetch_or((i), (v)))
 #define atomic_xor(i, v) ((void)atomic_fetch_xor((i), (v)))
...
@@ -48,8 +48,6 @@ static inline int atomic_read(const atomic_t *v)
 #define atomic_fetch_sub(i, v) atomic_fetch_add(-(int)(i), (v))

-#define atomic_fetch_or atomic_fetch_or
-
 /**
  * atomic_sub - subtract integer from atomic variable
  * @i: integer value to subtract
...
@@ -217,8 +217,6 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
 	ATOMIC_OP(op) \
 	ATOMIC_FETCH_OP(op, c_op)

-#define atomic_fetch_or atomic_fetch_or
-
 ATOMIC_OPS(and, &)
 ATOMIC_OPS(or , |)
 ATOMIC_OPS(xor, ^)
...
@@ -188,8 +188,6 @@ ATOMIC_OPS(sub)
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

-#define atomic_fetch_or atomic_fetch_or
-
 ATOMIC_OPS(and)
 ATOMIC_OPS(or)
 ATOMIC_OPS(xor)
...
@@ -137,8 +137,6 @@ ATOMIC_FETCH_OP(and, &)
 #endif

 #ifndef atomic_fetch_or
-#define atomic_fetch_or atomic_fetch_or
-
 ATOMIC_FETCH_OP(or, |)
 #endif
...
@@ -573,27 +573,6 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 }
 #endif

-/**
- * atomic_fetch_or - perform *p |= mask and return old value of *p
- * @mask: mask to OR on the atomic_t
- * @p: pointer to atomic_t
- */
-#ifndef atomic_fetch_or
-static inline int atomic_fetch_or(int mask, atomic_t *p)
-{
-	int old, val = atomic_read(p);
-
-	for (;;) {
-		old = atomic_cmpxchg(p, val, val | mask);
-		if (old == val)
-			break;
-
-		val = old;
-	}
-
-	return old;
-}
-#endif
 #ifdef CONFIG_GENERIC_ATOMIC64
 #include <asm-generic/atomic64.h>
 #endif
...
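The final hunk above removes the generic fallback, which implemented atomic_fetch_or() as an atomic_cmpxchg() retry loop. For readers outside the kernel tree, here is a minimal standalone sketch of that same retry-loop pattern; the names are illustrative only, and GCC __atomic builtins replace the kernel's atomic_read()/atomic_cmpxchg() on atomic_t.

#include <stdbool.h>
#include <stdio.h>

typedef struct { int counter; } my_atomic_t;	/* stand-in for the kernel's atomic_t */

/* cmpxchg()-style helper: if *p == old, store newval; always return the value seen in *p. */
static inline int my_cmpxchg(int *p, int old, int newval)
{
	int expected = old;

	__atomic_compare_exchange_n(p, &expected, newval, false,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return expected;	/* == old on success, the conflicting value on failure */
}

/* Same shape as the removed fallback: retry the cmpxchg() until no other
 * writer slipped in between our read and our update. */
static inline int my_atomic_fetch_or(int mask, my_atomic_t *p)
{
	int old, val = __atomic_load_n(&p->counter, __ATOMIC_RELAXED);

	for (;;) {
		old = my_cmpxchg(&p->counter, val, val | mask);
		if (old == val)
			break;

		val = old;
	}

	return old;	/* value observed just before the OR took effect */
}

int main(void)
{
	my_atomic_t v = { 0x3 };
	int old = my_atomic_fetch_or(0xc, &v);

	printf("old=0x%x, counter is now 0x%x\n", old, v.counter);
	return 0;
}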