Commit f05cab00 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/atomics: Remove atomic_inc()/atomic_dec() and friends

Now that atomic_add() and atomic_sub() handle immediate operands,
atomic_inc() and atomic_dec() have no added value compared to the
generic fallback, which calls atomic_add(1) and atomic_sub(1).
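For reference, once the architecture stops defining them, the generic
fallback layer (include/linux/atomic-arch-fallback.h) supplies
definitions that reduce to roughly the following (a sketch, not the
verbatim kernel text):

	/* Generic fallback: increment is an add of the constant 1. */
	static __always_inline void arch_atomic_inc(atomic_t *v)
	{
		arch_atomic_add(1, v);
	}

	/* Likewise, decrement is a subtract of the constant 1. */
	static __always_inline void arch_atomic_dec(atomic_t *v)
	{
		arch_atomic_sub(1, v);
	}

Since atomic_add()/atomic_sub() now fold constant operands into an
add-immediate form, these fallbacks should produce an lwarx/stwcx.
loop equivalent to the hand-coded routines removed below.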

Also remove atomic_inc_not_zero(), which falls back to
atomic_add_unless(), which itself falls back to
atomic_fetch_add_unless(), which now handles immediate operands.
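That chain looks roughly like this in the generic fallback code (a
sketch, assuming no arch-specific override is defined):

	/* Add @a to @v unless @v equals @u; returns true if the add happened. */
	static __always_inline bool arch_atomic_add_unless(atomic_t *v, int a, int u)
	{
		return arch_atomic_fetch_add_unless(v, a, u) != u;
	}

	/* Increment @v unless it is zero; returns true if it was non-zero. */
	static __always_inline bool arch_atomic_inc_not_zero(atomic_t *v)
	{
		return arch_atomic_add_unless(v, 1, 0);
	}

powerpc keeps its own arch_atomic_fetch_add_unless() (second hunk
below), so the whole chain still bottoms out in a single
lwarx/stwcx. loop, now with the immediate 1 folded into the asm.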
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/0bc64a2f18726055093dbb2e479cefc60a409cfd.1632236981.git.christophe.leroy@csgroup.eu
parent 41d65207
@@ -118,71 +118,6 @@ ATOMIC_OPS(xor, xor, "", K)
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
static __inline__ void arch_atomic_inc(atomic_t *v)
{
int t;
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc\n\
addic %0,%0,1\n"
" stwcx. %0,0,%2 \n\
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
: "cc", "xer");
}
#define arch_atomic_inc arch_atomic_inc
static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
{
int t;
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
" addic %0,%0,1\n"
" stwcx. %0,0,%2\n"
" bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
: "cc", "xer");
return t;
}
static __inline__ void arch_atomic_dec(atomic_t *v)
{
int t;
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec\n\
addic %0,%0,-1\n"
" stwcx. %0,0,%2\n\
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
: "cc", "xer");
}
#define arch_atomic_dec arch_atomic_dec
static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
{
int t;
__asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
" addic %0,%0,-1\n"
" stwcx. %0,0,%2\n"
" bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
: "cc", "xer");
return t;
}
#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
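The _return_relaxed variants removed above are picked up the same
way; the generic layer reduces them to the add/sub return primitives,
roughly as follows (a sketch, assuming the architecture provides
arch_atomic_add_return_relaxed()/arch_atomic_sub_return_relaxed(), as
powerpc does):

	static __always_inline int arch_atomic_inc_return_relaxed(atomic_t *v)
	{
		return arch_atomic_add_return_relaxed(1, v);
	}

	static __always_inline int arch_atomic_dec_return_relaxed(atomic_t *v)
	{
		return arch_atomic_sub_return_relaxed(1, v);
	}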
#define arch_atomic_cmpxchg(v, o, n) \
	(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_cmpxchg_relaxed(v, o, n) \
@@ -255,36 +190,6 @@ static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/**
* atomic_inc_not_zero - increment unless the number is zero
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
{
int t1, t2;
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # atomic_inc_not_zero\n\
cmpwi 0,%0,0\n\
beq- 2f\n\
addic %1,%0,1\n"
" stwcx. %1,0,%2\n\
bne- 1b\n"
PPC_ATOMIC_EXIT_BARRIER
"\n\
2:"
: "=&r" (t1), "=&r" (t2)
: "r" (&v->counter)
: "cc", "xer", "memory");
return t1;
}
#define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
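For context, the typical consumer of this primitive is lifetime
management: take a reference only while the object is still live. A
hypothetical illustration (my_obj and refcnt are invented names, not
part of this patch):

	struct my_obj {
		atomic_t refcnt;
	};

	/* Returns true and holds a reference iff the object was still live. */
	static bool my_obj_get(struct my_obj *obj)
	{
		return atomic_inc_not_zero(&obj->refcnt);
	}

The generic fallback preserves these semantics exactly, which is why
the hand-written powerpc version can be dropped.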
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
...