Commit c7178cde authored by Mark Rutland, committed by Peter Zijlstra

locking/atomic: h8300: use asm-generic exclusively

As h8300's implementation of the atomics isn't using any arch-specific
functionality, and its implementation of cmpxchg only uses assembly to
non-atomically swap two elements in memory, we may as well use the
asm-generic atomic.h and cmpxchg.h, and avoid the duplicate code.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-4-mark.rutland@arm.com
parent 201e2c1b
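
The diff below switches h8300's Kbuild over to the asm-generic headers and deletes the architecture's own asm/atomic.h and asm/cmpxchg.h; both deleted files are reproduced in full further down. For orientation, on a uniprocessor architecture like h8300 the asm-generic atomics fall back to the same interrupt-masking pattern the deleted code uses. A minimal sketch of that pattern (illustrative only; generic_atomic_add_return is a made-up name for this sketch, not the asm-generic spelling):

#include <linux/irqflags.h>
#include <linux/types.h>

/* Illustrative UP atomic op: mask interrupts around a plain RMW. */
static inline int generic_atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);	/* no interrupt can observe a half-done update */
	ret = (v->counter += i);
	local_irq_restore(flags);
	return ret;
}

Since the hand-written h8300 code does exactly this (plus an assembly swap for xchg that is itself protected by the same IRQ masking), the generic headers are a drop-in replacement.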
# SPDX-License-Identifier: GPL-2.0
generic-y += asm-offsets.h
generic-y += atomic.h
generic-y += cmpxchg.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_H8300_ATOMIC__
#define __ARCH_H8300_ATOMIC__
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/irqflags.h>
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
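/*
 * READ_ONCE()/WRITE_ONCE() below make each access a single, untorn
 * load or store and keep the compiler from caching or reordering it.
 */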
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
h8300flags flags; \
int ret; \
\
flags = arch_local_irq_save(); \
ret = v->counter c_op i; \
arch_local_irq_restore(flags); \
return ret; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
h8300flags flags; \
int ret; \
\
flags = arch_local_irq_save(); \
ret = v->counter; \
v->counter c_op i; \
arch_local_irq_restore(flags); \
return ret; \
}
#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
h8300flags flags; \
\
flags = arch_local_irq_save(); \
v->counter c_op i; \
arch_local_irq_restore(flags); \
}
ATOMIC_OP_RETURN(add, +=)
ATOMIC_OP_RETURN(sub, -=)
#define ATOMIC_OPS(op, c_op) \
ATOMIC_OP(op, c_op) \
ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
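/*
 * For illustration, ATOMIC_OP_RETURN(add, +=) above stamps out:
 *
 *	static inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		h8300flags flags;
 *		int ret;
 *
 *		flags = arch_local_irq_save();
 *		ret = v->counter += i;
 *		arch_local_irq_restore(flags);
 *		return ret;
 *	}
 *
 * and ATOMIC_OPS(and, &=) likewise emits atomic_and() and atomic_fetch_and().
 */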
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
h8300flags flags;
flags = arch_local_irq_save();
ret = v->counter;
if (likely(ret == old))
v->counter = new;
arch_local_irq_restore(flags);
return ret;
}
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int ret;
h8300flags flags;
flags = arch_local_irq_save();
ret = v->counter;
if (ret != u)
v->counter += a;
arch_local_irq_restore(flags);
return ret;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
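/*
 * Usage sketch: atomic_fetch_add_unless(&v, 1, 0) increments v unless it
 * is already zero and returns the old value, so the caller can tell
 * whether the add happened -- the building block of atomic_inc_not_zero().
 */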
#endif /* __ARCH_H8300_ATOMIC__ */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_H8300_CMPXCHG__
#define __ARCH_H8300_CMPXCHG__
#include <linux/irqflags.h>
#define xchg(ptr, x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
sizeof(*(ptr))))
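/*
 * Casting ptr to a pointer to this oversized dummy struct makes the "m"
 * constraints below cover the whole object, telling GCC the asm may read
 * and write that memory so it cannot cache values across the exchange.
 */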
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static inline unsigned long __xchg(unsigned long x,
volatile void *ptr, int size)
{
unsigned long tmp, flags;
local_irq_save(flags);
switch (size) {
case 1:
__asm__ __volatile__
("mov.b %2,%0\n\t"
"mov.b %1,%2"
: "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
break;
case 2:
__asm__ __volatile__
("mov.w %2,%0\n\t"
"mov.w %1,%2"
: "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
break;
case 4:
__asm__ __volatile__
("mov.l %2,%0\n\t"
"mov.l %1,%2"
: "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
break;
default:
tmp = 0;
}
local_irq_restore(flags);
return tmp;
}
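/*
 * Note: the mov.b/mov.w/mov.l pairs above are not atomic by themselves;
 * atomicity comes entirely from the local_irq_save()/restore() bracket,
 * which is why asm-generic's IRQ-based xchg() can replace this code.
 */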
#include <asm-generic/cmpxchg-local.h>
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#endif /* __ARCH_H8300_CMPXCHG__ */
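
As a closing usage sketch (a hypothetical caller, not part of the commit): the primitives above compose in the usual compare-and-swap retry loop, for example:

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical helper: add a to v only while the result stays <= cap. */
static inline bool bounded_add(atomic_t *v, int a, int cap)
{
	int old, cur = atomic_read(v);

	while (cur + a <= cap) {
		old = atomic_cmpxchg(v, cur, cur + a);
		if (old == cur)
			return true;	/* counter updated without interference */
		cur = old;		/* v changed under us; re-check and retry */
	}
	return false;
}

The loop re-reads the counter whenever atomic_cmpxchg() reports interference, which is the standard lock-free update pattern these primitives exist to support.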