Commit 658aa514 authored by Peter Zijlstra, committed by Thomas Gleixner

sh: Provide atomic_{or,xor,and}

Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.
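
For callers, the mapping from the old helpers to the new ops is direct. An illustrative sketch (the flags variable and the 0x04 bit value are made up for the example):

        atomic_t flags = ATOMIC_INIT(0);

        atomic_set_mask(0x04, &flags);          /* old interface: set bits in the counter   */
        atomic_or(0x04, &flags);                /* new op: same effect                      */

        atomic_clear_mask(0x04, &flags);        /* old interface: clear bits in the counter */
        atomic_and(~0x04, &flags);              /* new op: note the inverted mask           */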
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent d0b7eb6f
@@ -48,47 +48,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-        int tmp;
-        unsigned int _mask = ~mask;
-
-        __asm__ __volatile__ (
-                "   .align 2              \n\t"
-                "   mova    1f,   r0      \n\t" /* r0 = end point */
-                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
-                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
-                "   mov.l  @%1,   %0      \n\t" /* load old value */
-                "   and     %2,   %0      \n\t" /* add */
-                "   mov.l   %0,   @%1     \n\t" /* store new value */
-                "1: mov     r1,   r15     \n\t" /* LOGOUT */
-                : "=&r" (tmp),
-                  "+r"  (v)
-                : "r"   (_mask)
-                : "memory" , "r0", "r1");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-        int tmp;
-
-        __asm__ __volatile__ (
-                "   .align 2              \n\t"
-                "   mova    1f,   r0      \n\t" /* r0 = end point */
-                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
-                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
-                "   mov.l  @%1,   %0      \n\t" /* load old value */
-                "   or      %2,   %0      \n\t" /* or */
-                "   mov.l   %0,   @%1     \n\t" /* store new value */
-                "1: mov     r1,   r15     \n\t" /* LOGOUT */
-                : "=&r" (tmp),
-                  "+r"  (v)
-                : "r"   (mask)
-                : "memory" , "r0", "r1");
-}
 #endif /* __ASM_SH_ATOMIC_GRB_H */
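
For reference, ATOMIC_OP(and) above should expand to roughly the following gUSA-style sequence (the r15 LOGIN/LOGOUT trick also used by the removed helpers). This is a sketch modeled on the removed atomic_clear_mask(), not the literal macro body:

static inline void atomic_and(int i, atomic_t *v)
{
        int tmp;

        __asm__ __volatile__ (
                "   .align 2              \n\t"
                "   mova    1f,   r0      \n\t" /* r0 = end point */
                "   mov    r15,   r1      \n\t" /* r1 = saved sp */
                "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
                "   mov.l  @%1,   %0      \n\t" /* load old value */
                "   and     %2,   %0      \n\t" /* apply the operation */
                "   mov.l   %0,   @%1     \n\t" /* store new value */
                "1: mov     r1,   r15     \n\t" /* LOGOUT */
                : "=&r" (tmp),
                  "+r"  (v)
                : "r"   (i)
                : "memory" , "r0", "r1");
}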
@@ -37,27 +37,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-        unsigned long flags;
-
-        raw_local_irq_save(flags);
-        v->counter &= ~mask;
-        raw_local_irq_restore(flags);
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-        unsigned long flags;
-
-        raw_local_irq_save(flags);
-        v->counter |= mask;
-        raw_local_irq_restore(flags);
-}
 #endif /* __ASM_SH_ATOMIC_IRQ_H */
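
Here ATOMIC_OP(op, c_op) presumably stamps out interrupt-disabled read-modify-write helpers, so ATOMIC_OP(and, &=) above should expand to something like the following sketch, modeled on the removed atomic_clear_mask():

static inline void atomic_and(int i, atomic_t *v)
{
        unsigned long flags;

        raw_local_irq_save(flags);      /* no LL/SC or gUSA here: exclude interrupts instead */
        v->counter &= i;                /* the c_op applied to the counter */
        raw_local_irq_restore(flags);
}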
@@ -52,37 +52,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_clear_mask     \n"
-"       and     %1, %0                                  \n"
-"       movco.l %0, @%2                                 \n"
-"       bf      1b                                      \n"
-        : "=&z" (tmp)
-        : "r" (~mask), "r" (&v->counter)
-        : "t");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-        unsigned long tmp;
-
-        __asm__ __volatile__ (
-"1:     movli.l @%2, %0         ! atomic_set_mask       \n"
-"       or      %1, %0                                  \n"
-"       movco.l %0, @%2                                 \n"
-"       bf      1b                                      \n"
-        : "=&z" (tmp)
-        : "r" (mask), "r" (&v->counter)
-        : "t");
-}
 #endif /* __ASM_SH_ATOMIC_LLSC_H */
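
On LL/SC-capable parts, ATOMIC_OP(and) above should expand along the lines of the removed atomic_clear_mask(): load-link the counter, apply the operation, then retry until the store-conditional succeeds. A sketch, not the literal macro body:

static inline void atomic_and(int i, atomic_t *v)
{
        unsigned long tmp;

        __asm__ __volatile__ (
"1:     movli.l @%2, %0         ! atomic_and    \n"     /* load-linked v->counter   */
"       and     %1, %0                          \n"     /* apply the operation      */
"       movco.l %0, @%2                         \n"     /* store-conditional        */
"       bf      1b                              \n"     /* branch back if it failed */
        : "=&z" (tmp)
        : "r" (i), "r" (&v->counter)
        : "t");
}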
@@ -25,6 +25,16 @@
 #include <asm/atomic-irq.h>
 #endif
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+        atomic_and(~mask, v);
+}
+
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+        atomic_or(mask, v);
+}
 #define atomic_add_negative(a, v)      (atomic_add_return((a), (v)) < 0)
 #define atomic_dec_return(v)           atomic_sub_return(1, (v))
 #define atomic_inc_return(v)           atomic_add_return(1, (v))
...