Commit ddb7573f authored by Peter Zijlstra, committed by Thomas Gleixner

mn10300: Provide atomic_{or,xor,and}

Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 27782f27
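
For reference, the mapping this patch applies in the mn10300 header (and which callers elsewhere follow) is atomic_set_mask(mask, v) -> atomic_or(mask, v) and atomic_clear_mask(mask, v) -> atomic_and(~mask, v). A minimal usage sketch of the new ops, with a hypothetical flag word and bit values invented for illustration:

	static atomic_t flags = ATOMIC_INIT(0);	/* hypothetical flag word */

	atomic_or(0x04, &flags);	/* set bit 2; was atomic_set_mask(0x04, ...) */
	atomic_and(~0x01, &flags);	/* clear bit 0; was atomic_clear_mask(0x01, ...) */
	atomic_xor(0x08, &flags);	/* toggle bit 3; no old mask helper for this */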
@@ -89,6 +89,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
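
The ATOMIC_OP() macro itself is defined earlier in this header and is not visible in the hunk. As a rough sketch of the kind of function each invocation generates, modeled on the !CONFIG_SMP fallback visible in the removed code below (the sketch takes the C operator as a second argument for readability; the real mn10300 macro takes only the op name and on SMP pastes it into an asm template):

	/* Sketch only, not the in-tree macro body. */
	#define ATOMIC_OP_SKETCH(op, c_op)				\
	static inline void atomic_##op(int i, atomic_t *v)		\
	{								\
		unsigned long flags;					\
									\
		flags = arch_local_cli_save();	/* IRQs off */		\
		v->counter c_op i;	/* plain RMW, atomic on UP */	\
		arch_local_irq_restore(flags);				\
	}

	/* ATOMIC_OP_SKETCH(and, &=), (or, |=) and (xor, ^=) would emit
	 * atomic_and(), atomic_or() and atomic_xor() respectively. */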
@@ -134,31 +140,9 @@ static inline void atomic_dec(atomic_t *v)
  *
  * Atomically clears the bits set in mask from the memory word specified.
  */
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1:	mov	%3,(_AAR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"
-		"	and	%4,%0		\n"
-		"	mov	%0,(_ADR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"	/* flush */
-		"	mov	(_ASR,%2),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	mask = ~mask;
-	flags = arch_local_cli_save();
-	*addr &= mask;
-	arch_local_irq_restore(flags);
-#endif
+	atomic_and(~mask, v);
 }
 
 /**
@@ -168,30 +152,9 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
  *
  * Atomically sets the bits set in mask from the memory word specified.
  */
-static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1:	mov	%3,(_AAR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"
-		"	or	%4,%0		\n"
-		"	mov	%0,(_ADR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"	/* flush */
-		"	mov	(_ASR,%2),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	flags = arch_local_cli_save();
-	*addr |= mask;
-	arch_local_irq_restore(flags);
-#endif
+	atomic_or(mask, v);
 }
 
 #endif /* __KERNEL__ */
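
For readers unfamiliar with the hardware these hunks drop: the removed SMP paths drove mn10300's atomic-operations unit, where storing the target address to _AAR latches a reservation, loads and stores through _ADR perform the locked access, and a non-zero read of _ASR means the reservation was lost and the sequence must retry (the "bne 1b"). A runnable userspace paraphrase of that retry loop, with the hardware registers simulated by helper functions (all names below are invented for illustration):

	#include <stdio.h>

	/* Simulated atomic-operations unit; on real hardware these are
	 * the (_AAR,..), (_ADR,..) and (_ASR,..) MMIO accesses. */
	static unsigned long *aar;	/* latched target address */
	static void aou_set_addr(unsigned long *a) { aar = a; }
	static unsigned long aou_load(void)        { return *aar; }
	static void aou_store(unsigned long v)     { *aar = v; }
	static int aou_status(void)                { return 0; /* 0 = store held */ }

	static void set_mask_paraphrase(unsigned long mask, unsigned long *addr)
	{
		unsigned long val;

		do {
			aou_set_addr(addr);	/* mov %3,(_AAR,%2)           */
			val = aou_load();	/* mov (_ADR,%2),%0           */
			val |= mask;		/* or %4,%0 ("and" to clear)  */
			aou_store(val);		/* mov %0,(_ADR,%2)           */
		} while (aou_status());		/* mov (_ASR,%2),%0; bne 1b   */
	}

	int main(void)
	{
		unsigned long word = 0x01;
		set_mask_paraphrase(0x04, &word);
		printf("%#lx\n", word);		/* prints 0x5 */
		return 0;
	}

The new one-line bodies delegate this same loop to the ATOMIC_OP()-generated atomic_and() and atomic_or(), which is why the open-coded copies can go.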