Commit 2a3ed90f authored by Peter Zijlstra, committed by Thomas Gleixner

xtensa: Provide atomic_{or,xor,and}

Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 304a0d69
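
For illustration (not part of the commit): the new ops are essentially drop-in replacements at call sites, with atomic_clear_mask(mask, v) becoming atomic_and(~mask, v). A minimal sketch of the migration; the 'flags' variable and PENDING bit below are made-up names, not taken from this patch:

/* Hypothetical call site, for illustration only. */
#include <linux/atomic.h>

#define PENDING		0x1		/* made-up flag bit */

static atomic_t flags = ATOMIC_INIT(0);

static void toggle_pending(void)
{
	atomic_set_mask(PENDING, &flags);	/* old style, now __deprecated */
	atomic_clear_mask(PENDING, &flags);	/* old style, now __deprecated */

	atomic_or(PENDING, &flags);		/* new op added by this patch */
	atomic_and(~PENDING, &flags);		/* new op added by this patch */
	atomic_xor(PENDING, &flags);		/* new op added by this patch */
}
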
@@ -145,10 +145,26 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_or(mask, v);
+}
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_and(~mask, v);
+}
+
 /**
  * atomic_sub_and_test - subtract value from variable and test result
  * @i: integer value to subtract
@@ -250,75 +266,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       and     %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (~mask), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int all_f = -1;
-	unsigned int vval;
-
-	__asm__ __volatile__(
-			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-			"       l32i    %0, %2, 0\n"
-			"       xor     %1, %4, %3\n"
-			"       and     %0, %0, %4\n"
-			"       s32i    %0, %2, 0\n"
-			"       wsr     a15, ps\n"
-			"       rsync\n"
-			: "=&a" (vval), "=a" (mask)
-			: "a" (v), "a" (all_f), "1" (mask)
-			: "a15", "memory"
-			);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       or      %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (mask), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int vval;
-
-	__asm__ __volatile__(
-			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-			"       l32i    %0, %2, 0\n"
-			"       or      %0, %0, %1\n"
-			"       s32i    %0, %2, 0\n"
-			"       wsr     a15, ps\n"
-			"       rsync\n"
-			: "=&a" (vval)
-			: "a" (mask), "a" (v)
-			: "a15", "memory"
-			);
-#endif
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */
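
For reference, the s32c1i sequence used by the removed helpers (and by the ATOMIC_OP() macro that now generates atomic_and/or/xor) is a compare-and-swap retry loop: l32i loads the old value, wsr stashes it in SCOMPARE1, the logic op computes the new value, s32c1i stores it only if the memory word still equals SCOMPARE1 (returning the current value otherwise), and bne loops back on failure. A rough user-space sketch of that pattern, written with GCC/Clang __atomic builtins rather than the kernel API:

/* Sketch only: the load/compute/compare-and-swap retry loop that the
 * l32i / wsr scompare1 / s32c1i / bne sequence implements, in plain C. */
static void sketch_atomic_or(int *v, int mask)
{
	int old = __atomic_load_n(v, __ATOMIC_RELAXED);

	/* Retry until no other CPU changed *v between the load and the CAS;
	 * on failure the builtin refreshes 'old' with the current value. */
	while (!__atomic_compare_exchange_n(v, &old, old | mask,
					    1 /* weak */,
					    __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		;
}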