Commit d835b6c4 authored by Peter Zijlstra, committed by Thomas Gleixner

blackfin: Provide atomic_{or,xor,and}

Implement atomic logic ops -- atomic_{or,xor,and}.

These will replace the atomic_{set,clear}_mask functions that are
available on some archs.

TODO: use inline asm or at least asm macros to collapse the lot.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent f8a570e2
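
As a quick illustration of the conversion the changelog describes, here is a hedged sketch of what a caller looks like before and after; the pending_events variable, the EV_* flag values and update_events() are invented for this example and are not part of the commit:

	#include <linux/atomic.h>

	static atomic_t pending_events = ATOMIC_INIT(0);
	#define EV_RX 0x01
	#define EV_TX 0x02

	static void update_events(void)
	{
		/* Old style, only available on some archs:
		 *   atomic_set_mask(EV_RX, &pending_events);
		 *   atomic_clear_mask(EV_TX, &pending_events);
		 */

		/* New style, using the generic atomic logic ops: */
		atomic_or(EV_RX, &pending_events);	/* set the RX bit atomically   */
		atomic_and(~EV_TX, &pending_events);	/* clear the TX bit atomically */
	}
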
@@ -16,19 +16,33 @@
 #include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 
 #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
 
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m)
+#define CONFIG_ARCH_HAS_ATOMIC_OR
+
+#define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_and(~mask, v);
+}
+
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_or(mask, v);
+}
 
 #endif
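
For reference, a hedged sketch of how the new header macros above behave on SMP Blackfin; the variable v, the function example() and the value 0x4 are arbitrary and the expansions are paraphrased from the hunk:

	static atomic_t v = ATOMIC_INIT(0);

	static void example(void)
	{
		atomic_or(0x4, &v);
		/* expands to (void)__raw_atomic_or_asm(&v.counter, 0x4); the raw helper
		 * returns the old word, which the macro discards since atomic_or() is void */

		atomic_clear_mask(0x4, &v);
		/* now a deprecated inline that calls atomic_and(~0x4, &v); the mask
		 * inversion moved from __raw_atomic_clear_asm into this C wrapper */
	}
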
@@ -83,11 +83,12 @@ EXPORT_SYMBOL(insl);
 EXPORT_SYMBOL(insl_16);
 
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(__raw_atomic_update_asm);
-EXPORT_SYMBOL(__raw_atomic_clear_asm);
-EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_and_asm);
+EXPORT_SYMBOL(__raw_atomic_or_asm);
 EXPORT_SYMBOL(__raw_atomic_xor_asm);
 EXPORT_SYMBOL(__raw_atomic_test_asm);
 EXPORT_SYMBOL(__raw_xchg_1_asm);
 EXPORT_SYMBOL(__raw_xchg_2_asm);
 EXPORT_SYMBOL(__raw_xchg_4_asm);
@@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm)
  * r0 = ptr
  * r1 = value
  *
- * Add a signed value to a 32bit word and return the new value atomically.
+ * ADD a signed value to a 32bit word and return the new value atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_update_asm)
+ENTRY(___raw_atomic_add_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_update_asm)
+ENDPROC(___raw_atomic_add_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Clear the mask bits from a 32bit word and return the old 32bit value
+ * AND the mask bits from a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_clear_asm)
+ENTRY(___raw_atomic_and_asm)
 	p1 = r0;
-	r3 = ~r1;
+	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
 	r2 = [p1];
@@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_clear_asm)
+ENDPROC(___raw_atomic_and_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Set the mask bits into a 32bit word and return the old 32bit value
+ * OR the mask bits into a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_set_asm)
+ENTRY(___raw_atomic_or_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm)
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_set_asm)
+ENDPROC(___raw_atomic_or_asm)
 
 /*
  * r0 = ptr
@@ -787,7 +787,7 @@ ENTRY(___raw_bit_set_asm)
 	r2 = r1;
 	r1 = 1;
 	r1 <<= r2;
-	jump ___raw_atomic_set_asm
+	jump ___raw_atomic_or_asm
 ENDPROC(___raw_bit_set_asm)
 
 /*
@@ -798,10 +798,10 @@ ENDPROC(___raw_bit_set_asm)
  * Clobbers: r3:0, p1:0
  */
 ENTRY(___raw_bit_clear_asm)
-	r2 = r1;
-	r1 = 1;
-	r1 <<= r2;
-	jump ___raw_atomic_clear_asm
+	r2 = 1;
+	r2 <<= r1;
+	r1 = ~r2;
+	jump ___raw_atomic_and_asm
 ENDPROC(___raw_bit_clear_asm)
 
 /*
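
The hunks above from what appears to be the dual-core (BF561) SMP assembly only show each routine's renamed entry and exit; the operation itself sits in the collapsed middle. As a rough C-level sketch of the pattern the routines follow (take the cross-core lock, load, apply the op, store, release, return the old value), with get_core_lock()/put_core_lock() as simplified stand-ins for the _get_core_lock/_put_core_lock calls visible in the hunk and sketch_raw_atomic_and() as a made-up name:

	/* Empty stubs so the sketch compiles; the real lock helpers serialize
	 * the two cores around the read-modify-write. */
	static void get_core_lock(void) { }
	static void put_core_lock(void) { }

	/* Illustrative shape of ___raw_atomic_and_asm, not the real implementation. */
	static int sketch_raw_atomic_and(volatile int *ptr, int mask)
	{
		int old;

		get_core_lock();
		old = *ptr;		/* r2 = [p1] in the assembly       */
		*ptr = old & mask;	/* bit-clear callers pass ~mask    */
		put_core_lock();
		return old;		/* and/or/xor return the old value */
	}
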