Commit fc9eb93c authored by Will Deacon

Merge branch 'locking/arch-atomic' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into aarch64/for-next/core

Merge in PeterZ's logical atomic ops so that we can implement them in
our subsequent LSE atomics.
parents 772d6835 41b9e9fc
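
For orientation before the per-architecture hunks: each architecture gains the same logical operations (atomic_and/atomic_or/atomic_xor, plus atomic_andnot where the ISA has a bit-clear instruction), stamped out by an ATOMIC_OP()-style macro rather than written by hand. The sketch below only illustrates that pattern; it leans on GCC's __atomic builtins instead of any architecture's load-locked/store-conditional or serialized assembly, so the bodies are stand-ins for what the diffs actually generate.

/* Illustration of the pattern, not kernel code: one macro, many ops. */
#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Here "op" doubles as the builtin suffix; the kernel macros also take an asm mnemonic. */
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__atomic_fetch_##op(&v->counter, i, __ATOMIC_RELAXED);		\
}

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

/* andnot is "and with the complement"; several arches map it to a bic-style insn. */
static inline void atomic_andnot(int i, atomic_t *v)
{
	__atomic_fetch_and(&v->counter, ~i, __ATOMIC_RELAXED);
}

#undef ATOMIC_OP

int main(void)
{
	atomic_t v = { .counter = 0xff };

	atomic_and(0x3f, &v);		/* 0x3f */
	atomic_or(0xc0, &v);		/* 0xff */
	atomic_xor(0x0f, &v);		/* 0xf0 */
	atomic_andnot(0x30, &v);	/* 0xc0 */

	printf("0x%02x\n", v.counter);	/* prints 0xc0 */
	return 0;
}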
@@ -29,13 +29,13 @@
 * branch back to restart the operation.
 */
-#define ATOMIC_OP(op) \
+#define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"1:	ldl_l %0,%1\n" \
-	"	" #op "l %0,%2,%0\n" \
+	"	" #asm_op " %0,%2,%0\n" \
	"	stl_c %0,%1\n" \
	"	beq %0,2f\n" \
	".subsection 2\n" \
@@ -45,15 +45,15 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
	:"Ir" (i), "m" (v->counter)); \
} \
-#define ATOMIC_OP_RETURN(op) \
+#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
	long temp, result; \
	smp_mb(); \
	__asm__ __volatile__( \
	"1:	ldl_l %0,%1\n" \
-	"	" #op "l %0,%3,%2\n" \
-	"	" #op "l %0,%3,%0\n" \
+	"	" #asm_op " %0,%3,%2\n" \
+	"	" #asm_op " %0,%3,%0\n" \
	"	stl_c %0,%1\n" \
	"	beq %0,2f\n" \
	".subsection 2\n" \
@@ -65,13 +65,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
	return result; \
}
-#define ATOMIC64_OP(op) \
+#define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"1:	ldq_l %0,%1\n" \
-	"	" #op "q %0,%2,%0\n" \
+	"	" #asm_op " %0,%2,%0\n" \
	"	stq_c %0,%1\n" \
	"	beq %0,2f\n" \
	".subsection 2\n" \
@@ -81,15 +81,15 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
	:"Ir" (i), "m" (v->counter)); \
} \
-#define ATOMIC64_OP_RETURN(op) \
+#define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
{ \
	long temp, result; \
	smp_mb(); \
	__asm__ __volatile__( \
	"1:	ldq_l %0,%1\n" \
-	"	" #op "q %0,%3,%2\n" \
-	"	" #op "q %0,%3,%0\n" \
+	"	" #asm_op " %0,%3,%2\n" \
+	"	" #asm_op " %0,%3,%0\n" \
	"	stq_c %0,%1\n" \
	"	beq %0,2f\n" \
	".subsection 2\n" \
@@ -101,15 +101,27 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
	return result; \
}
-#define ATOMIC_OPS(opg) \
-	ATOMIC_OP(opg) \
-	ATOMIC_OP_RETURN(opg) \
-	ATOMIC64_OP(opg) \
-	ATOMIC64_OP_RETURN(opg)
+#define ATOMIC_OPS(op) \
+	ATOMIC_OP(op, op##l) \
+	ATOMIC_OP_RETURN(op, op##l) \
+	ATOMIC64_OP(op, op##q) \
+	ATOMIC64_OP_RETURN(op, op##q)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
+#define atomic_andnot atomic_andnot
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, bis)
+ATOMIC_OP(xor, xor)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, bis)
+ATOMIC64_OP(xor, xor)
#undef ATOMIC_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
......
@@ -143,9 +143,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-ATOMIC_OP(and, &=, and)
-#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, xor)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
......
...@@ -194,6 +194,13 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -194,6 +194,13 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
ATOMIC_OPS(add, +=, add) ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub) ATOMIC_OPS(sub, -=, sub)
#define atomic_andnot atomic_andnot
ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, orr)
ATOMIC_OP(xor, ^=, eor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
...@@ -321,6 +328,13 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \ ...@@ -321,6 +328,13 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
ATOMIC64_OPS(add, adds, adc) ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc) ATOMIC64_OPS(sub, subs, sbc)
#define atomic64_andnot atomic64_andnot
ATOMIC64_OP(and, and, and)
ATOMIC64_OP(andnot, bic, bic)
ATOMIC64_OP(or, orr, orr)
ATOMIC64_OP(xor, eor, eor)
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP #undef ATOMIC64_OP
......
...@@ -85,6 +85,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ...@@ -85,6 +85,13 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add, add) ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub) ATOMIC_OPS(sub, sub)
#define atomic_andnot atomic_andnot
ATOMIC_OP(and, and)
ATOMIC_OP(andnot, bic)
ATOMIC_OP(or, orr)
ATOMIC_OP(xor, eor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
...@@ -183,6 +190,13 @@ static inline long atomic64_##op##_return(long i, atomic64_t *v) \ ...@@ -183,6 +190,13 @@ static inline long atomic64_##op##_return(long i, atomic64_t *v) \
ATOMIC64_OPS(add, add) ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub) ATOMIC64_OPS(sub, sub)
#define atomic64_andnot atomic64_andnot
ATOMIC64_OP(and, and)
ATOMIC64_OP(andnot, bic)
ATOMIC64_OP(or, orr)
ATOMIC64_OP(xor, eor)
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP #undef ATOMIC64_OP
......
...@@ -44,6 +44,18 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \ ...@@ -44,6 +44,18 @@ static inline int __atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OP_RETURN(sub, sub, rKs21) ATOMIC_OP_RETURN(sub, sub, rKs21)
ATOMIC_OP_RETURN(add, add, r) ATOMIC_OP_RETURN(add, add, r)
#define ATOMIC_OP(op, asm_op) \
ATOMIC_OP_RETURN(op, asm_op, r) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
(void)__atomic_##op##_return(i, v); \
}
ATOMIC_OP(and, and)
ATOMIC_OP(or, or)
ATOMIC_OP(xor, eor)
#undef ATOMIC_OP
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
/* /*
......
@@ -16,19 +16,21 @@
#include <linux/types.h>
asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v) __raw_atomic_set_asm(&(v)->counter, m)
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
+#define atomic_or(i, v) (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
#endif
......
@@ -83,11 +83,12 @@ EXPORT_SYMBOL(insl);
EXPORT_SYMBOL(insl_16);
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(__raw_atomic_update_asm);
-EXPORT_SYMBOL(__raw_atomic_clear_asm);
-EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_and_asm);
+EXPORT_SYMBOL(__raw_atomic_or_asm);
EXPORT_SYMBOL(__raw_atomic_xor_asm);
EXPORT_SYMBOL(__raw_atomic_test_asm);
EXPORT_SYMBOL(__raw_xchg_1_asm);
EXPORT_SYMBOL(__raw_xchg_2_asm);
EXPORT_SYMBOL(__raw_xchg_4_asm);
......
...@@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm) ...@@ -587,10 +587,10 @@ ENDPROC(___raw_write_unlock_asm)
* r0 = ptr * r0 = ptr
* r1 = value * r1 = value
* *
* Add a signed value to a 32bit word and return the new value atomically. * ADD a signed value to a 32bit word and return the new value atomically.
* Clobbers: r3:0, p1:0 * Clobbers: r3:0, p1:0
*/ */
ENTRY(___raw_atomic_update_asm) ENTRY(___raw_atomic_add_asm)
p1 = r0; p1 = r0;
r3 = r1; r3 = r1;
[--sp] = rets; [--sp] = rets;
...@@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm) ...@@ -603,19 +603,19 @@ ENTRY(___raw_atomic_update_asm)
r0 = r3; r0 = r3;
rets = [sp++]; rets = [sp++];
rts; rts;
ENDPROC(___raw_atomic_update_asm) ENDPROC(___raw_atomic_add_asm)
/* /*
* r0 = ptr * r0 = ptr
* r1 = mask * r1 = mask
* *
* Clear the mask bits from a 32bit word and return the old 32bit value * AND the mask bits from a 32bit word and return the old 32bit value
* atomically. * atomically.
* Clobbers: r3:0, p1:0 * Clobbers: r3:0, p1:0
*/ */
ENTRY(___raw_atomic_clear_asm) ENTRY(___raw_atomic_and_asm)
p1 = r0; p1 = r0;
r3 = ~r1; r3 = r1;
[--sp] = rets; [--sp] = rets;
call _get_core_lock; call _get_core_lock;
r2 = [p1]; r2 = [p1];
...@@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm) ...@@ -627,17 +627,17 @@ ENTRY(___raw_atomic_clear_asm)
r0 = r3; r0 = r3;
rets = [sp++]; rets = [sp++];
rts; rts;
ENDPROC(___raw_atomic_clear_asm) ENDPROC(___raw_atomic_and_asm)
/* /*
* r0 = ptr * r0 = ptr
* r1 = mask * r1 = mask
* *
* Set the mask bits into a 32bit word and return the old 32bit value * OR the mask bits into a 32bit word and return the old 32bit value
* atomically. * atomically.
* Clobbers: r3:0, p1:0 * Clobbers: r3:0, p1:0
*/ */
ENTRY(___raw_atomic_set_asm) ENTRY(___raw_atomic_or_asm)
p1 = r0; p1 = r0;
r3 = r1; r3 = r1;
[--sp] = rets; [--sp] = rets;
...@@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm) ...@@ -651,7 +651,7 @@ ENTRY(___raw_atomic_set_asm)
r0 = r3; r0 = r3;
rets = [sp++]; rets = [sp++];
rts; rts;
ENDPROC(___raw_atomic_set_asm) ENDPROC(___raw_atomic_or_asm)
/* /*
* r0 = ptr * r0 = ptr
@@ -787,7 +787,7 @@ ENTRY(___raw_bit_set_asm)
	r2 = r1;
	r1 = 1;
	r1 <<= r2;
-	jump ___raw_atomic_set_asm
+	jump ___raw_atomic_or_asm
ENDPROC(___raw_bit_set_asm)
/*
@@ -798,10 +798,10 @@ ENDPROC(___raw_bit_set_asm)
 * Clobbers: r3:0, p1:0
 */
ENTRY(___raw_bit_clear_asm)
-	r2 = r1;
-	r1 = 1;
-	r1 <<= r2;
-	jump ___raw_atomic_clear_asm
+	r2 = 1;
+	r2 <<= r1;
+	r1 = ~r2;
+	jump ___raw_atomic_and_asm
ENDPROC(___raw_bit_clear_asm)
/*
......
@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
	local_irq_save(flags);
	for_each_cpu(cpu, cpumask) {
		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+		atomic_or((1 << msg), &bfin_ipi_data->bits);
		atomic_inc(&bfin_ipi_data->count);
	}
	local_irq_restore(flags);
......
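
The call-site hunks, such as the Blackfin send_ipi() change just above and the s390/m32r/mn10300 ones further down, all follow one conversion: atomic_set_mask() becomes atomic_or() and atomic_clear_mask() becomes atomic_andnot(). A self-contained sketch of that equivalence, again using GCC builtins rather than any kernel implementation:

/* Illustration only, not kernel code. */
#include <assert.h>

typedef struct { unsigned int counter; } atomic_t;

static inline void atomic_or(unsigned int mask, atomic_t *v)
{
	__atomic_fetch_or(&v->counter, mask, __ATOMIC_RELAXED);
}

static inline void atomic_andnot(unsigned int mask, atomic_t *v)
{
	__atomic_fetch_and(&v->counter, ~mask, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_t v = { 0x0f };

	atomic_or(0x80000000u, &v);	/* old: atomic_set_mask(0x80000000, &v)   */
	assert(v.counter == 0x8000000f);

	atomic_andnot(0x0000000fu, &v);	/* old: atomic_clear_mask(0x0000000f, &v) */
	assert(v.counter == 0x80000000);

	return 0;
}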
@@ -15,7 +15,6 @@
#define _ASM_ATOMIC_H

#include <linux/types.h>
-#include <asm/spr-regs.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
@@ -23,6 +22,8 @@
#error not SMP safe
#endif
+#include <asm/atomic_defs.h>
/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
...@@ -34,56 +35,26 @@ ...@@ -34,56 +35,26 @@
#define atomic_read(v) ACCESS_ONCE((v)->counter) #define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v, i) (((v)->counter) = (i)) #define atomic_set(v, i) (((v)->counter) = (i))
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS static inline int atomic_inc_return(atomic_t *v)
static inline int atomic_add_return(int i, atomic_t *v)
{ {
unsigned long val; return __atomic_add_return(1, &v->counter);
}
asm("0: \n" static inline int atomic_dec_return(atomic_t *v)
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */ {
" ckeq icc3,cc7 \n" return __atomic_sub_return(1, &v->counter);
" ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */ }
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
" add%I2 %1,%2,%1 \n"
" cst.p %1,%M0 ,cc3,#1 \n"
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
" beq icc3,#0,0b \n"
: "+U"(v->counter), "=&r"(val)
: "NPr"(i)
: "memory", "cc7", "cc3", "icc3"
);
return val; static inline int atomic_add_return(int i, atomic_t *v)
{
return __atomic_add_return(i, &v->counter);
} }
static inline int atomic_sub_return(int i, atomic_t *v) static inline int atomic_sub_return(int i, atomic_t *v)
{ {
unsigned long val; return __atomic_sub_return(i, &v->counter);
asm("0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
" ld.p %M0,%1 \n" /* LD.P/ORCR must be atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
" sub%I2 %1,%2,%1 \n"
" cst.p %1,%M0 ,cc3,#1 \n"
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* clear ICC3.Z if store happens */
" beq icc3,#0,0b \n"
: "+U"(v->counter), "=&r"(val)
: "NPr"(i)
: "memory", "cc7", "cc3", "icc3"
);
return val;
} }
#else
extern int atomic_add_return(int i, atomic_t *v);
extern int atomic_sub_return(int i, atomic_t *v);
#endif
static inline int atomic_add_negative(int i, atomic_t *v) static inline int atomic_add_negative(int i, atomic_t *v)
{ {
return atomic_add_return(i, v) < 0; return atomic_add_return(i, v) < 0;
...@@ -101,17 +72,14 @@ static inline void atomic_sub(int i, atomic_t *v) ...@@ -101,17 +72,14 @@ static inline void atomic_sub(int i, atomic_t *v)
static inline void atomic_inc(atomic_t *v) static inline void atomic_inc(atomic_t *v)
{ {
atomic_add_return(1, v); atomic_inc_return(v);
} }
static inline void atomic_dec(atomic_t *v) static inline void atomic_dec(atomic_t *v)
{ {
atomic_sub_return(1, v); atomic_dec_return(v);
} }
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
...@@ -120,18 +88,19 @@ static inline void atomic_dec(atomic_t *v) ...@@ -120,18 +88,19 @@ static inline void atomic_dec(atomic_t *v)
* 64-bit atomic ops * 64-bit atomic ops
*/ */
typedef struct { typedef struct {
volatile long long counter; long long counter;
} atomic64_t; } atomic64_t;
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
static inline long long atomic64_read(atomic64_t *v) static inline long long atomic64_read(const atomic64_t *v)
{ {
long long counter; long long counter;
asm("ldd%I1 %M1,%0" asm("ldd%I1 %M1,%0"
: "=e"(counter) : "=e"(counter)
: "m"(v->counter)); : "m"(v->counter));
return counter; return counter;
} }
...@@ -142,10 +111,25 @@ static inline void atomic64_set(atomic64_t *v, long long i) ...@@ -142,10 +111,25 @@ static inline void atomic64_set(atomic64_t *v, long long i)
: "e"(i)); : "e"(i));
} }
extern long long atomic64_inc_return(atomic64_t *v); static inline long long atomic64_inc_return(atomic64_t *v)
extern long long atomic64_dec_return(atomic64_t *v); {
extern long long atomic64_add_return(long long i, atomic64_t *v); return __atomic64_add_return(1, &v->counter);
extern long long atomic64_sub_return(long long i, atomic64_t *v); }
static inline long long atomic64_dec_return(atomic64_t *v)
{
return __atomic64_sub_return(1, &v->counter);
}
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
return __atomic64_add_return(i, &v->counter);
}
static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
return __atomic64_sub_return(i, &v->counter);
}
static inline long long atomic64_add_negative(long long i, atomic64_t *v) static inline long long atomic64_add_negative(long long i, atomic64_t *v)
{ {
...@@ -176,6 +160,7 @@ static inline void atomic64_dec(atomic64_t *v) ...@@ -176,6 +160,7 @@ static inline void atomic64_dec(atomic64_t *v)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) #define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) #define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
#define atomic_xchg(v, new) (xchg(&(v)->counter, new)) #define atomic_xchg(v, new) (xchg(&(v)->counter, new))
#define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
...@@ -196,5 +181,21 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -196,5 +181,21 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c; return c;
} }
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
(void)__atomic32_fetch_##op(i, &v->counter); \
} \
\
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
(void)__atomic64_fetch_##op(i, &v->counter); \
}
ATOMIC_OP(or)
ATOMIC_OP(and)
ATOMIC_OP(xor)
#undef ATOMIC_OP
#endif /* _ASM_ATOMIC_H */ #endif /* _ASM_ATOMIC_H */
#include <asm/spr-regs.h>
#ifdef __ATOMIC_LIB__
#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
#define ATOMIC_QUALS
#define ATOMIC_EXPORT(x) EXPORT_SYMBOL(x)
#else /* !OUTOFLINE && LIB */
#define ATOMIC_OP_RETURN(op)
#define ATOMIC_FETCH_OP(op)
#endif /* OUTOFLINE */
#else /* !__ATOMIC_LIB__ */
#define ATOMIC_EXPORT(x)
#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
#define ATOMIC_OP_RETURN(op) \
extern int __atomic_##op##_return(int i, int *v); \
extern long long __atomic64_##op##_return(long long i, long long *v);
#define ATOMIC_FETCH_OP(op) \
extern int __atomic32_fetch_##op(int i, int *v); \
extern long long __atomic64_fetch_##op(long long i, long long *v);
#else /* !OUTOFLINE && !LIB */
#define ATOMIC_QUALS static inline
#endif /* OUTOFLINE */
#endif /* __ATOMIC_LIB__ */
/*
* Note on the 64 bit inline asm variants...
*
* CSTD is a conditional instruction and needs a constrained memory reference.
* Normally 'U' provides the correct constraints for conditional instructions
* and this is used for the 32 bit version, however 'U' does not appear to work
* for 64 bit values (gcc-4.9)
*
* The exact constraint is that conditional instructions cannot deal with an
* immediate displacement in the memory reference, so what we do is we read the
* address through a volatile cast into a local variable in order to insure we
* _have_ to compute the correct address without displacement. This allows us
* to use the regular 'm' for the memory address.
*
* Furthermore, the %Ln operand, which prints the low word register (r+1),
* really only works for registers, this means we cannot allow immediate values
* for the 64 bit versions -- like we do for the 32 bit ones.
*
*/
#ifndef ATOMIC_OP_RETURN
#define ATOMIC_OP_RETURN(op) \
ATOMIC_QUALS int __atomic_##op##_return(int i, int *v) \
{ \
int val; \
\
asm volatile( \
"0: \n" \
" orcc gr0,gr0,gr0,icc3 \n" \
" ckeq icc3,cc7 \n" \
" ld.p %M0,%1 \n" \
" orcr cc7,cc7,cc3 \n" \
" "#op"%I2 %1,%2,%1 \n" \
" cst.p %1,%M0 ,cc3,#1 \n" \
" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
" beq icc3,#0,0b \n" \
: "+U"(*v), "=&r"(val) \
: "NPr"(i) \
: "memory", "cc7", "cc3", "icc3" \
); \
\
return val; \
} \
ATOMIC_EXPORT(__atomic_##op##_return); \
\
ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v) \
{ \
long long *__v = READ_ONCE(v); \
long long val; \
\
asm volatile( \
"0: \n" \
" orcc gr0,gr0,gr0,icc3 \n" \
" ckeq icc3,cc7 \n" \
" ldd.p %M0,%1 \n" \
" orcr cc7,cc7,cc3 \n" \
" "#op"cc %L1,%L2,%L1,icc0 \n" \
" "#op"x %1,%2,%1,icc0 \n" \
" cstd.p %1,%M0 ,cc3,#1 \n" \
" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
" beq icc3,#0,0b \n" \
: "+m"(*__v), "=&e"(val) \
: "e"(i) \
: "memory", "cc7", "cc3", "icc0", "icc3" \
); \
\
return val; \
} \
ATOMIC_EXPORT(__atomic64_##op##_return);
#endif
#ifndef ATOMIC_FETCH_OP
#define ATOMIC_FETCH_OP(op) \
ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v) \
{ \
int old, tmp; \
\
asm volatile( \
"0: \n" \
" orcc gr0,gr0,gr0,icc3 \n" \
" ckeq icc3,cc7 \n" \
" ld.p %M0,%1 \n" \
" orcr cc7,cc7,cc3 \n" \
" "#op"%I3 %1,%3,%2 \n" \
" cst.p %2,%M0 ,cc3,#1 \n" \
" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
" beq icc3,#0,0b \n" \
: "+U"(*v), "=&r"(old), "=r"(tmp) \
: "NPr"(i) \
: "memory", "cc7", "cc3", "icc3" \
); \
\
return old; \
} \
ATOMIC_EXPORT(__atomic32_fetch_##op); \
\
ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v) \
{ \
long long *__v = READ_ONCE(v); \
long long old, tmp; \
\
asm volatile( \
"0: \n" \
" orcc gr0,gr0,gr0,icc3 \n" \
" ckeq icc3,cc7 \n" \
" ldd.p %M0,%1 \n" \
" orcr cc7,cc7,cc3 \n" \
" "#op" %L1,%L3,%L2 \n" \
" "#op" %1,%3,%2 \n" \
" cstd.p %2,%M0 ,cc3,#1 \n" \
" corcc gr29,gr29,gr0 ,cc3,#1 \n" \
" beq icc3,#0,0b \n" \
: "+m"(*__v), "=&e"(old), "=e"(tmp) \
: "e"(i) \
: "memory", "cc7", "cc3", "icc3" \
); \
\
return old; \
} \
ATOMIC_EXPORT(__atomic64_fetch_##op);
#endif
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(xor)
ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_QUALS
#undef ATOMIC_EXPORT
...@@ -25,109 +25,30 @@ ...@@ -25,109 +25,30 @@
#include <asm-generic/bitops/ffz.h> #include <asm-generic/bitops/ffz.h>
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS #include <asm/atomic.h>
static inline
unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
{
unsigned long old, tmp;
asm volatile(
"0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
" ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
" and%I3 %1,%3,%2 \n"
" cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
" beq icc3,#0,0b \n"
: "+U"(*v), "=&r"(old), "=r"(tmp)
: "NPr"(~mask)
: "memory", "cc7", "cc3", "icc3"
);
return old;
}
static inline
unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
{
unsigned long old, tmp;
asm volatile(
"0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
" ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
" or%I3 %1,%3,%2 \n"
" cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
" beq icc3,#0,0b \n"
: "+U"(*v), "=&r"(old), "=r"(tmp)
: "NPr"(mask)
: "memory", "cc7", "cc3", "icc3"
);
return old;
}
static inline
unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
{
unsigned long old, tmp;
asm volatile(
"0: \n"
" orcc gr0,gr0,gr0,icc3 \n" /* set ICC3.Z */
" ckeq icc3,cc7 \n"
" ld.p %M0,%1 \n" /* LD.P/ORCR are atomic */
" orcr cc7,cc7,cc3 \n" /* set CC3 to true */
" xor%I3 %1,%3,%2 \n"
" cst.p %2,%M0 ,cc3,#1 \n" /* if store happens... */
" corcc gr29,gr29,gr0 ,cc3,#1 \n" /* ... clear ICC3.Z */
" beq icc3,#0,0b \n"
: "+U"(*v), "=&r"(old), "=r"(tmp)
: "NPr"(mask)
: "memory", "cc7", "cc3", "icc3"
);
return old;
}
#else
extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
#endif
#define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v))
#define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v))
static inline int test_and_clear_bit(unsigned long nr, volatile void *addr) static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
{ {
volatile unsigned long *ptr = addr; unsigned int *ptr = (void *)addr;
unsigned long mask = 1UL << (nr & 31); unsigned int mask = 1UL << (nr & 31);
ptr += nr >> 5; ptr += nr >> 5;
return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0; return (__atomic32_fetch_and(~mask, ptr) & mask) != 0;
} }
static inline int test_and_set_bit(unsigned long nr, volatile void *addr) static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
{ {
volatile unsigned long *ptr = addr; unsigned int *ptr = (void *)addr;
unsigned long mask = 1UL << (nr & 31); unsigned int mask = 1UL << (nr & 31);
ptr += nr >> 5; ptr += nr >> 5;
return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0; return (__atomic32_fetch_or(mask, ptr) & mask) != 0;
} }
static inline int test_and_change_bit(unsigned long nr, volatile void *addr) static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
{ {
volatile unsigned long *ptr = addr; unsigned int *ptr = (void *)addr;
unsigned long mask = 1UL << (nr & 31); unsigned int mask = 1UL << (nr & 31);
ptr += nr >> 5; ptr += nr >> 5;
return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0; return (__atomic32_fetch_xor(mask, ptr) & mask) != 0;
} }
static inline void clear_bit(unsigned long nr, volatile void *addr) static inline void clear_bit(unsigned long nr, volatile void *addr)
......
@@ -109,13 +109,13 @@ static struct frv_dma_channel frv_dma_channels[FRV_DMA_NCHANS] = {
static DEFINE_RWLOCK(frv_dma_channels_lock);

-unsigned long frv_dma_inprogress;
+unsigned int frv_dma_inprogress;

#define frv_clear_dma_inprogress(channel) \
-	atomic_clear_mask(1 << (channel), &frv_dma_inprogress);
+	(void)__atomic32_fetch_and(~(1 << (channel)), &frv_dma_inprogress);

#define frv_set_dma_inprogress(channel) \
-	atomic_set_mask(1 << (channel), &frv_dma_inprogress);
+	(void)__atomic32_fetch_or(1 << (channel), &frv_dma_inprogress);

/*****************************************************************************/
/*
......
...@@ -58,11 +58,6 @@ EXPORT_SYMBOL(__outsl_ns); ...@@ -58,11 +58,6 @@ EXPORT_SYMBOL(__outsl_ns);
EXPORT_SYMBOL(__insl_ns); EXPORT_SYMBOL(__insl_ns);
#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
EXPORT_SYMBOL(atomic_test_and_OR_mask);
EXPORT_SYMBOL(atomic_test_and_XOR_mask);
EXPORT_SYMBOL(atomic_add_return);
EXPORT_SYMBOL(atomic_sub_return);
EXPORT_SYMBOL(__xchg_32); EXPORT_SYMBOL(__xchg_32);
EXPORT_SYMBOL(__cmpxchg_32); EXPORT_SYMBOL(__cmpxchg_32);
#endif #endif
......
...@@ -5,4 +5,4 @@ ...@@ -5,4 +5,4 @@
lib-y := \ lib-y := \
__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \ __ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \ checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o atomic-lib.o
#include <linux/export.h>
#include <asm/atomic.h>
#define __ATOMIC_LIB__
#include <asm/atomic_defs.h>
...@@ -17,116 +17,6 @@ ...@@ -17,116 +17,6 @@
.text .text
.balign 4 .balign 4
###############################################################################
#
# unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
#
###############################################################################
.globl atomic_test_and_ANDNOT_mask
.type atomic_test_and_ANDNOT_mask,@function
atomic_test_and_ANDNOT_mask:
not.p gr8,gr10
0:
orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
ckeq icc3,cc7
ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
orcr cc7,cc7,cc3 /* set CC3 to true */
and gr8,gr10,gr11
cst.p gr11,@(gr9,gr0) ,cc3,#1
corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
beq icc3,#0,0b
bralr
.size atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask
###############################################################################
#
# unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
#
###############################################################################
.globl atomic_test_and_OR_mask
.type atomic_test_and_OR_mask,@function
atomic_test_and_OR_mask:
or.p gr8,gr8,gr10
0:
orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
ckeq icc3,cc7
ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
orcr cc7,cc7,cc3 /* set CC3 to true */
or gr8,gr10,gr11
cst.p gr11,@(gr9,gr0) ,cc3,#1
corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
beq icc3,#0,0b
bralr
.size atomic_test_and_OR_mask, .-atomic_test_and_OR_mask
###############################################################################
#
# unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
#
###############################################################################
.globl atomic_test_and_XOR_mask
.type atomic_test_and_XOR_mask,@function
atomic_test_and_XOR_mask:
or.p gr8,gr8,gr10
0:
orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
ckeq icc3,cc7
ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
orcr cc7,cc7,cc3 /* set CC3 to true */
xor gr8,gr10,gr11
cst.p gr11,@(gr9,gr0) ,cc3,#1
corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
beq icc3,#0,0b
bralr
.size atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask
###############################################################################
#
# int atomic_add_return(int i, atomic_t *v)
#
###############################################################################
.globl atomic_add_return
.type atomic_add_return,@function
atomic_add_return:
or.p gr8,gr8,gr10
0:
orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
ckeq icc3,cc7
ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
orcr cc7,cc7,cc3 /* set CC3 to true */
add gr8,gr10,gr8
cst.p gr8,@(gr9,gr0) ,cc3,#1
corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
beq icc3,#0,0b
bralr
.size atomic_add_return, .-atomic_add_return
###############################################################################
#
# int atomic_sub_return(int i, atomic_t *v)
#
###############################################################################
.globl atomic_sub_return
.type atomic_sub_return,@function
atomic_sub_return:
or.p gr8,gr8,gr10
0:
orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
ckeq icc3,cc7
ld.p @(gr9,gr0),gr8 /* LD.P/ORCR must be atomic */
orcr cc7,cc7,cc3 /* set CC3 to true */
sub gr8,gr10,gr8
cst.p gr8,@(gr9,gr0) ,cc3,#1
corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
beq icc3,#0,0b
bralr
.size atomic_sub_return, .-atomic_sub_return
############################################################################### ###############################################################################
# #
# uint32_t __xchg_32(uint32_t i, uint32_t *v) # uint32_t __xchg_32(uint32_t i, uint32_t *v)
......
...@@ -18,100 +18,6 @@ ...@@ -18,100 +18,6 @@
.balign 4 .balign 4
###############################################################################
#
# long long atomic64_inc_return(atomic64_t *v)
#
###############################################################################
.globl atomic64_inc_return
.type atomic64_inc_return,@function
atomic64_inc_return:
or.p gr8,gr8,gr10
0:
orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
ckeq icc3,cc7
ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
orcr cc7,cc7,cc3 /* set CC3 to true */
addicc gr9,#1,gr9,icc0
addxi gr8,#0,gr8,icc0
cstd.p gr8,@(gr10,gr0) ,cc3,#1
corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
beq icc3,#0,0b
bralr
.size atomic64_inc_return, .-atomic64_inc_return
###############################################################################
#
# long long atomic64_dec_return(atomic64_t *v)
#
###############################################################################
.globl atomic64_dec_return
.type atomic64_dec_return,@function
atomic64_dec_return:
or.p gr8,gr8,gr10
0:
orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
ckeq icc3,cc7
ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
orcr cc7,cc7,cc3 /* set CC3 to true */
subicc gr9,#1,gr9,icc0
subxi gr8,#0,gr8,icc0
cstd.p gr8,@(gr10,gr0) ,cc3,#1
corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
beq icc3,#0,0b
bralr
.size atomic64_dec_return, .-atomic64_dec_return
###############################################################################
#
# long long atomic64_add_return(long long i, atomic64_t *v)
#
###############################################################################
.globl atomic64_add_return
.type atomic64_add_return,@function
atomic64_add_return:
or.p gr8,gr8,gr4
or gr9,gr9,gr5
0:
orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
ckeq icc3,cc7
ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
orcr cc7,cc7,cc3 /* set CC3 to true */
addcc gr9,gr5,gr9,icc0
addx gr8,gr4,gr8,icc0
cstd.p gr8,@(gr10,gr0) ,cc3,#1
corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
beq icc3,#0,0b
bralr
.size atomic64_add_return, .-atomic64_add_return
###############################################################################
#
# long long atomic64_sub_return(long long i, atomic64_t *v)
#
###############################################################################
.globl atomic64_sub_return
.type atomic64_sub_return,@function
atomic64_sub_return:
or.p gr8,gr8,gr4
or gr9,gr9,gr5
0:
orcc gr0,gr0,gr0,icc3 /* set ICC3.Z */
ckeq icc3,cc7
ldd.p @(gr10,gr0),gr8 /* LDD.P/ORCR must be atomic */
orcr cc7,cc7,cc3 /* set CC3 to true */
subcc gr9,gr5,gr9,icc0
subx gr8,gr4,gr8,icc0
cstd.p gr8,@(gr10,gr0) ,cc3,#1
corcc gr29,gr29,gr0 ,cc3,#1 /* clear ICC3.Z if store happens */
beq icc3,#0,0b
bralr
.size atomic64_sub_return, .-atomic64_sub_return
############################################################################### ###############################################################################
# #
# uint64_t __xchg_64(uint64_t i, uint64_t *v) # uint64_t __xchg_64(uint64_t i, uint64_t *v)
......
@@ -16,83 +16,52 @@
#include <linux/kernel.h>

-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	ret = v->counter += i;
-	arch_local_irq_restore(flags);
-	return ret;
-}
-
-#define atomic_add(i, v) atomic_add_return(i, v)
-#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	ret = v->counter -= i;
-	arch_local_irq_restore(flags);
-	return ret;
-}
-
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-static inline int atomic_inc_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	v->counter++;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
-
-#define atomic_inc(v) atomic_inc_return(v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static inline int atomic_dec_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
-
-#define atomic_dec(v) atomic_dec_return(v)
-
-static inline int atomic_dec_and_test(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret == 0;
-}
+#define ATOMIC_OP_RETURN(op, c_op)				\
+static inline int atomic_##op##_return(int i, atomic_t *v)	\
+{								\
+	h8300flags flags;					\
+	int ret;						\
+								\
+	flags = arch_local_irq_save();				\
+	ret = v->counter c_op i;				\
+	arch_local_irq_restore(flags);				\
+	return ret;						\
+}
+
+#define ATOMIC_OP(op, c_op)					\
+static inline void atomic_##op(int i, atomic_t *v)		\
+{								\
+	h8300flags flags;					\
+								\
+	flags = arch_local_irq_save();				\
+	v->counter c_op i;					\
+	arch_local_irq_restore(flags);				\
+}
+
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP_RETURN(sub, -=)
+
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
+
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define atomic_add(i, v)		(void)atomic_add_return(i, v)
+#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
+
+#define atomic_sub(i, v)		(void)atomic_sub_return(i, v)
+#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)
+
+#define atomic_inc_return(v)		atomic_add_return(1, v)
+#define atomic_dec_return(v)		atomic_sub_return(1, v)
+
+#define atomic_inc(v)			(void)atomic_inc_return(v)
+#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
+
+#define atomic_dec(v)			(void)atomic_dec_return(v)
+#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
...@@ -120,40 +89,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -120,40 +89,4 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return ret; return ret;
} }
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
unsigned char ccr;
unsigned long tmp;
__asm__ __volatile__("stc ccr,%w3\n\t"
"orc #0x80,ccr\n\t"
"mov.l %0,%1\n\t"
"and.l %2,%1\n\t"
"mov.l %1,%0\n\t"
"ldc %w3,ccr"
: "=m"(*v), "=r"(tmp)
: "g"(~(mask)), "r"(ccr));
}
static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
unsigned char ccr;
unsigned long tmp;
__asm__ __volatile__("stc ccr,%w3\n\t"
"orc #0x80,ccr\n\t"
"mov.l %0,%1\n\t"
"or.l %2,%1\n\t"
"mov.l %1,%0\n\t"
"ldc %w3,ccr"
: "=m"(*v), "=r"(tmp)
: "g"(~(mask)), "r"(ccr));
}
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* __ARCH_H8300_ATOMIC __ */ #endif /* __ARCH_H8300_ATOMIC __ */
...@@ -132,6 +132,10 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ...@@ -132,6 +132,10 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add) ATOMIC_OPS(add)
ATOMIC_OPS(sub) ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
......
...@@ -45,8 +45,6 @@ ia64_atomic_##op (int i, atomic_t *v) \ ...@@ -45,8 +45,6 @@ ia64_atomic_##op (int i, atomic_t *v) \
ATOMIC_OP(add, +) ATOMIC_OP(add, +)
ATOMIC_OP(sub, -) ATOMIC_OP(sub, -)
#undef ATOMIC_OP
#define atomic_add_return(i,v) \ #define atomic_add_return(i,v) \
({ \ ({ \
int __ia64_aar_i = (i); \ int __ia64_aar_i = (i); \
...@@ -71,6 +69,16 @@ ATOMIC_OP(sub, -) ...@@ -71,6 +69,16 @@ ATOMIC_OP(sub, -)
: ia64_atomic_sub(__ia64_asr_i, v); \ : ia64_atomic_sub(__ia64_asr_i, v); \
}) })
ATOMIC_OP(and, &)
ATOMIC_OP(or, |)
ATOMIC_OP(xor, ^)
#define atomic_and(i,v) (void)ia64_atomic_and(i,v)
#define atomic_or(i,v) (void)ia64_atomic_or(i,v)
#define atomic_xor(i,v) (void)ia64_atomic_xor(i,v)
#undef ATOMIC_OP
#define ATOMIC64_OP(op, c_op) \ #define ATOMIC64_OP(op, c_op) \
static __inline__ long \ static __inline__ long \
ia64_atomic64_##op (__s64 i, atomic64_t *v) \ ia64_atomic64_##op (__s64 i, atomic64_t *v) \
...@@ -89,8 +97,6 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v) \ ...@@ -89,8 +97,6 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v) \
ATOMIC64_OP(add, +) ATOMIC64_OP(add, +)
ATOMIC64_OP(sub, -) ATOMIC64_OP(sub, -)
#undef ATOMIC64_OP
#define atomic64_add_return(i,v) \ #define atomic64_add_return(i,v) \
({ \ ({ \
long __ia64_aar_i = (i); \ long __ia64_aar_i = (i); \
...@@ -115,6 +121,16 @@ ATOMIC64_OP(sub, -) ...@@ -115,6 +121,16 @@ ATOMIC64_OP(sub, -)
: ia64_atomic64_sub(__ia64_asr_i, v); \ : ia64_atomic64_sub(__ia64_asr_i, v); \
}) })
ATOMIC64_OP(and, &)
ATOMIC64_OP(or, |)
ATOMIC64_OP(xor, ^)
#define atomic64_and(i,v) (void)ia64_atomic64_and(i,v)
#define atomic64_or(i,v) (void)ia64_atomic64_or(i,v)
#define atomic64_xor(i,v) (void)ia64_atomic64_xor(i,v)
#undef ATOMIC64_OP
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
......
...@@ -94,6 +94,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ ...@@ -94,6 +94,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add) ATOMIC_OPS(add)
ATOMIC_OPS(sub) ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
...@@ -239,45 +243,4 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -239,45 +243,4 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c; return c;
} }
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
{
unsigned long flags;
unsigned long tmp;
local_irq_save(flags);
__asm__ __volatile__ (
"# atomic_clear_mask \n\t"
DCACHE_CLEAR("%0", "r5", "%1")
M32R_LOCK" %0, @%1; \n\t"
"and %0, %2; \n\t"
M32R_UNLOCK" %0, @%1; \n\t"
: "=&r" (tmp)
: "r" (addr), "r" (~mask)
: "memory"
__ATOMIC_CLOBBER
);
local_irq_restore(flags);
}
static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
{
unsigned long flags;
unsigned long tmp;
local_irq_save(flags);
__asm__ __volatile__ (
"# atomic_set_mask \n\t"
DCACHE_CLEAR("%0", "r5", "%1")
M32R_LOCK" %0, @%1; \n\t"
"or %0, %2; \n\t"
M32R_UNLOCK" %0, @%1; \n\t"
: "=&r" (tmp)
: "r" (addr), "r" (mask)
: "memory"
__ATOMIC_CLOBBER
);
local_irq_restore(flags);
}
#endif /* _ASM_M32R_ATOMIC_H */ #endif /* _ASM_M32R_ATOMIC_H */
@@ -156,7 +156,7 @@ void smp_flush_cache_all(void)
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
	spin_lock(&flushcache_lock);
	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+	atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
	_flush_cache_copyback_all();
	while (flushcache_cpumask)
@@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	flush_vma = vma;
	flush_va = va;
	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+	atomic_or(*mask, (atomic_t *)&flush_cpumask);
	/*
	 * We have to send the IPI only to
......
...@@ -77,6 +77,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ ...@@ -77,6 +77,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
ATOMIC_OPS(add, +=, add) ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub) ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, eor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
...@@ -170,16 +174,6 @@ static inline int atomic_add_negative(int i, atomic_t *v) ...@@ -170,16 +174,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
return c != 0; return c != 0;
} }
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}
static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{ {
int c, old; int c, old;
......
...@@ -74,44 +74,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ...@@ -74,44 +74,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add) ATOMIC_OPS(add)
ATOMIC_OPS(sub) ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
int temp;
asm volatile (
"1: LNKGETD %0, [%1]\n"
" AND %0, %0, %2\n"
" LNKSETD [%1] %0\n"
" DEFR %0, TXSTAT\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"
" BNZ 1b\n"
: "=&d" (temp)
: "da" (&v->counter), "bd" (~mask)
: "cc");
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
int temp;
asm volatile (
"1: LNKGETD %0, [%1]\n"
" OR %0, %0, %2\n"
" LNKSETD [%1], %0\n"
" DEFR %0, TXSTAT\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"
" BNZ 1b\n"
: "=&d" (temp)
: "da" (&v->counter), "bd" (mask)
: "cc");
}
static inline int atomic_cmpxchg(atomic_t *v, int old, int new) static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{ {
int result, temp; int result, temp;
......
...@@ -68,31 +68,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ...@@ -68,31 +68,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add, +=) ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=) ATOMIC_OPS(sub, -=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
__global_lock1(flags);
fence();
v->counter &= ~mask;
__global_unlock1(flags);
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
__global_lock1(flags);
fence();
v->counter |= mask;
__global_unlock1(flags);
}
static inline int atomic_cmpxchg(atomic_t *v, int old, int new) static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{ {
int ret; int ret;
......
...@@ -137,6 +137,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ ...@@ -137,6 +137,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
ATOMIC_OPS(add, +=, addu) ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu) ATOMIC_OPS(sub, -=, subu)
ATOMIC_OP(and, &=, and)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
...@@ -416,6 +420,9 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ ...@@ -416,6 +420,9 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
ATOMIC64_OPS(add, +=, daddu) ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu) ATOMIC64_OPS(sub, -=, dsubu)
ATOMIC64_OP(and, &=, and)
ATOMIC64_OP(or, |=, or)
ATOMIC64_OP(xor, ^=, xor)
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
......
...@@ -89,6 +89,10 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ...@@ -89,6 +89,10 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add) ATOMIC_OPS(add)
ATOMIC_OPS(sub) ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
...@@ -127,73 +131,6 @@ static inline void atomic_dec(atomic_t *v) ...@@ -127,73 +131,6 @@ static inline void atomic_dec(atomic_t *v)
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
/**
* atomic_clear_mask - Atomically clear bits in memory
* @mask: Mask of the bits to be cleared
* @v: pointer to word in memory
*
* Atomically clears the bits set in mask from the memory word specified.
*/
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
int status;
asm volatile(
"1: mov %3,(_AAR,%2) \n"
" mov (_ADR,%2),%0 \n"
" and %4,%0 \n"
" mov %0,(_ADR,%2) \n"
" mov (_ADR,%2),%0 \n" /* flush */
" mov (_ASR,%2),%0 \n"
" or %0,%0 \n"
" bne 1b \n"
: "=&r"(status), "=m"(*addr)
: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
: "memory", "cc");
#else
unsigned long flags;
mask = ~mask;
flags = arch_local_cli_save();
*addr &= mask;
arch_local_irq_restore(flags);
#endif
}
/**
* atomic_set_mask - Atomically set bits in memory
* @mask: Mask of the bits to be set
* @v: pointer to word in memory
*
* Atomically sets the bits set in mask from the memory word specified.
*/
static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
int status;
asm volatile(
"1: mov %3,(_AAR,%2) \n"
" mov (_ADR,%2),%0 \n"
" or %4,%0 \n"
" mov %0,(_ADR,%2) \n"
" mov (_ADR,%2),%0 \n" /* flush */
" mov (_ASR,%2),%0 \n"
" or %0,%0 \n"
" bne 1b \n"
: "=&r"(status), "=m"(*addr)
: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
: "memory", "cc");
#else
unsigned long flags;
flags = arch_local_cli_save();
*addr |= mask;
arch_local_irq_restore(flags);
#endif
}
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
#endif /* _ASM_ATOMIC_H */ #endif /* _ASM_ATOMIC_H */
@@ -119,7 +119,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	flush_mm = mm;
	flush_va = va;
#if NR_CPUS <= BITS_PER_LONG
-	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+	atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
#else
#error Not supported.
#endif
......
...@@ -126,6 +126,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ ...@@ -126,6 +126,10 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add, +=) ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=) ATOMIC_OPS(sub, -=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
...@@ -185,6 +189,9 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \ ...@@ -185,6 +189,9 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, +=) ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=) ATOMIC64_OPS(sub, -=)
ATOMIC64_OP(and, &=)
ATOMIC64_OP(or, |=)
ATOMIC64_OP(xor, ^=)
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
......
...@@ -67,6 +67,10 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \ ...@@ -67,6 +67,10 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
ATOMIC_OPS(add, add) ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf) ATOMIC_OPS(sub, subf)
ATOMIC_OP(and, and)
ATOMIC_OP(or, or)
ATOMIC_OP(xor, xor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
...@@ -304,6 +308,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \ ...@@ -304,6 +308,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
ATOMIC64_OPS(add, add) ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf) ATOMIC64_OPS(sub, subf)
ATOMIC64_OP(and, and)
ATOMIC64_OP(or, or)
ATOMIC64_OP(xor, xor)
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
......
...@@ -595,25 +595,6 @@ _GLOBAL(copy_page) ...@@ -595,25 +595,6 @@ _GLOBAL(copy_page)
li r11,4 li r11,4
b 2b b 2b
/*
* void atomic_clear_mask(atomic_t mask, atomic_t *addr)
* void atomic_set_mask(atomic_t mask, atomic_t *addr);
*/
_GLOBAL(atomic_clear_mask)
10: lwarx r5,0,r4
andc r5,r5,r3
PPC405_ERR77(0,r4)
stwcx. r5,0,r4
bne- 10b
blr
_GLOBAL(atomic_set_mask)
10: lwarx r5,0,r4
or r5,r5,r3
PPC405_ERR77(0,r4)
stwcx. r5,0,r4
bne- 10b
blr
/* /*
* Extended precision shifts. * Extended precision shifts.
* *
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#define __ATOMIC_OR "lao" #define __ATOMIC_OR "lao"
#define __ATOMIC_AND "lan" #define __ATOMIC_AND "lan"
#define __ATOMIC_ADD "laa" #define __ATOMIC_ADD "laa"
#define __ATOMIC_XOR "lax"
#define __ATOMIC_BARRIER "bcr 14,0\n" #define __ATOMIC_BARRIER "bcr 14,0\n"
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \ #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
...@@ -49,6 +50,7 @@ ...@@ -49,6 +50,7 @@
#define __ATOMIC_OR "or" #define __ATOMIC_OR "or"
#define __ATOMIC_AND "nr" #define __ATOMIC_AND "nr"
#define __ATOMIC_ADD "ar" #define __ATOMIC_ADD "ar"
#define __ATOMIC_XOR "xr"
#define __ATOMIC_BARRIER "\n" #define __ATOMIC_BARRIER "\n"
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \ #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
@@ -118,15 +120,17 @@ static inline void atomic_add(int i, atomic_t *v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)

-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
-}
+#define ATOMIC_OP(op, OP)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
+}
+
+ATOMIC_OP(and, AND)
+ATOMIC_OP(or, OR)
+ATOMIC_OP(xor, XOR)
+
+#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
...@@ -167,6 +171,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -167,6 +171,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define __ATOMIC64_OR "laog" #define __ATOMIC64_OR "laog"
#define __ATOMIC64_AND "lang" #define __ATOMIC64_AND "lang"
#define __ATOMIC64_ADD "laag" #define __ATOMIC64_ADD "laag"
#define __ATOMIC64_XOR "laxg"
#define __ATOMIC64_BARRIER "bcr 14,0\n" #define __ATOMIC64_BARRIER "bcr 14,0\n"
#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \ #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
...@@ -189,6 +194,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -189,6 +194,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define __ATOMIC64_OR "ogr" #define __ATOMIC64_OR "ogr"
#define __ATOMIC64_AND "ngr" #define __ATOMIC64_AND "ngr"
#define __ATOMIC64_ADD "agr" #define __ATOMIC64_ADD "agr"
#define __ATOMIC64_XOR "xgr"
#define __ATOMIC64_BARRIER "\n" #define __ATOMIC64_BARRIER "\n"
#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \ #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
...@@ -247,16 +253,6 @@ static inline void atomic64_add(long long i, atomic64_t *v) ...@@ -247,16 +253,6 @@ static inline void atomic64_add(long long i, atomic64_t *v)
__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER); __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
} }
static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
}
static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
static inline long long atomic64_cmpxchg(atomic64_t *v, static inline long long atomic64_cmpxchg(atomic64_t *v,
...@@ -270,6 +266,17 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, ...@@ -270,6 +266,17 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
return old; return old;
} }
#define ATOMIC64_OP(op, OP) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
}
ATOMIC64_OP(and, AND)
ATOMIC64_OP(or, OR)
ATOMIC64_OP(xor, XOR)
#undef ATOMIC64_OP
#undef __ATOMIC64_LOOP #undef __ATOMIC64_LOOP
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
......
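Reading the new macro together with the opcode tables above: ATOMIC_OP(xor, XOR), for example, becomes a one-line wrapper around __ATOMIC_LOOP, which the two #define blocks resolve to either the z196 interlocked-access instruction ("lax") or a compare-and-swap loop built around "xr". A sketch of the expansion:

/* sketch: what ATOMIC_OP(xor, XOR) above expands to */
static inline void atomic_xor(int i, atomic_t *v)
{
	__ATOMIC_LOOP(v, i, __ATOMIC_XOR, __ATOMIC_NO_BARRIER);
}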
...@@ -381,7 +381,7 @@ static void disable_sync_clock(void *dummy) ...@@ -381,7 +381,7 @@ static void disable_sync_clock(void *dummy)
* increase the "sequence" counter to avoid the race of an * increase the "sequence" counter to avoid the race of an
* etr event and the complete recovery against get_sync_clock. * etr event and the complete recovery against get_sync_clock.
*/ */
atomic_clear_mask(0x80000000, sw_ptr); atomic_andnot(0x80000000, sw_ptr);
atomic_inc(sw_ptr); atomic_inc(sw_ptr);
} }
...@@ -392,7 +392,7 @@ static void disable_sync_clock(void *dummy) ...@@ -392,7 +392,7 @@ static void disable_sync_clock(void *dummy)
static void enable_sync_clock(void) static void enable_sync_clock(void)
{ {
atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word); atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
atomic_set_mask(0x80000000, sw_ptr); atomic_or(0x80000000, sw_ptr);
} }
/* /*
......
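The call-site conversion in this and the following hunks is mechanical; ignoring memory-ordering details, the removed mask helpers map onto the new generic ops as in this sketch (the old_* names are only for illustration):

/* sketch of the equivalence driving these call-site conversions */
static inline void old_atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);		/* set the bits in mask */
}

static inline void old_atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);		/* clear the bits in mask, i.e. atomic_and(~mask, v) */
}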
...@@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) ...@@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
static void __set_cpu_idle(struct kvm_vcpu *vcpu) static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{ {
atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
} }
static void __unset_cpu_idle(struct kvm_vcpu *vcpu) static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{ {
atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
} }
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{ {
atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
&vcpu->arch.sie_block->cpuflags); &vcpu->arch.sie_block->cpuflags);
vcpu->arch.sie_block->lctl = 0x0000; vcpu->arch.sie_block->lctl = 0x0000;
vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
...@@ -196,7 +196,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) ...@@ -196,7 +196,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{ {
atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
} }
static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
...@@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) ...@@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
spin_unlock(&li->lock); spin_unlock(&li->lock);
/* clear pending external calls set by sigp interpretation facility */ /* clear pending external calls set by sigp interpretation facility */
atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags); atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0; vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
} }
...@@ -1020,7 +1020,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) ...@@ -1020,7 +1020,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
li->irq.ext = irq->u.ext; li->irq.ext = irq->u.ext;
set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs); set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0; return 0;
} }
...@@ -1035,7 +1035,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id) ...@@ -1035,7 +1035,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
/* another external call is pending */ /* another external call is pending */
return -EBUSY; return -EBUSY;
} }
atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
return 0; return 0;
} }
...@@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) ...@@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
return -EBUSY; return -EBUSY;
*extcall = irq->u.extcall; *extcall = irq->u.extcall;
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0; return 0;
} }
...@@ -1133,7 +1133,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, ...@@ -1133,7 +1133,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
set_bit(irq->u.emerg.code, li->sigp_emerg_pending); set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0; return 0;
} }
...@@ -1177,7 +1177,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu) ...@@ -1177,7 +1177,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
0, 0, 2); 0, 0, 2);
set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs); set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0; return 0;
} }
...@@ -1190,7 +1190,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu) ...@@ -1190,7 +1190,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
0, 0, 2); 0, 0, 2);
set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs); set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
return 0; return 0;
} }
...@@ -1369,13 +1369,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type) ...@@ -1369,13 +1369,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
spin_lock(&li->lock); spin_lock(&li->lock);
switch (type) { switch (type) {
case KVM_S390_MCHK: case KVM_S390_MCHK:
atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
break; break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags); atomic_or(CPUSTAT_IO_INT, li->cpuflags);
break; break;
default: default:
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
break; break;
} }
spin_unlock(&li->lock); spin_unlock(&li->lock);
......
...@@ -1215,12 +1215,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) ...@@ -1215,12 +1215,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
} }
restore_access_regs(vcpu->run->s.regs.acrs); restore_access_regs(vcpu->run->s.regs.acrs);
gmap_enable(vcpu->arch.gmap); gmap_enable(vcpu->arch.gmap);
atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
} }
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{ {
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
gmap_disable(vcpu->arch.gmap); gmap_disable(vcpu->arch.gmap);
if (test_kvm_facility(vcpu->kvm, 129)) { if (test_kvm_facility(vcpu->kvm, 129)) {
save_fp_ctl(&vcpu->run->s.regs.fpc); save_fp_ctl(&vcpu->run->s.regs.fpc);
...@@ -1320,9 +1320,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) ...@@ -1320,9 +1320,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
CPUSTAT_STOPPED); CPUSTAT_STOPPED);
if (test_kvm_facility(vcpu->kvm, 78)) if (test_kvm_facility(vcpu->kvm, 78))
atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags); atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
else if (test_kvm_facility(vcpu->kvm, 8)) else if (test_kvm_facility(vcpu->kvm, 8))
atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags); atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
kvm_s390_vcpu_setup_model(vcpu); kvm_s390_vcpu_setup_model(vcpu);
...@@ -1422,24 +1422,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) ...@@ -1422,24 +1422,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu) void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{ {
atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
exit_sie(vcpu); exit_sie(vcpu);
} }
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu) void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{ {
atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
} }
static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu) static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{ {
atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20); atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
exit_sie(vcpu); exit_sie(vcpu);
} }
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{ {
atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20); atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
} }
/* /*
...@@ -1448,7 +1448,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu) ...@@ -1448,7 +1448,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
* return immediately. */ * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu) void exit_sie(struct kvm_vcpu *vcpu)
{ {
atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
cpu_relax(); cpu_relax();
} }
...@@ -1672,19 +1672,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, ...@@ -1672,19 +1672,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
if (dbg->control & KVM_GUESTDBG_ENABLE) { if (dbg->control & KVM_GUESTDBG_ENABLE) {
vcpu->guest_debug = dbg->control; vcpu->guest_debug = dbg->control;
/* enforce guest PER */ /* enforce guest PER */
atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
if (dbg->control & KVM_GUESTDBG_USE_HW_BP) if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
rc = kvm_s390_import_bp_data(vcpu, dbg); rc = kvm_s390_import_bp_data(vcpu, dbg);
} else { } else {
atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
vcpu->arch.guestdbg.last_bp = 0; vcpu->arch.guestdbg.last_bp = 0;
} }
if (rc) { if (rc) {
vcpu->guest_debug = 0; vcpu->guest_debug = 0;
kvm_s390_clear_bp_data(vcpu); kvm_s390_clear_bp_data(vcpu);
atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
} }
return rc; return rc;
...@@ -1771,7 +1771,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) ...@@ -1771,7 +1771,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
if (!ibs_enabled(vcpu)) { if (!ibs_enabled(vcpu)) {
trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
atomic_set_mask(CPUSTAT_IBS, atomic_or(CPUSTAT_IBS,
&vcpu->arch.sie_block->cpuflags); &vcpu->arch.sie_block->cpuflags);
} }
goto retry; goto retry;
...@@ -1780,7 +1780,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) ...@@ -1780,7 +1780,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
if (ibs_enabled(vcpu)) { if (ibs_enabled(vcpu)) {
trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
atomic_clear_mask(CPUSTAT_IBS, atomic_andnot(CPUSTAT_IBS,
&vcpu->arch.sie_block->cpuflags); &vcpu->arch.sie_block->cpuflags);
} }
goto retry; goto retry;
...@@ -2280,7 +2280,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) ...@@ -2280,7 +2280,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
__disable_ibs_on_all_vcpus(vcpu->kvm); __disable_ibs_on_all_vcpus(vcpu->kvm);
} }
atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
/* /*
* Another VCPU might have used IBS while we were offline. * Another VCPU might have used IBS while we were offline.
* Let's play safe and flush the VCPU at startup. * Let's play safe and flush the VCPU at startup.
...@@ -2306,7 +2306,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) ...@@ -2306,7 +2306,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
kvm_s390_clear_stop_irq(vcpu); kvm_s390_clear_stop_irq(vcpu);
atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
__disable_ibs_on_vcpu(vcpu); __disable_ibs_on_vcpu(vcpu);
for (i = 0; i < online_vcpus; i++) { for (i = 0; i < online_vcpus; i++) {
......
...@@ -48,47 +48,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ...@@ -48,47 +48,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add) ATOMIC_OPS(add)
ATOMIC_OPS(sub) ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
int tmp;
unsigned int _mask = ~mask;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" and %2, %0 \n\t" /* add */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (v)
: "r" (_mask)
: "memory" , "r0", "r1");
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
int tmp;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" or %2, %0 \n\t" /* or */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (v)
: "r" (mask)
: "memory" , "r0", "r1");
}
#endif /* __ASM_SH_ATOMIC_GRB_H */ #endif /* __ASM_SH_ATOMIC_GRB_H */
...@@ -37,27 +37,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ...@@ -37,27 +37,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add, +=) ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=) ATOMIC_OPS(sub, -=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
raw_local_irq_save(flags);
v->counter &= ~mask;
raw_local_irq_restore(flags);
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
raw_local_irq_save(flags);
v->counter |= mask;
raw_local_irq_restore(flags);
}
#endif /* __ASM_SH_ATOMIC_IRQ_H */ #endif /* __ASM_SH_ATOMIC_IRQ_H */
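The ATOMIC_OP(and, &=) / (or, |=) / (xor, ^=) lines reuse the file's existing macro (defined above this hunk and not shown here); presumably it wraps the plain C operation in raw_local_irq_save/restore, exactly as the removed helpers did. A sketch under that assumption:

/* sketch, assuming the ATOMIC_OP(op, c_op) macro of atomic-irq.h */
static inline void atomic_and(int i, atomic_t *v)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* UP-only variant: mask interrupts around the RMW */
	v->counter &= i;
	raw_local_irq_restore(flags);
}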
...@@ -52,37 +52,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ ...@@ -52,37 +52,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
ATOMIC_OPS(add) ATOMIC_OPS(add)
ATOMIC_OPS(sub) ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_clear_mask \n"
" and %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
: "=&z" (tmp)
: "r" (~mask), "r" (&v->counter)
: "t");
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_set_mask \n"
" or %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
: "=&z" (tmp)
: "r" (mask), "r" (&v->counter)
: "t");
}
#endif /* __ASM_SH_ATOMIC_LLSC_H */ #endif /* __ASM_SH_ATOMIC_LLSC_H */
...@@ -17,10 +17,12 @@ ...@@ -17,10 +17,12 @@
#include <asm/barrier.h> #include <asm/barrier.h>
#include <asm-generic/atomic64.h> #include <asm-generic/atomic64.h>
#define ATOMIC_INIT(i) { (i) } #define ATOMIC_INIT(i) { (i) }
int atomic_add_return(int, atomic_t *); int atomic_add_return(int, atomic_t *);
void atomic_and(int, atomic_t *);
void atomic_or(int, atomic_t *);
void atomic_xor(int, atomic_t *);
int atomic_cmpxchg(atomic_t *, int, int); int atomic_cmpxchg(atomic_t *, int, int);
int atomic_xchg(atomic_t *, int); int atomic_xchg(atomic_t *, int);
int __atomic_add_unless(atomic_t *, int, int); int __atomic_add_unless(atomic_t *, int, int);
......
...@@ -33,6 +33,10 @@ long atomic64_##op##_return(long, atomic64_t *); ...@@ -33,6 +33,10 @@ long atomic64_##op##_return(long, atomic64_t *);
ATOMIC_OPS(add) ATOMIC_OPS(add)
ATOMIC_OPS(sub) ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
......
...@@ -27,22 +27,38 @@ static DEFINE_SPINLOCK(dummy); ...@@ -27,22 +27,38 @@ static DEFINE_SPINLOCK(dummy);
#endif /* SMP */ #endif /* SMP */
#define ATOMIC_OP(op, cop) \ #define ATOMIC_OP_RETURN(op, c_op) \
int atomic_##op##_return(int i, atomic_t *v) \ int atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
int ret; \ int ret; \
unsigned long flags; \ unsigned long flags; \
spin_lock_irqsave(ATOMIC_HASH(v), flags); \ spin_lock_irqsave(ATOMIC_HASH(v), flags); \
\ \
ret = (v->counter cop i); \ ret = (v->counter c_op i); \
\ \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \ return ret; \
} \ } \
EXPORT_SYMBOL(atomic_##op##_return); EXPORT_SYMBOL(atomic_##op##_return);
ATOMIC_OP(add, +=) #define ATOMIC_OP(op, c_op) \
void atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
spin_lock_irqsave(ATOMIC_HASH(v), flags); \
\
v->counter c_op i; \
\
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
} \
EXPORT_SYMBOL(atomic_##op);
ATOMIC_OP_RETURN(add, +=)
ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
int atomic_xchg(atomic_t *v, int new) int atomic_xchg(atomic_t *v, int new)
......
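With the new ATOMIC_OP(op, c_op) macro above, each bitwise op becomes a small exported function that serializes on the per-variable hashed spinlock; ATOMIC_OP(or, |=), for instance, expands to roughly:

/* sketch: expansion of ATOMIC_OP(or, |=) in lib/atomic32.c */
void atomic_or(int i, atomic_t *v)
{
	unsigned long flags;

	spin_lock_irqsave(ATOMIC_HASH(v), flags);	/* hashed lock protecting *v */
	v->counter |= i;
	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
EXPORT_SYMBOL(atomic_or);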
...@@ -47,6 +47,9 @@ ENDPROC(atomic_##op##_return); ...@@ -47,6 +47,9 @@ ENDPROC(atomic_##op##_return);
ATOMIC_OPS(add) ATOMIC_OPS(add)
ATOMIC_OPS(sub) ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
...@@ -84,6 +87,9 @@ ENDPROC(atomic64_##op##_return); ...@@ -84,6 +87,9 @@ ENDPROC(atomic64_##op##_return);
ATOMIC64_OPS(add) ATOMIC64_OPS(add)
ATOMIC64_OPS(sub) ATOMIC64_OPS(sub)
ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
......
...@@ -111,6 +111,9 @@ EXPORT_SYMBOL(atomic64_##op##_return); ...@@ -111,6 +111,9 @@ EXPORT_SYMBOL(atomic64_##op##_return);
ATOMIC_OPS(add) ATOMIC_OPS(add)
ATOMIC_OPS(sub) ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
......
...@@ -34,6 +34,19 @@ static inline void atomic_add(int i, atomic_t *v) ...@@ -34,6 +34,19 @@ static inline void atomic_add(int i, atomic_t *v)
_atomic_xchg_add(&v->counter, i); _atomic_xchg_add(&v->counter, i);
} }
#define ATOMIC_OP(op) \
unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
_atomic_##op((unsigned long *)&v->counter, i); \
}
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OP
/** /**
* atomic_add_return - add integer and return * atomic_add_return - add integer and return
* @v: pointer of type atomic_t * @v: pointer of type atomic_t
...@@ -113,6 +126,17 @@ static inline void atomic64_add(long long i, atomic64_t *v) ...@@ -113,6 +126,17 @@ static inline void atomic64_add(long long i, atomic64_t *v)
_atomic64_xchg_add(&v->counter, i); _atomic64_xchg_add(&v->counter, i);
} }
#define ATOMIC64_OP(op) \
long long _atomic64_##op(long long *v, long long n); \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
_atomic64_##op(&v->counter, i); \
}
ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)
/** /**
* atomic64_add_return - add integer and return * atomic64_add_return - add integer and return
* @v: pointer of type atomic64_t * @v: pointer of type atomic64_t
...@@ -225,6 +249,7 @@ extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n); ...@@ -225,6 +249,7 @@ extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p, extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
int *lock, int o, int n); int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n); extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n); extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n); extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock, extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
...@@ -234,6 +259,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock, ...@@ -234,6 +259,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
long long n); long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p, extern long long __atomic64_xchg_add_unless(volatile long long *p,
int *lock, long long o, long long n); int *lock, long long o, long long n);
extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
/* Return failure from the atomic wrappers. */ /* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr); struct __get_user __atomic_bad_address(int __user *addr);
......
...@@ -58,6 +58,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -58,6 +58,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return oldval; return oldval;
} }
static inline void atomic_and(int i, atomic_t *v)
{
__insn_fetchand4((void *)&v->counter, i);
}
static inline void atomic_or(int i, atomic_t *v)
{
__insn_fetchor4((void *)&v->counter, i);
}
static inline void atomic_xor(int i, atomic_t *v)
{
int guess, oldval = v->counter;
do {
guess = oldval;
__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
oldval = __insn_cmpexch4(&v->counter, guess ^ i);
} while (guess != oldval);
}
/* Now the true 64-bit operations. */ /* Now the true 64-bit operations. */
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
...@@ -91,6 +111,26 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) ...@@ -91,6 +111,26 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
return oldval != u; return oldval != u;
} }
static inline void atomic64_and(long i, atomic64_t *v)
{
__insn_fetchand((void *)&v->counter, i);
}
static inline void atomic64_or(long i, atomic64_t *v)
{
__insn_fetchor((void *)&v->counter, i);
}
static inline void atomic64_xor(long i, atomic64_t *v)
{
long guess, oldval = v->counter;
do {
guess = oldval;
__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
oldval = __insn_cmpexch(&v->counter, guess ^ i);
} while (guess != oldval);
}
#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v)) #define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v) atomic64_add(-(i), (v)) #define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v)) #define atomic64_inc_return(v) atomic64_add_return(1, (v))
......
...@@ -94,6 +94,12 @@ unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask) ...@@ -94,6 +94,12 @@ unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
} }
EXPORT_SYMBOL(_atomic_or); EXPORT_SYMBOL(_atomic_or);
unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
{
return __atomic_and((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_and);
unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask) unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{ {
return __atomic_andn((int *)p, __atomic_setup(p), mask).val; return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
...@@ -136,6 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n) ...@@ -136,6 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n)
} }
EXPORT_SYMBOL(_atomic64_cmpxchg); EXPORT_SYMBOL(_atomic64_cmpxchg);
long long _atomic64_and(long long *v, long long n)
{
return __atomic64_and(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_and);
long long _atomic64_or(long long *v, long long n)
{
return __atomic64_or(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_or);
long long _atomic64_xor(long long *v, long long n)
{
return __atomic64_xor(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xor);
/* /*
* If any of the atomic or futex routines hit a bad address (not in * If any of the atomic or futex routines hit a bad address (not in
......
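Putting the tile pieces together, a 32-bit call such as atomic_and(i, v) passes through three layers: the inline wrapper added in asm/atomic_32.h, the C stub added here, and the assembler helper added below in atomic_asm_32.S. A simplified sketch of that path:

/* sketch of the tile 32-bit call path for the new bitwise ops */
static inline void atomic_and(int i, atomic_t *v)
{
	/* inline wrapper generated by ATOMIC_OP(and) in asm/atomic_32.h */
	_atomic_and((unsigned long *)&v->counter, i);
}

unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
{
	/* C stub in lib/atomic_32.c: __atomic_setup() picks the hashed lock,
	 * __atomic_and() is the assembler helper added in atomic_asm_32.S */
	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
}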
...@@ -178,6 +178,7 @@ atomic_op _xchg_add, 32, "add r24, r22, r2" ...@@ -178,6 +178,7 @@ atomic_op _xchg_add, 32, "add r24, r22, r2"
atomic_op _xchg_add_unless, 32, \ atomic_op _xchg_add_unless, 32, \
"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }" "sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
atomic_op _or, 32, "or r24, r22, r2" atomic_op _or, 32, "or r24, r22, r2"
atomic_op _and, 32, "and r24, r22, r2"
atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2" atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
atomic_op _xor, 32, "xor r24, r22, r2" atomic_op _xor, 32, "xor r24, r22, r2"
...@@ -191,6 +192,9 @@ atomic_op 64_xchg_add_unless, 64, \ ...@@ -191,6 +192,9 @@ atomic_op 64_xchg_add_unless, 64, \
{ bbns r26, 3f; add r24, r22, r4 }; \ { bbns r26, 3f; add r24, r22, r4 }; \
{ bbns r27, 3f; add r25, r23, r5 }; \ { bbns r27, 3f; add r25, r23, r5 }; \
slt_u r26, r24, r22; add r25, r25, r26" slt_u r26, r24, r22; add r25, r25, r26"
atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
jrp lr /* happy backtracer */ jrp lr /* happy backtracer */
......
...@@ -182,6 +182,21 @@ static inline int atomic_xchg(atomic_t *v, int new) ...@@ -182,6 +182,21 @@ static inline int atomic_xchg(atomic_t *v, int new)
return xchg(&v->counter, new); return xchg(&v->counter, new);
} }
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
asm volatile(LOCK_PREFIX #op"l %1,%0" \
: "+m" (v->counter) \
: "ir" (i) \
: "memory"); \
}
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OP
/** /**
* __atomic_add_unless - add unless the number is already a given value * __atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t * @v: pointer of type atomic_t
...@@ -219,16 +234,6 @@ static __always_inline short int atomic_inc_short(short int *v) ...@@ -219,16 +234,6 @@ static __always_inline short int atomic_inc_short(short int *v)
return *v; return *v;
} }
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
asm volatile(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)), "m" (*(addr)) : "memory")
#define atomic_set_mask(mask, addr) \
asm volatile(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)(mask)), "m" (*(addr)) \
: "memory")
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
# include <asm/atomic64_32.h> # include <asm/atomic64_32.h>
#else #else
......
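The x86 ATOMIC_OP(op) macro above stringizes the op name into a locked instruction, so each instantiation reduces to a single lock-prefixed RMW; ATOMIC_OP(or), for example, expands to roughly:

/* sketch: expansion of ATOMIC_OP(or) on x86 */
static inline void atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"	/* one locked RMW, replacing atomic_set_mask() */
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}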
...@@ -313,4 +313,18 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) ...@@ -313,4 +313,18 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#undef alternative_atomic64 #undef alternative_atomic64
#undef __alternative_atomic64 #undef __alternative_atomic64
#define ATOMIC64_OP(op, c_op) \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
long long old, c = 0; \
while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c) \
c = old; \
}
ATOMIC64_OP(and, &)
ATOMIC64_OP(or, |)
ATOMIC64_OP(xor, ^)
#undef ATOMIC64_OP
#endif /* _ASM_X86_ATOMIC64_32_H */ #endif /* _ASM_X86_ATOMIC64_32_H */
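On 32-bit x86 the new 64-bit ops fall back to the atomic64_cmpxchg() loop shown in the macro above; a small usage sketch with illustrative values (not from the patch):

/* usage sketch for the 32-bit x86 fallback ops (illustrative values) */
static void atomic64_bitops_example(void)
{
	atomic64_t cnt = ATOMIC64_INIT(0x12345678LL);

	atomic64_and(~0xffLL, &cnt);	/* counter becomes 0x12345600 */
	atomic64_or(0x01LL, &cnt);	/* counter becomes 0x12345601 */
	atomic64_xor(0x01LL, &cnt);	/* counter becomes 0x12345600 again */
}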
...@@ -220,4 +220,19 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) ...@@ -220,4 +220,19 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
return dec; return dec;
} }
#define ATOMIC64_OP(op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
asm volatile(LOCK_PREFIX #op"q %1,%0" \
: "+m" (v->counter) \
: "er" (i) \
: "memory"); \
}
ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)
#undef ATOMIC64_OP
#endif /* _ASM_X86_ATOMIC64_64_H */ #endif /* _ASM_X86_ATOMIC64_64_H */
...@@ -145,6 +145,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \ ...@@ -145,6 +145,10 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
ATOMIC_OPS(add) ATOMIC_OPS(add)
ATOMIC_OPS(sub) ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
...@@ -250,75 +254,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -250,75 +254,6 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c; return c;
} }
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
unsigned long tmp;
int result;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" and %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (~mask), "a" (v)
: "memory"
);
#else
unsigned int all_f = -1;
unsigned int vval;
__asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" xor %1, %4, %3\n"
" and %0, %0, %4\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval), "=a" (mask)
: "a" (v), "a" (all_f), "1" (mask)
: "a15", "memory"
);
#endif
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
unsigned long tmp;
int result;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" or %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (mask), "a" (v)
: "memory"
);
#else
unsigned int vval;
__asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" or %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (mask), "a" (v)
: "a15", "memory"
);
#endif
}
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */ #endif /* _XTENSA_ATOMIC_H */
...@@ -748,7 +748,7 @@ static int i915_drm_resume(struct drm_device *dev) ...@@ -748,7 +748,7 @@ static int i915_drm_resume(struct drm_device *dev)
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (i915_gem_init_hw(dev)) { if (i915_gem_init_hw(dev)) {
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
} }
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
......
...@@ -5091,7 +5091,7 @@ int i915_gem_init(struct drm_device *dev) ...@@ -5091,7 +5091,7 @@ int i915_gem_init(struct drm_device *dev)
* for all other failure, such as an allocation failure, bail. * for all other failure, such as an allocation failure, bail.
*/ */
DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
ret = 0; ret = 0;
} }
......
...@@ -2446,7 +2446,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) ...@@ -2446,7 +2446,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
kobject_uevent_env(&dev->primary->kdev->kobj, kobject_uevent_env(&dev->primary->kdev->kobj,
KOBJ_CHANGE, reset_done_event); KOBJ_CHANGE, reset_done_event);
} else { } else {
atomic_set_mask(I915_WEDGED, &error->reset_counter); atomic_or(I915_WEDGED, &error->reset_counter);
} }
/* /*
...@@ -2574,7 +2574,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged, ...@@ -2574,7 +2574,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
i915_report_and_clear_eir(dev); i915_report_and_clear_eir(dev);
if (wedged) { if (wedged) {
atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, atomic_or(I915_RESET_IN_PROGRESS_FLAG,
&dev_priv->gpu_error.reset_counter); &dev_priv->gpu_error.reset_counter);
/* /*
......
...@@ -529,7 +529,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, ...@@ -529,7 +529,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
list_add_tail(&port->list, &adapter->port_list); list_add_tail(&port->list, &adapter->port_list);
write_unlock_irq(&adapter->port_list_lock); write_unlock_irq(&adapter->port_list_lock);
atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status); atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
return port; return port;
......
...@@ -190,7 +190,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, ...@@ -190,7 +190,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
if (!(act_status & ZFCP_STATUS_ERP_NO_REF)) if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
if (scsi_device_get(sdev)) if (scsi_device_get(sdev))
return NULL; return NULL;
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
&zfcp_sdev->status); &zfcp_sdev->status);
erp_action = &zfcp_sdev->erp_action; erp_action = &zfcp_sdev->erp_action;
memset(erp_action, 0, sizeof(struct zfcp_erp_action)); memset(erp_action, 0, sizeof(struct zfcp_erp_action));
...@@ -206,7 +206,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, ...@@ -206,7 +206,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
if (!get_device(&port->dev)) if (!get_device(&port->dev))
return NULL; return NULL;
zfcp_erp_action_dismiss_port(port); zfcp_erp_action_dismiss_port(port);
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
erp_action = &port->erp_action; erp_action = &port->erp_action;
memset(erp_action, 0, sizeof(struct zfcp_erp_action)); memset(erp_action, 0, sizeof(struct zfcp_erp_action));
erp_action->port = port; erp_action->port = port;
...@@ -217,7 +217,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, ...@@ -217,7 +217,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
case ZFCP_ERP_ACTION_REOPEN_ADAPTER: case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
kref_get(&adapter->ref); kref_get(&adapter->ref);
zfcp_erp_action_dismiss_adapter(adapter); zfcp_erp_action_dismiss_adapter(adapter);
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
erp_action = &adapter->erp_action; erp_action = &adapter->erp_action;
memset(erp_action, 0, sizeof(struct zfcp_erp_action)); memset(erp_action, 0, sizeof(struct zfcp_erp_action));
if (!(atomic_read(&adapter->status) & if (!(atomic_read(&adapter->status) &
...@@ -254,7 +254,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, ...@@ -254,7 +254,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev); act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
if (!act) if (!act)
goto out; goto out;
atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status); atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
++adapter->erp_total_count; ++adapter->erp_total_count;
list_add_tail(&act->list, &adapter->erp_ready_head); list_add_tail(&act->list, &adapter->erp_ready_head);
wake_up(&adapter->erp_ready_wq); wake_up(&adapter->erp_ready_wq);
...@@ -486,14 +486,14 @@ static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) ...@@ -486,14 +486,14 @@ static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
{ {
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
zfcp_dbf_rec_run("eraubl1", &adapter->erp_action); zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
} }
static void zfcp_erp_port_unblock(struct zfcp_port *port) static void zfcp_erp_port_unblock(struct zfcp_port *port)
{ {
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
zfcp_dbf_rec_run("erpubl1", &port->erp_action); zfcp_dbf_rec_run("erpubl1", &port->erp_action);
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
} }
static void zfcp_erp_lun_unblock(struct scsi_device *sdev) static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
...@@ -502,7 +502,7 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev) ...@@ -502,7 +502,7 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status)) if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action); zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status); atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
} }
static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
...@@ -642,7 +642,7 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter) ...@@ -642,7 +642,7 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
read_lock_irqsave(&adapter->erp_lock, flags); read_lock_irqsave(&adapter->erp_lock, flags);
if (list_empty(&adapter->erp_ready_head) && if (list_empty(&adapter->erp_ready_head) &&
list_empty(&adapter->erp_running_head)) { list_empty(&adapter->erp_running_head)) {
atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, atomic_andnot(ZFCP_STATUS_ADAPTER_ERP_PENDING,
&adapter->status); &adapter->status);
wake_up(&adapter->erp_done_wqh); wake_up(&adapter->erp_done_wqh);
} }
...@@ -665,16 +665,16 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) ...@@ -665,16 +665,16 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
int sleep = 1; int sleep = 1;
struct zfcp_adapter *adapter = erp_action->adapter; struct zfcp_adapter *adapter = erp_action->adapter;
atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status); atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
for (retries = 7; retries; retries--) { for (retries = 7; retries; retries--) {
atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status); &adapter->status);
write_lock_irq(&adapter->erp_lock); write_lock_irq(&adapter->erp_lock);
zfcp_erp_action_to_running(erp_action); zfcp_erp_action_to_running(erp_action);
write_unlock_irq(&adapter->erp_lock); write_unlock_irq(&adapter->erp_lock);
if (zfcp_fsf_exchange_config_data(erp_action)) { if (zfcp_fsf_exchange_config_data(erp_action)) {
atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status); &adapter->status);
return ZFCP_ERP_FAILED; return ZFCP_ERP_FAILED;
} }
...@@ -692,7 +692,7 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) ...@@ -692,7 +692,7 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action)
sleep *= 2; sleep *= 2;
} }
atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status); &adapter->status);
if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK)) if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
...@@ -764,7 +764,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act) ...@@ -764,7 +764,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
/* all ports and LUNs are closed */ /* all ports and LUNs are closed */
zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN); zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
} }
...@@ -773,7 +773,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act) ...@@ -773,7 +773,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
struct zfcp_adapter *adapter = act->adapter; struct zfcp_adapter *adapter = act->adapter;
if (zfcp_qdio_open(adapter->qdio)) { if (zfcp_qdio_open(adapter->qdio)) {
atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
&adapter->status); &adapter->status);
return ZFCP_ERP_FAILED; return ZFCP_ERP_FAILED;
...@@ -784,7 +784,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act) ...@@ -784,7 +784,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act)
return ZFCP_ERP_FAILED; return ZFCP_ERP_FAILED;
} }
atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status); atomic_or(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
return ZFCP_ERP_SUCCEEDED; return ZFCP_ERP_SUCCEEDED;
} }
...@@ -948,7 +948,7 @@ static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev) ...@@ -948,7 +948,7 @@ static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
{ {
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED, atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED,
&zfcp_sdev->status); &zfcp_sdev->status);
} }
...@@ -1187,18 +1187,18 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) ...@@ -1187,18 +1187,18 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
switch (erp_action->action) { switch (erp_action->action) {
case ZFCP_ERP_ACTION_REOPEN_LUN: case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(erp_action->sdev); zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
&zfcp_sdev->status); &zfcp_sdev->status);
break; break;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
case ZFCP_ERP_ACTION_REOPEN_PORT: case ZFCP_ERP_ACTION_REOPEN_PORT:
atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
&erp_action->port->status); &erp_action->port->status);
break; break;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER: case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE, atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
&erp_action->adapter->status); &erp_action->adapter->status);
break; break;
} }
...@@ -1422,19 +1422,19 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask) ...@@ -1422,19 +1422,19 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
unsigned long flags; unsigned long flags;
u32 common_mask = mask & ZFCP_COMMON_FLAGS; u32 common_mask = mask & ZFCP_COMMON_FLAGS;
atomic_set_mask(mask, &adapter->status); atomic_or(mask, &adapter->status);
if (!common_mask) if (!common_mask)
return; return;
read_lock_irqsave(&adapter->port_list_lock, flags); read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list) list_for_each_entry(port, &adapter->port_list, list)
atomic_set_mask(common_mask, &port->status); atomic_or(common_mask, &port->status);
read_unlock_irqrestore(&adapter->port_list_lock, flags); read_unlock_irqrestore(&adapter->port_list_lock, flags);
spin_lock_irqsave(adapter->scsi_host->host_lock, flags); spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host) __shost_for_each_device(sdev, adapter->scsi_host)
atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
} }
...@@ -1453,7 +1453,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) ...@@ -1453,7 +1453,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
u32 common_mask = mask & ZFCP_COMMON_FLAGS; u32 common_mask = mask & ZFCP_COMMON_FLAGS;
u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
atomic_clear_mask(mask, &adapter->status); atomic_andnot(mask, &adapter->status);
if (!common_mask) if (!common_mask)
return; return;
...@@ -1463,7 +1463,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) ...@@ -1463,7 +1463,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
read_lock_irqsave(&adapter->port_list_lock, flags); read_lock_irqsave(&adapter->port_list_lock, flags);
list_for_each_entry(port, &adapter->port_list, list) { list_for_each_entry(port, &adapter->port_list, list) {
atomic_clear_mask(common_mask, &port->status); atomic_andnot(common_mask, &port->status);
if (clear_counter) if (clear_counter)
atomic_set(&port->erp_counter, 0); atomic_set(&port->erp_counter, 0);
} }
...@@ -1471,7 +1471,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) ...@@ -1471,7 +1471,7 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
spin_lock_irqsave(adapter->scsi_host->host_lock, flags); spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, adapter->scsi_host) { __shost_for_each_device(sdev, adapter->scsi_host) {
atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
if (clear_counter) if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
} }
...@@ -1491,7 +1491,7 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask) ...@@ -1491,7 +1491,7 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
u32 common_mask = mask & ZFCP_COMMON_FLAGS; u32 common_mask = mask & ZFCP_COMMON_FLAGS;
unsigned long flags; unsigned long flags;
atomic_set_mask(mask, &port->status); atomic_or(mask, &port->status);
if (!common_mask) if (!common_mask)
return; return;
...@@ -1499,7 +1499,7 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask) ...@@ -1499,7 +1499,7 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, port->adapter->scsi_host) __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port) if (sdev_to_zfcp(sdev)->port == port)
atomic_set_mask(common_mask, atomic_or(common_mask,
&sdev_to_zfcp(sdev)->status); &sdev_to_zfcp(sdev)->status);
spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
} }
...@@ -1518,7 +1518,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) ...@@ -1518,7 +1518,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
unsigned long flags; unsigned long flags;
atomic_clear_mask(mask, &port->status); atomic_andnot(mask, &port->status);
if (!common_mask) if (!common_mask)
return; return;
...@@ -1529,7 +1529,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) ...@@ -1529,7 +1529,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
__shost_for_each_device(sdev, port->adapter->scsi_host) __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port) { if (sdev_to_zfcp(sdev)->port == port) {
atomic_clear_mask(common_mask, atomic_andnot(common_mask,
&sdev_to_zfcp(sdev)->status); &sdev_to_zfcp(sdev)->status);
if (clear_counter) if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
...@@ -1546,7 +1546,7 @@ void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask) ...@@ -1546,7 +1546,7 @@ void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask)
{ {
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
atomic_set_mask(mask, &zfcp_sdev->status); atomic_or(mask, &zfcp_sdev->status);
} }
/** /**
...@@ -1558,7 +1558,7 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask) ...@@ -1558,7 +1558,7 @@ void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
{ {
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
atomic_clear_mask(mask, &zfcp_sdev->status); atomic_andnot(mask, &zfcp_sdev->status);
if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
atomic_set(&zfcp_sdev->erp_counter, 0); atomic_set(&zfcp_sdev->erp_counter, 0);
......
...@@ -508,7 +508,7 @@ static void zfcp_fc_adisc_handler(void *data) ...@@ -508,7 +508,7 @@ static void zfcp_fc_adisc_handler(void *data)
/* port is good, unblock rport without going through erp */ /* port is good, unblock rport without going through erp */
zfcp_scsi_schedule_rport_register(port); zfcp_scsi_schedule_rport_register(port);
out: out:
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
put_device(&port->dev); put_device(&port->dev);
kmem_cache_free(zfcp_fc_req_cache, fc_req); kmem_cache_free(zfcp_fc_req_cache, fc_req);
} }
...@@ -564,14 +564,14 @@ void zfcp_fc_link_test_work(struct work_struct *work) ...@@ -564,14 +564,14 @@ void zfcp_fc_link_test_work(struct work_struct *work)
if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST) if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
goto out; goto out;
atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
retval = zfcp_fc_adisc(port); retval = zfcp_fc_adisc(port);
if (retval == 0) if (retval == 0)
return; return;
/* send of ADISC was not possible */ /* send of ADISC was not possible */
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
zfcp_erp_port_forced_reopen(port, 0, "fcltwk1"); zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
out: out:
...@@ -640,7 +640,7 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh) ...@@ -640,7 +640,7 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)) if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
return; return;
atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status); atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status);
if ((port->supported_classes != 0) || if ((port->supported_classes != 0) ||
!list_empty(&port->unit_list)) !list_empty(&port->unit_list))
......
...@@ -114,7 +114,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, ...@@ -114,7 +114,7 @@ static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED) if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
return; return;
atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
zfcp_scsi_schedule_rports_block(adapter); zfcp_scsi_schedule_rports_block(adapter);
...@@ -345,7 +345,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) ...@@ -345,7 +345,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3"); zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
break; break;
case FSF_PROT_HOST_CONNECTION_INITIALIZING: case FSF_PROT_HOST_CONNECTION_INITIALIZING:
atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
&adapter->status); &adapter->status);
break; break;
case FSF_PROT_DUPLICATE_REQUEST_ID: case FSF_PROT_DUPLICATE_REQUEST_ID:
...@@ -554,7 +554,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) ...@@ -554,7 +554,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1"); zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
return; return;
} }
atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
&adapter->status); &adapter->status);
break; break;
case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
...@@ -567,7 +567,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) ...@@ -567,7 +567,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
/* avoids adapter shutdown to be able to recognize /* avoids adapter shutdown to be able to recognize
* events such as LINK UP */ * events such as LINK UP */
atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
&adapter->status); &adapter->status);
zfcp_fsf_link_down_info_eval(req, zfcp_fsf_link_down_info_eval(req,
&qtcb->header.fsf_status_qual.link_down_info); &qtcb->header.fsf_status_qual.link_down_info);
...@@ -1394,9 +1394,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) ...@@ -1394,9 +1394,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
break; break;
case FSF_GOOD: case FSF_GOOD:
port->handle = header->port_handle; port->handle = header->port_handle;
atomic_set_mask(ZFCP_STATUS_COMMON_OPEN | atomic_or(ZFCP_STATUS_COMMON_OPEN |
ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED, atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
&port->status); &port->status);
/* check whether D_ID has changed during open */ /* check whether D_ID has changed during open */
/* /*
...@@ -1677,10 +1677,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) ...@@ -1677,10 +1677,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
case FSF_PORT_BOXED: case FSF_PORT_BOXED:
/* can't use generic zfcp_erp_modify_port_status because /* can't use generic zfcp_erp_modify_port_status because
* ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
shost_for_each_device(sdev, port->adapter->scsi_host) shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port) if (sdev_to_zfcp(sdev)->port == port)
atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
&sdev_to_zfcp(sdev)->status); &sdev_to_zfcp(sdev)->status);
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED); zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
...@@ -1700,10 +1700,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) ...@@ -1700,10 +1700,10 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
/* can't use generic zfcp_erp_modify_port_status because /* can't use generic zfcp_erp_modify_port_status because
* ZFCP_STATUS_COMMON_OPEN must not be reset for the port * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
*/ */
atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
shost_for_each_device(sdev, port->adapter->scsi_host) shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port) if (sdev_to_zfcp(sdev)->port == port)
atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
&sdev_to_zfcp(sdev)->status); &sdev_to_zfcp(sdev)->status);
break; break;
} }
...@@ -1766,7 +1766,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) ...@@ -1766,7 +1766,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
zfcp_sdev = sdev_to_zfcp(sdev); zfcp_sdev = sdev_to_zfcp(sdev);
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_STATUS_COMMON_ACCESS_BOXED,
&zfcp_sdev->status); &zfcp_sdev->status);
...@@ -1822,7 +1822,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) ...@@ -1822,7 +1822,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
case FSF_GOOD: case FSF_GOOD:
zfcp_sdev->lun_handle = header->lun_handle; zfcp_sdev->lun_handle = header->lun_handle;
atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
break; break;
} }
} }
...@@ -1913,7 +1913,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req) ...@@ -1913,7 +1913,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
} }
break; break;
case FSF_GOOD: case FSF_GOOD:
atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status); atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
break; break;
} }
} }
......
...@@ -349,7 +349,7 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio) ...@@ -349,7 +349,7 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
spin_lock_irq(&qdio->req_q_lock); spin_lock_irq(&qdio->req_q_lock);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock_irq(&qdio->req_q_lock); spin_unlock_irq(&qdio->req_q_lock);
wake_up(&qdio->req_q_wq); wake_up(&qdio->req_q_wq);
...@@ -384,7 +384,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) ...@@ -384,7 +384,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
return -EIO; return -EIO;
atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&qdio->adapter->status); &qdio->adapter->status);
zfcp_qdio_setup_init_data(&init_data, qdio); zfcp_qdio_setup_init_data(&init_data, qdio);
...@@ -396,14 +396,14 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) ...@@ -396,14 +396,14 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
goto failed_qdio; goto failed_qdio;
if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED) if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED, atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
&qdio->adapter->status); &qdio->adapter->status);
if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) { if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER; qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
} else { } else {
atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1; qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
} }
...@@ -427,7 +427,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) ...@@ -427,7 +427,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
/* set index of first available SBALS / number of available SBALS */ /* set index of first available SBALS / number of available SBALS */
qdio->req_q_idx = 0; qdio->req_q_idx = 0;
atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q); atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
if (adapter->scsi_host) { if (adapter->scsi_host) {
adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req; adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
...@@ -499,6 +499,6 @@ void zfcp_qdio_siosl(struct zfcp_adapter *adapter) ...@@ -499,6 +499,6 @@ void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
rc = ccw_device_siosl(adapter->ccw_device); rc = ccw_device_siosl(adapter->ccw_device);
if (!rc) if (!rc)
atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED, atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&adapter->status); &adapter->status);
} }
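
The zfcp hunks above are a mechanical conversion: the s390-only atomic_set_mask()/atomic_clear_mask() helpers are replaced by the generic atomic_or()/atomic_andnot(). A minimal sketch of the mapping, using a placeholder flag value (the real zfcp status bits live in zfcp_def.h and are not reproduced here):

    #include <linux/atomic.h>

    #define DEMO_STATUS_LINK_TEST 0x00000002	/* placeholder bit, not the real zfcp value */

    static atomic_t demo_status = ATOMIC_INIT(0);

    static void demo_flag_handling(void)
    {
    	/* was: atomic_set_mask(DEMO_STATUS_LINK_TEST, &demo_status); */
    	atomic_or(DEMO_STATUS_LINK_TEST, &demo_status);		/* set the bit */

    	if (atomic_read(&demo_status) & DEMO_STATUS_LINK_TEST) {
    		/* was: atomic_clear_mask(DEMO_STATUS_LINK_TEST, &demo_status); */
    		atomic_andnot(DEMO_STATUS_LINK_TEST, &demo_status);	/* clear the bit */
    	}
    }
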
...@@ -98,15 +98,16 @@ ATOMIC_OP_RETURN(add, +) ...@@ -98,15 +98,16 @@ ATOMIC_OP_RETURN(add, +)
ATOMIC_OP_RETURN(sub, -) ATOMIC_OP_RETURN(sub, -)
#endif #endif
#ifndef atomic_clear_mask #ifndef atomic_and
ATOMIC_OP(and, &) ATOMIC_OP(and, &)
#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
#endif #endif
#ifndef atomic_set_mask #ifndef atomic_or
#define CONFIG_ARCH_HAS_ATOMIC_OR
ATOMIC_OP(or, |) ATOMIC_OP(or, |)
#define atomic_set_mask(i, v) atomic_or((i), (v)) #endif
#ifndef atomic_xor
ATOMIC_OP(xor, ^)
#endif #endif
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
......
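
In include/asm-generic/atomic.h the new and/or/xor instances come from the same ATOMIC_OP(op, c_op) template already used for add and sub, which is why the per-op atomic_clear_mask()/atomic_set_mask() defines and the CONFIG_ARCH_HAS_ATOMIC_OR marker can go away. Roughly the shape of code the cmpxchg-based variant of that template generates for ATOMIC_OP(and, &) -- a simplified sketch, not copied verbatim from the header:

    static inline void atomic_and_sketch(int i, atomic_t *v)
    {
    	int c, old;

    	c = atomic_read(v);
    	/* retry until no other CPU updated the counter between read and cmpxchg */
    	while ((old = atomic_cmpxchg(v, c, c & i)) != c)
    		c = old;
    }
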
...@@ -32,6 +32,10 @@ extern long long atomic64_##op##_return(long long a, atomic64_t *v); ...@@ -32,6 +32,10 @@ extern long long atomic64_##op##_return(long long a, atomic64_t *v);
ATOMIC64_OPS(add) ATOMIC64_OPS(add)
ATOMIC64_OPS(sub) ATOMIC64_OPS(sub)
ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP #undef ATOMIC64_OP
......
...@@ -28,6 +28,23 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) ...@@ -28,6 +28,23 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif #endif
#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
atomic_and(~i, v);
}
#endif
static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
atomic_andnot(mask, v);
}
static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
atomic_or(mask, v);
}
/** /**
* atomic_inc_not_zero_hint - increment if not null * atomic_inc_not_zero_hint - increment if not null
* @v: pointer of type atomic_t * @v: pointer of type atomic_t
...@@ -111,21 +128,16 @@ static inline int atomic_dec_if_positive(atomic_t *v) ...@@ -111,21 +128,16 @@ static inline int atomic_dec_if_positive(atomic_t *v)
} }
#endif #endif
#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
static inline void atomic_or(int i, atomic_t *v)
{
int old;
int new;
do {
old = atomic_read(v);
new = old | i;
} while (atomic_cmpxchg(v, old, new) != old);
}
#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
#include <asm-generic/atomic-long.h> #include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64 #ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h> #include <asm-generic/atomic64.h>
#endif #endif
#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
atomic64_and(~i, v);
}
#endif
#endif /* _LINUX_ATOMIC_H */ #endif /* _LINUX_ATOMIC_H */
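
linux/atomic.h now supplies atomic_andnot()/atomic64_andnot() fallbacks built on atomic_and()/atomic64_and(), keeps atomic_clear_mask()/atomic_set_mask() only as __deprecated wrappers (so remaining callers can get a compile-time warning), and drops the old cmpxchg-loop atomic_or() fallback since every architecture now provides atomic_or() directly. The fallback rests on a simple identity, sketched here for illustration:

    /* Clearing the bits in 'mask' is AND with the complement of 'mask':
     *   v &= ~mask   <=>   atomic_andnot(mask, v)   <=>   atomic_and(~mask, v)
     */
    static void clear_bits_demo(atomic_t *v, unsigned int mask)
    {
    	atomic_andnot(mask, v);		/* preferred after this merge */
    	/* atomic_clear_mask(mask, v);	   deprecated spelling of the same thing */
    }
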
...@@ -102,6 +102,9 @@ EXPORT_SYMBOL(atomic64_##op##_return); ...@@ -102,6 +102,9 @@ EXPORT_SYMBOL(atomic64_##op##_return);
ATOMIC64_OPS(add, +=) ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=) ATOMIC64_OPS(sub, -=)
ATOMIC64_OP(and, &=)
ATOMIC64_OP(or, |=)
ATOMIC64_OP(xor, ^=)
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
......
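
lib/atomic64.c is the software atomic64_t implementation used by 32-bit architectures without native 64-bit atomics; the new ATOMIC64_OP(and, &=) / (or, |=) / (xor, ^=) instances expand from the same template as add and sub. A simplified sketch of the shape of that expansion -- the real file hashes the atomic64_t address to one of a small pool of raw spinlocks, so the single lock below is an assumption for brevity:

    static DEFINE_RAW_SPINLOCK(demo_atomic64_lock);	/* stand-in for the hashed lock pool */

    void demo_atomic64_and(long long a, atomic64_t *v)
    {
    	unsigned long flags;

    	raw_spin_lock_irqsave(&demo_atomic64_lock, flags);
    	v->counter &= a;		/* the c_op ("&=") pasted in by the macro */
    	raw_spin_unlock_irqrestore(&demo_atomic64_lock, flags);
    }
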
...@@ -16,8 +16,39 @@ ...@@ -16,8 +16,39 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#define TEST(bit, op, c_op, val) \
do { \
atomic##bit##_set(&v, v0); \
r = v0; \
atomic##bit##_##op(val, &v); \
r c_op val; \
WARN(atomic##bit##_read(&v) != r, "%Lx != %Lx\n", \
(unsigned long long)atomic##bit##_read(&v), \
(unsigned long long)r); \
} while (0)
static __init void test_atomic(void)
{
int v0 = 0xaaa31337;
int v1 = 0xdeadbeef;
int onestwos = 0x11112222;
int one = 1;
atomic_t v;
int r;
TEST(, add, +=, onestwos);
TEST(, add, +=, -one);
TEST(, sub, -=, onestwos);
TEST(, sub, -=, -one);
TEST(, or, |=, v1);
TEST(, and, &=, v1);
TEST(, xor, ^=, v1);
TEST(, andnot, &= ~, v1);
}
#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0) #define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
static __init int test_atomic64(void) static __init void test_atomic64(void)
{ {
long long v0 = 0xaaa31337c001d00dLL; long long v0 = 0xaaa31337c001d00dLL;
long long v1 = 0xdeadbeefdeafcafeLL; long long v1 = 0xdeadbeefdeafcafeLL;
...@@ -34,15 +65,14 @@ static __init int test_atomic64(void) ...@@ -34,15 +65,14 @@ static __init int test_atomic64(void)
BUG_ON(v.counter != r); BUG_ON(v.counter != r);
BUG_ON(atomic64_read(&v) != r); BUG_ON(atomic64_read(&v) != r);
INIT(v0); TEST(64, add, +=, onestwos);
atomic64_add(onestwos, &v); TEST(64, add, +=, -one);
r += onestwos; TEST(64, sub, -=, onestwos);
BUG_ON(v.counter != r); TEST(64, sub, -=, -one);
TEST(64, or, |=, v1);
INIT(v0); TEST(64, and, &=, v1);
atomic64_add(-one, &v); TEST(64, xor, ^=, v1);
r += -one; TEST(64, andnot, &= ~, v1);
BUG_ON(v.counter != r);
INIT(v0); INIT(v0);
r += onestwos; r += onestwos;
...@@ -54,16 +84,6 @@ static __init int test_atomic64(void) ...@@ -54,16 +84,6 @@ static __init int test_atomic64(void)
BUG_ON(atomic64_add_return(-one, &v) != r); BUG_ON(atomic64_add_return(-one, &v) != r);
BUG_ON(v.counter != r); BUG_ON(v.counter != r);
INIT(v0);
atomic64_sub(onestwos, &v);
r -= onestwos;
BUG_ON(v.counter != r);
INIT(v0);
atomic64_sub(-one, &v);
r -= -one;
BUG_ON(v.counter != r);
INIT(v0); INIT(v0);
r -= onestwos; r -= onestwos;
BUG_ON(atomic64_sub_return(onestwos, &v) != r); BUG_ON(atomic64_sub_return(onestwos, &v) != r);
...@@ -147,6 +167,12 @@ static __init int test_atomic64(void) ...@@ -147,6 +167,12 @@ static __init int test_atomic64(void)
BUG_ON(!atomic64_inc_not_zero(&v)); BUG_ON(!atomic64_inc_not_zero(&v));
r += one; r += one;
BUG_ON(v.counter != r); BUG_ON(v.counter != r);
}
static __init int test_atomics(void)
{
test_atomic();
test_atomic64();
#ifdef CONFIG_X86 #ifdef CONFIG_X86
pr_info("passed for %s platform %s CX8 and %s SSE\n", pr_info("passed for %s platform %s CX8 and %s SSE\n",
...@@ -166,4 +192,4 @@ static __init int test_atomic64(void) ...@@ -166,4 +192,4 @@ static __init int test_atomic64(void)
return 0; return 0;
} }
core_initcall(test_atomic64); core_initcall(test_atomics);
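
The test update replaces the hand-rolled INIT/op/BUG_ON sequences with the TEST(bit, op, c_op, val) macro shown above and folds test_atomic() and test_atomic64() into a single test_atomics() initcall. For reference, TEST(64, andnot, &= ~, v1) expands to roughly:

    atomic64_set(&v, v0);			/* start from a known value */
    r = v0;
    atomic64_andnot(v1, &v);		/* exercise the op under test */
    r &= ~v1;				/* compute the expected result in plain C */
    WARN(atomic64_read(&v) != r, "%Lx != %Lx\n",
         (unsigned long long)atomic64_read(&v),
         (unsigned long long)r);
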