Commit dbb885fe authored by Linus Torvalds

Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add new
     ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"
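A minimal user-space sketch of the "fold" pattern the per-architecture
patches apply (illustration only, not code from the tree): instead of
hand-writing atomic_add(), atomic_sub(), atomic_add_return() and so on,
each architecture defines one ATOMIC_OP()/ATOMIC_OP_RETURN() pair and
instantiates it per operation.  The atomic_t typedef and the GCC
__atomic_*() builtins below stand in for the real per-arch ll/sc or
locked-instruction loops.

    #include <stdio.h>

    typedef struct { int counter; } atomic_t;

    #define ATOMIC_OP(op)						\
    static inline void atomic_##op(int i, atomic_t *v)			\
    {									\
    	/* relaxed RMW, no return value */				\
    	__atomic_fetch_##op(&v->counter, i, __ATOMIC_RELAXED);		\
    }

    #define ATOMIC_OP_RETURN(op)					\
    static inline int atomic_##op##_return(int i, atomic_t *v)		\
    {									\
    	/* fully ordered RMW that returns the new value */		\
    	return __atomic_##op##_fetch(&v->counter, i, __ATOMIC_SEQ_CST); \
    }

    #define ATOMIC_OPS(op)	ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

    ATOMIC_OPS(add)	/* generates atomic_add() and atomic_add_return() */
    ATOMIC_OPS(sub)	/* generates atomic_sub() and atomic_sub_return() */

    #undef ATOMIC_OPS
    #undef ATOMIC_OP_RETURN
    #undef ATOMIC_OP

    int main(void)
    {
    	atomic_t v = { 0 };

    	atomic_add(5, &v);
    	printf("%d\n", atomic_sub_return(2, &v));	/* prints 3 */
    	return 0;
    }

Adding a new operation (say, "and") then costs one ATOMIC_OPS(and) line
per architecture instead of two hand-rolled functions, which is the
linecount reduction the second bullet is after.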

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
parents d6dd50e0 2291059c
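Before the per-file diffs, a sketch of what the third bullet ("only
require cmpxchg() from an architecture") amounts to: every
read-modify-write operation can be generated as a compare-and-swap retry
loop, so the generic fallback only needs a working cmpxchg().  Again this
is an illustration, not the asm-generic/atomic.h code itself; my_cmpxchg()
is a made-up stand-in built on the GCC __atomic_compare_exchange_n()
builtin.

    #include <stdio.h>

    typedef struct { int counter; } atomic_t;

    /* cmpxchg semantics: store new at *p only if *p == old, and return
     * the value that was found at *p either way. */
    static inline int my_cmpxchg(int *p, int old, int new)
    {
    	__atomic_compare_exchange_n(p, &old, new, 0,
    				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    	return old;
    }

    #define ATOMIC_OP_RETURN(op, c_op)					\
    static inline int atomic_##op##_return(int i, atomic_t *v)		\
    {									\
    	int old, new;							\
    									\
    	do {								\
    		old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);	\
    		new = old c_op i;					\
    	} while (my_cmpxchg(&v->counter, old, new) != old);		\
    									\
    	return new;							\
    }

    ATOMIC_OP_RETURN(add, +)
    ATOMIC_OP_RETURN(sub, -)

    #undef ATOMIC_OP_RETURN

    int main(void)
    {
    	atomic_t v = { 10 };

    	printf("%d\n", atomic_add_return(3, &v));	/* 13 */
    	printf("%d\n", atomic_sub_return(5, &v));	/* 8 */
    	return 0;
    }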
@@ -17,8 +17,8 @@
 #define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }
 
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
-#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
+#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
 
 #define atomic_set(v,i)		((v)->counter = (i))
 #define atomic64_set(v,i)	((v)->counter = (i))
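The hunk above is the first instance of the ACCESS_ONCE() conversion from
the top commit in the list.  As a brief aside, and not part of the diff:
ACCESS_ONCE() is essentially the same volatile cast the open-coded
atomic_read() used, just centralized in <linux/compiler.h> and
type-generic.  The wait_for_flag() helper below is made up purely to show
why forcing a single, non-cached load matters.

    /* close to the kernel's definition, spelled with __typeof__ so it
     * also builds outside the kernel */
    #define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

    static int wait_for_flag(int *flag)
    {
    	int spins = 0;

    	/* Without the volatile access the compiler may hoist the load
    	 * out of the loop; with it, *flag is re-read each iteration. */
    	while (!ACCESS_ONCE(*flag))
    		spins++;

    	return spins;
    }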
@@ -29,145 +29,92 @@
* branch back to restart the operation. * branch back to restart the operation.
*/ */
static __inline__ void atomic_add(int i, atomic_t * v) #define ATOMIC_OP(op) \
{ static __inline__ void atomic_##op(int i, atomic_t * v) \
unsigned long temp; { \
__asm__ __volatile__( unsigned long temp; \
"1: ldl_l %0,%1\n" __asm__ __volatile__( \
" addl %0,%2,%0\n" "1: ldl_l %0,%1\n" \
" stl_c %0,%1\n" " " #op "l %0,%2,%0\n" \
" beq %0,2f\n" " stl_c %0,%1\n" \
".subsection 2\n" " beq %0,2f\n" \
"2: br 1b\n" ".subsection 2\n" \
".previous" "2: br 1b\n" \
:"=&r" (temp), "=m" (v->counter) ".previous" \
:"Ir" (i), "m" (v->counter)); :"=&r" (temp), "=m" (v->counter) \
} :"Ir" (i), "m" (v->counter)); \
} \
static __inline__ void atomic64_add(long i, atomic64_t * v)
{ #define ATOMIC_OP_RETURN(op) \
unsigned long temp; static inline int atomic_##op##_return(int i, atomic_t *v) \
__asm__ __volatile__( { \
"1: ldq_l %0,%1\n" long temp, result; \
" addq %0,%2,%0\n" smp_mb(); \
" stq_c %0,%1\n" __asm__ __volatile__( \
" beq %0,2f\n" "1: ldl_l %0,%1\n" \
".subsection 2\n" " " #op "l %0,%3,%2\n" \
"2: br 1b\n" " " #op "l %0,%3,%0\n" \
".previous" " stl_c %0,%1\n" \
:"=&r" (temp), "=m" (v->counter) " beq %0,2f\n" \
:"Ir" (i), "m" (v->counter)); ".subsection 2\n" \
} "2: br 1b\n" \
".previous" \
static __inline__ void atomic_sub(int i, atomic_t * v) :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
{ :"Ir" (i), "m" (v->counter) : "memory"); \
unsigned long temp; smp_mb(); \
__asm__ __volatile__( return result; \
"1: ldl_l %0,%1\n"
" subl %0,%2,%0\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous"
:"=&r" (temp), "=m" (v->counter)
:"Ir" (i), "m" (v->counter));
} }
static __inline__ void atomic64_sub(long i, atomic64_t * v) #define ATOMIC64_OP(op) \
{ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
unsigned long temp; { \
__asm__ __volatile__( unsigned long temp; \
"1: ldq_l %0,%1\n" __asm__ __volatile__( \
" subq %0,%2,%0\n" "1: ldq_l %0,%1\n" \
" stq_c %0,%1\n" " " #op "q %0,%2,%0\n" \
" beq %0,2f\n" " stq_c %0,%1\n" \
".subsection 2\n" " beq %0,2f\n" \
"2: br 1b\n" ".subsection 2\n" \
".previous" "2: br 1b\n" \
:"=&r" (temp), "=m" (v->counter) ".previous" \
:"Ir" (i), "m" (v->counter)); :"=&r" (temp), "=m" (v->counter) \
} :"Ir" (i), "m" (v->counter)); \
} \
/* #define ATOMIC64_OP_RETURN(op) \
* Same as above, but return the result value static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
*/ { \
static inline int atomic_add_return(int i, atomic_t *v) long temp, result; \
{ smp_mb(); \
long temp, result; __asm__ __volatile__( \
smp_mb(); "1: ldq_l %0,%1\n" \
__asm__ __volatile__( " " #op "q %0,%3,%2\n" \
"1: ldl_l %0,%1\n" " " #op "q %0,%3,%0\n" \
" addl %0,%3,%2\n" " stq_c %0,%1\n" \
" addl %0,%3,%0\n" " beq %0,2f\n" \
" stl_c %0,%1\n" ".subsection 2\n" \
" beq %0,2f\n" "2: br 1b\n" \
".subsection 2\n" ".previous" \
"2: br 1b\n" :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
".previous" :"Ir" (i), "m" (v->counter) : "memory"); \
:"=&r" (temp), "=m" (v->counter), "=&r" (result) smp_mb(); \
:"Ir" (i), "m" (v->counter) : "memory"); return result; \
smp_mb();
return result;
} }
static __inline__ long atomic64_add_return(long i, atomic64_t * v) #define ATOMIC_OPS(opg) \
{ ATOMIC_OP(opg) \
long temp, result; ATOMIC_OP_RETURN(opg) \
smp_mb(); ATOMIC64_OP(opg) \
__asm__ __volatile__( ATOMIC64_OP_RETURN(opg)
"1: ldq_l %0,%1\n"
" addq %0,%3,%2\n"
" addq %0,%3,%0\n"
" stq_c %0,%1\n"
" beq %0,2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous"
:"=&r" (temp), "=m" (v->counter), "=&r" (result)
:"Ir" (i), "m" (v->counter) : "memory");
smp_mb();
return result;
}
static __inline__ long atomic_sub_return(int i, atomic_t * v) ATOMIC_OPS(add)
{ ATOMIC_OPS(sub)
long temp, result;
smp_mb();
__asm__ __volatile__(
"1: ldl_l %0,%1\n"
" subl %0,%3,%2\n"
" subl %0,%3,%0\n"
" stl_c %0,%1\n"
" beq %0,2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous"
:"=&r" (temp), "=m" (v->counter), "=&r" (result)
:"Ir" (i), "m" (v->counter) : "memory");
smp_mb();
return result;
}
static __inline__ long atomic64_sub_return(long i, atomic64_t * v) #undef ATOMIC_OPS
{ #undef ATOMIC64_OP_RETURN
long temp, result; #undef ATOMIC64_OP
smp_mb(); #undef ATOMIC_OP_RETURN
__asm__ __volatile__( #undef ATOMIC_OP
"1: ldq_l %0,%1\n"
" subq %0,%3,%2\n"
" subq %0,%3,%0\n"
" stq_c %0,%1\n"
" beq %0,2f\n"
".subsection 2\n"
"2: br 1b\n"
".previous"
:"=&r" (temp), "=m" (v->counter), "=&r" (result)
:"Ir" (i), "m" (v->counter) : "memory");
smp_mb();
return result;
}
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
......
@@ -25,79 +25,36 @@
#define atomic_set(v, i) (((v)->counter) = (i)) #define atomic_set(v, i) (((v)->counter) = (i))
static inline void atomic_add(int i, atomic_t *v) #define ATOMIC_OP(op, c_op, asm_op) \
{ static inline void atomic_##op(int i, atomic_t *v) \
unsigned int temp; { \
unsigned int temp; \
__asm__ __volatile__( \
"1: llock %0, [%1] \n" __asm__ __volatile__( \
" add %0, %0, %2 \n" "1: llock %0, [%1] \n" \
" scond %0, [%1] \n" " " #asm_op " %0, %0, %2 \n" \
" bnz 1b \n" " scond %0, [%1] \n" \
: "=&r"(temp) /* Early clobber, to prevent reg reuse */ " bnz 1b \n" \
: "r"(&v->counter), "ir"(i) : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \
: "cc"); : "r"(&v->counter), "ir"(i) \
} : "cc"); \
} \
static inline void atomic_sub(int i, atomic_t *v)
{ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
unsigned int temp; static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
__asm__ __volatile__( unsigned int temp; \
"1: llock %0, [%1] \n" \
" sub %0, %0, %2 \n" __asm__ __volatile__( \
" scond %0, [%1] \n" "1: llock %0, [%1] \n" \
" bnz 1b \n" " " #asm_op " %0, %0, %2 \n" \
: "=&r"(temp) " scond %0, [%1] \n" \
: "r"(&v->counter), "ir"(i) " bnz 1b \n" \
: "cc"); : "=&r"(temp) \
} : "r"(&v->counter), "ir"(i) \
: "cc"); \
/* add and also return the new value */ \
static inline int atomic_add_return(int i, atomic_t *v) return temp; \
{
unsigned int temp;
__asm__ __volatile__(
"1: llock %0, [%1] \n"
" add %0, %0, %2 \n"
" scond %0, [%1] \n"
" bnz 1b \n"
: "=&r"(temp)
: "r"(&v->counter), "ir"(i)
: "cc");
return temp;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned int temp;
__asm__ __volatile__(
"1: llock %0, [%1] \n"
" sub %0, %0, %2 \n"
" scond %0, [%1] \n"
" bnz 1b \n"
: "=&r"(temp)
: "r"(&v->counter), "ir"(i)
: "cc");
return temp;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
unsigned int temp;
__asm__ __volatile__(
"1: llock %0, [%1] \n"
" bic %0, %0, %2 \n"
" scond %0, [%1] \n"
" bnz 1b \n"
: "=&r"(temp)
: "r"(addr), "ir"(mask)
: "cc");
} }
#else /* !CONFIG_ARC_HAS_LLSC */ #else /* !CONFIG_ARC_HAS_LLSC */
@@ -126,6 +83,7 @@ static inline void atomic_set(atomic_t *v, int i)
v->counter = i; v->counter = i;
atomic_ops_unlock(flags); atomic_ops_unlock(flags);
} }
#endif #endif
/* /*
@@ -133,62 +91,46 @@ static inline void atomic_set(atomic_t *v, int i)
* Locking would change to irq-disabling only (UP) and spinlocks (SMP) * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
*/ */
static inline void atomic_add(int i, atomic_t *v) #define ATOMIC_OP(op, c_op, asm_op) \
{ static inline void atomic_##op(int i, atomic_t *v) \
unsigned long flags; { \
unsigned long flags; \
atomic_ops_lock(flags); \
v->counter += i; atomic_ops_lock(flags); \
atomic_ops_unlock(flags); v->counter c_op i; \
atomic_ops_unlock(flags); \
} }
static inline void atomic_sub(int i, atomic_t *v) #define ATOMIC_OP_RETURN(op, c_op) \
{ static inline int atomic_##op##_return(int i, atomic_t *v) \
unsigned long flags; { \
unsigned long flags; \
atomic_ops_lock(flags); unsigned long temp; \
v->counter -= i; \
atomic_ops_unlock(flags); atomic_ops_lock(flags); \
temp = v->counter; \
temp c_op i; \
v->counter = temp; \
atomic_ops_unlock(flags); \
\
return temp; \
} }
static inline int atomic_add_return(int i, atomic_t *v) #endif /* !CONFIG_ARC_HAS_LLSC */
{
unsigned long flags;
unsigned long temp;
atomic_ops_lock(flags);
temp = v->counter;
temp += i;
v->counter = temp;
atomic_ops_unlock(flags);
return temp;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long flags;
unsigned long temp;
atomic_ops_lock(flags);
temp = v->counter;
temp -= i;
v->counter = temp;
atomic_ops_unlock(flags);
return temp; #define ATOMIC_OPS(op, c_op, asm_op) \
} ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op)
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) ATOMIC_OPS(add, +=, add)
{ ATOMIC_OPS(sub, -=, sub)
unsigned long flags; ATOMIC_OP(and, &=, and)
atomic_ops_lock(flags); #define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
*addr &= ~mask;
atomic_ops_unlock(flags);
}
#endif /* !CONFIG_ARC_HAS_LLSC */ #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
/** /**
* __atomic_add_unless - add unless the number is a given value * __atomic_add_unless - add unless the number is a given value
......
@@ -27,7 +27,7 @@
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
@@ -37,84 +37,47 @@
* store exclusive to ensure that these are atomic. We may loop * store exclusive to ensure that these are atomic. We may loop
* to ensure that the update happens. * to ensure that the update happens.
*/ */
static inline void atomic_add(int i, atomic_t *v)
{
unsigned long tmp;
int result;
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
" strex %1, %0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "Ir" (i)
: "cc");
}
static inline int atomic_add_return(int i, atomic_t *v) #define ATOMIC_OP(op, c_op, asm_op) \
{ static inline void atomic_##op(int i, atomic_t *v) \
unsigned long tmp; { \
int result; unsigned long tmp; \
int result; \
smp_mb(); \
prefetchw(&v->counter); prefetchw(&v->counter); \
__asm__ __volatile__("@ atomic_" #op "\n" \
__asm__ __volatile__("@ atomic_add_return\n" "1: ldrex %0, [%3]\n" \
"1: ldrex %0, [%3]\n" " " #asm_op " %0, %0, %4\n" \
" add %0, %0, %4\n" " strex %1, %0, [%3]\n" \
" strex %1, %0, [%3]\n" " teq %1, #0\n" \
" teq %1, #0\n" " bne 1b" \
" bne 1b" : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "Ir" (i) \
: "r" (&v->counter), "Ir" (i) : "cc"); \
: "cc"); } \
smp_mb(); #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
return result; { \
} unsigned long tmp; \
int result; \
static inline void atomic_sub(int i, atomic_t *v) \
{ smp_mb(); \
unsigned long tmp; prefetchw(&v->counter); \
int result; \
__asm__ __volatile__("@ atomic_" #op "_return\n" \
prefetchw(&v->counter); "1: ldrex %0, [%3]\n" \
__asm__ __volatile__("@ atomic_sub\n" " " #asm_op " %0, %0, %4\n" \
"1: ldrex %0, [%3]\n" " strex %1, %0, [%3]\n" \
" sub %0, %0, %4\n" " teq %1, #0\n" \
" strex %1, %0, [%3]\n" " bne 1b" \
" teq %1, #0\n" : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
" bne 1b" : "r" (&v->counter), "Ir" (i) \
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "cc"); \
: "r" (&v->counter), "Ir" (i) \
: "cc"); smp_mb(); \
} \
return result; \
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;
smp_mb();
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%3]\n"
" sub %0, %0, %4\n"
" strex %1, %0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "Ir" (i)
: "cc");
smp_mb();
return result;
} }
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
@@ -174,33 +137,29 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#error SMP not supported on pre-ARMv6 CPUs #error SMP not supported on pre-ARMv6 CPUs
#endif #endif
static inline int atomic_add_return(int i, atomic_t *v) #define ATOMIC_OP(op, c_op, asm_op) \
{ static inline void atomic_##op(int i, atomic_t *v) \
unsigned long flags; { \
int val; unsigned long flags; \
\
raw_local_irq_save(flags); raw_local_irq_save(flags); \
val = v->counter; v->counter c_op i; \
v->counter = val += i; raw_local_irq_restore(flags); \
raw_local_irq_restore(flags); } \
return val; #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
} static inline int atomic_##op##_return(int i, atomic_t *v) \
#define atomic_add(i, v) (void) atomic_add_return(i, v) { \
unsigned long flags; \
static inline int atomic_sub_return(int i, atomic_t *v) int val; \
{ \
unsigned long flags; raw_local_irq_save(flags); \
int val; v->counter c_op i; \
val = v->counter; \
raw_local_irq_save(flags); raw_local_irq_restore(flags); \
val = v->counter; \
v->counter = val -= i; return val; \
raw_local_irq_restore(flags);
return val;
} }
#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new) static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{ {
@@ -228,6 +187,17 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#endif /* __LINUX_ARM_ARCH__ */ #endif /* __LINUX_ARM_ARCH__ */
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_inc(v) atomic_add(1, v) #define atomic_inc(v) atomic_add(1, v)
@@ -300,89 +270,60 @@ static inline void atomic64_set(atomic64_t *v, long long i)
} }
#endif #endif
static inline void atomic64_add(long long i, atomic64_t *v) #define ATOMIC64_OP(op, op1, op2) \
{ static inline void atomic64_##op(long long i, atomic64_t *v) \
long long result; { \
unsigned long tmp; long long result; \
unsigned long tmp; \
prefetchw(&v->counter); \
__asm__ __volatile__("@ atomic64_add\n" prefetchw(&v->counter); \
"1: ldrexd %0, %H0, [%3]\n" __asm__ __volatile__("@ atomic64_" #op "\n" \
" adds %Q0, %Q0, %Q4\n" "1: ldrexd %0, %H0, [%3]\n" \
" adc %R0, %R0, %R4\n" " " #op1 " %Q0, %Q0, %Q4\n" \
" strexd %1, %0, %H0, [%3]\n" " " #op2 " %R0, %R0, %R4\n" \
" teq %1, #0\n" " strexd %1, %0, %H0, [%3]\n" \
" bne 1b" " teq %1, #0\n" \
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) " bne 1b" \
: "r" (&v->counter), "r" (i) : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
: "cc"); : "r" (&v->counter), "r" (i) \
} : "cc"); \
} \
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{ #define ATOMIC64_OP_RETURN(op, op1, op2) \
long long result; static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
unsigned long tmp; { \
long long result; \
smp_mb(); unsigned long tmp; \
prefetchw(&v->counter); \
smp_mb(); \
__asm__ __volatile__("@ atomic64_add_return\n" prefetchw(&v->counter); \
"1: ldrexd %0, %H0, [%3]\n" \
" adds %Q0, %Q0, %Q4\n" __asm__ __volatile__("@ atomic64_" #op "_return\n" \
" adc %R0, %R0, %R4\n" "1: ldrexd %0, %H0, [%3]\n" \
" strexd %1, %0, %H0, [%3]\n" " " #op1 " %Q0, %Q0, %Q4\n" \
" teq %1, #0\n" " " #op2 " %R0, %R0, %R4\n" \
" bne 1b" " strexd %1, %0, %H0, [%3]\n" \
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) " teq %1, #0\n" \
: "r" (&v->counter), "r" (i) " bne 1b" \
: "cc"); : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
: "r" (&v->counter), "r" (i) \
smp_mb(); : "cc"); \
\
return result; smp_mb(); \
} \
return result; \
static inline void atomic64_sub(long long i, atomic64_t *v)
{
long long result;
unsigned long tmp;
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_sub\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, %Q4\n"
" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (i)
: "cc");
} }
static inline long long atomic64_sub_return(long long i, atomic64_t *v) #define ATOMIC64_OPS(op, op1, op2) \
{ ATOMIC64_OP(op, op1, op2) \
long long result; ATOMIC64_OP_RETURN(op, op1, op2)
unsigned long tmp;
smp_mb();
prefetchw(&v->counter);
__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, %Q4\n"
" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
: "r" (&v->counter), "r" (i)
: "cc");
smp_mb(); ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
return result; #undef ATOMIC64_OPS
} #undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
long long new) long long new)
......
@@ -35,7 +35,7 @@
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)	(((v)->counter) = (i))
 
 /*
@@ -43,69 +43,51 @@
* store exclusive to ensure that these are atomic. We may loop * store exclusive to ensure that these are atomic. We may loop
* to ensure that the update happens. * to ensure that the update happens.
*/ */
static inline void atomic_add(int i, atomic_t *v)
{
unsigned long tmp;
int result;
asm volatile("// atomic_add\n"
"1: ldxr %w0, %2\n"
" add %w0, %w0, %w3\n"
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i));
}
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;
asm volatile("// atomic_add_return\n"
"1: ldxr %w0, %2\n"
" add %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
: "memory");
smp_mb();
return result;
}
static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long tmp;
int result;
asm volatile("// atomic_sub\n" #define ATOMIC_OP(op, asm_op) \
"1: ldxr %w0, %2\n" static inline void atomic_##op(int i, atomic_t *v) \
" sub %w0, %w0, %w3\n" { \
" stxr %w1, %w0, %2\n" unsigned long tmp; \
" cbnz %w1, 1b" int result; \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); asm volatile("// atomic_" #op "\n" \
"1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" stxr %w1, %w0, %2\n" \
" cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
\
asm volatile("// atomic_" #op "_return\n" \
"1: ldxr %w0, %2\n" \
" " #asm_op " %w0, %w0, %w3\n" \
" stlxr %w1, %w0, %2\n" \
" cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i) \
: "memory"); \
\
smp_mb(); \
return result; \
} }
static inline int atomic_sub_return(int i, atomic_t *v) #define ATOMIC_OPS(op, asm_op) \
{ ATOMIC_OP(op, asm_op) \
unsigned long tmp; ATOMIC_OP_RETURN(op, asm_op)
int result;
asm volatile("// atomic_sub_return\n" ATOMIC_OPS(add, add)
"1: ldxr %w0, %2\n" ATOMIC_OPS(sub, sub)
" sub %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
: "memory");
smp_mb(); #undef ATOMIC_OPS
return result; #undef ATOMIC_OP_RETURN
} #undef ATOMIC_OP
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{ {
@@ -157,72 +139,53 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
*/ */
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
#define atomic64_read(v) (*(volatile long *)&(v)->counter) #define atomic64_read(v) ACCESS_ONCE((v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i)) #define atomic64_set(v,i) (((v)->counter) = (i))
static inline void atomic64_add(u64 i, atomic64_t *v) #define ATOMIC64_OP(op, asm_op) \
{ static inline void atomic64_##op(long i, atomic64_t *v) \
long result; { \
unsigned long tmp; long result; \
unsigned long tmp; \
asm volatile("// atomic64_add\n" \
"1: ldxr %0, %2\n" asm volatile("// atomic64_" #op "\n" \
" add %0, %0, %3\n" "1: ldxr %0, %2\n" \
" stxr %w1, %0, %2\n" " " #asm_op " %0, %0, %3\n" \
" cbnz %w1, 1b" " stxr %w1, %0, %2\n" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) " cbnz %w1, 1b" \
: "Ir" (i)); : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
#define ATOMIC64_OP_RETURN(op, asm_op) \
static inline long atomic64_##op##_return(long i, atomic64_t *v) \
{ \
long result; \
unsigned long tmp; \
\
asm volatile("// atomic64_" #op "_return\n" \
"1: ldxr %0, %2\n" \
" " #asm_op " %0, %0, %3\n" \
" stlxr %w1, %0, %2\n" \
" cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i) \
: "memory"); \
\
smp_mb(); \
return result; \
} }
static inline long atomic64_add_return(long i, atomic64_t *v) #define ATOMIC64_OPS(op, asm_op) \
{ ATOMIC64_OP(op, asm_op) \
long result; ATOMIC64_OP_RETURN(op, asm_op)
unsigned long tmp;
asm volatile("// atomic64_add_return\n" ATOMIC64_OPS(add, add)
"1: ldxr %0, %2\n" ATOMIC64_OPS(sub, sub)
" add %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
: "memory");
smp_mb(); #undef ATOMIC64_OPS
return result; #undef ATOMIC64_OP_RETURN
} #undef ATOMIC64_OP
static inline void atomic64_sub(u64 i, atomic64_t *v)
{
long result;
unsigned long tmp;
asm volatile("// atomic64_sub\n"
"1: ldxr %0, %2\n"
" sub %0, %0, %3\n"
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i));
}
static inline long atomic64_sub_return(long i, atomic64_t *v)
{
long result;
unsigned long tmp;
asm volatile("// atomic64_sub_return\n"
"1: ldxr %0, %2\n"
" sub %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
: "memory");
smp_mb();
return result;
}
static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{ {
......
@@ -19,33 +19,46 @@
#define ATOMIC_INIT(i) { (i) } #define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i) #define atomic_set(v, i) (((v)->counter) = i)
#define ATOMIC_OP_RETURN(op, asm_op, asm_con) \
static inline int __atomic_##op##_return(int i, atomic_t *v) \
{ \
int result; \
\
asm volatile( \
"/* atomic_" #op "_return */\n" \
"1: ssrf 5\n" \
" ld.w %0, %2\n" \
" " #asm_op " %0, %3\n" \
" stcond %1, %0\n" \
" brne 1b" \
: "=&r" (result), "=o" (v->counter) \
: "m" (v->counter), #asm_con (i) \
: "cc"); \
\
return result; \
}
ATOMIC_OP_RETURN(sub, sub, rKs21)
ATOMIC_OP_RETURN(add, add, r)
#undef ATOMIC_OP_RETURN
/* /*
* atomic_sub_return - subtract the atomic variable * Probably found the reason why we want to use sub with the signed 21-bit
* @i: integer value to subtract * limit, it uses one less register than the add instruction that can add up to
* @v: pointer of type atomic_t * 32-bit values.
* *
* Atomically subtracts @i from @v. Returns the resulting value. * Both instructions are 32-bit, to use a 16-bit instruction the immediate is
* very small; 4 bit.
*
* sub 32-bit, type IV, takes a register and subtracts a 21-bit immediate.
* add 32-bit, type II, adds two register values together.
*/ */
static inline int atomic_sub_return(int i, atomic_t *v) #define IS_21BIT_CONST(i) \
{ (__builtin_constant_p(i) && ((i) >= -1048575) && ((i) <= 1048576))
int result;
asm volatile(
"/* atomic_sub_return */\n"
"1: ssrf 5\n"
" ld.w %0, %2\n"
" sub %0, %3\n"
" stcond %1, %0\n"
" brne 1b"
: "=&r"(result), "=o"(v->counter)
: "m"(v->counter), "rKs21"(i)
: "cc");
return result;
}
/* /*
* atomic_add_return - add integer to atomic variable * atomic_add_return - add integer to atomic variable
@@ -56,51 +69,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
*/ */
static inline int atomic_add_return(int i, atomic_t *v) static inline int atomic_add_return(int i, atomic_t *v)
{ {
int result; if (IS_21BIT_CONST(i))
return __atomic_sub_return(-i, v);
if (__builtin_constant_p(i) && (i >= -1048575) && (i <= 1048576))
result = atomic_sub_return(-i, v);
else
asm volatile(
"/* atomic_add_return */\n"
"1: ssrf 5\n"
" ld.w %0, %1\n"
" add %0, %3\n"
" stcond %2, %0\n"
" brne 1b"
: "=&r"(result), "=o"(v->counter)
: "m"(v->counter), "r"(i)
: "cc", "memory");
return result; return __atomic_add_return(i, v);
} }
/* /*
* atomic_sub_unless - sub unless the number is a given value * atomic_sub_return - subtract the atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t * @v: pointer of type atomic_t
* @a: the amount to subtract from v...
* @u: ...unless v is equal to u.
* *
* Atomically subtract @a from @v, so long as it was not @u. * Atomically subtracts @i from @v. Returns the resulting value.
* Returns the old value of @v. */
*/ static inline int atomic_sub_return(int i, atomic_t *v)
static inline void atomic_sub_unless(atomic_t *v, int a, int u)
{ {
int tmp; if (IS_21BIT_CONST(i))
return __atomic_sub_return(i, v);
asm volatile( return __atomic_add_return(-i, v);
"/* atomic_sub_unless */\n"
"1: ssrf 5\n"
" ld.w %0, %2\n"
" cp.w %0, %4\n"
" breq 1f\n"
" sub %0, %3\n"
" stcond %1, %0\n"
" brne 1b\n"
"1:"
: "=&r"(tmp), "=o"(v->counter)
: "m"(v->counter), "rKs21"(a), "rKs21"(u)
: "cc", "memory");
} }
/* /*
@@ -116,9 +103,21 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{ {
int tmp, old = atomic_read(v); int tmp, old = atomic_read(v);
if (__builtin_constant_p(a) && (a >= -1048575) && (a <= 1048576)) if (IS_21BIT_CONST(a)) {
atomic_sub_unless(v, -a, u); asm volatile(
else { "/* __atomic_sub_unless */\n"
"1: ssrf 5\n"
" ld.w %0, %2\n"
" cp.w %0, %4\n"
" breq 1f\n"
" sub %0, %3\n"
" stcond %1, %0\n"
" brne 1b\n"
"1:"
: "=&r"(tmp), "=o"(v->counter)
: "m"(v->counter), "rKs21"(-a), "rKs21"(u)
: "cc", "memory");
} else {
asm volatile( asm volatile(
"/* __atomic_add_unless */\n" "/* __atomic_add_unless */\n"
"1: ssrf 5\n" "1: ssrf 5\n"
@@ -137,6 +136,8 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
return old; return old;
} }
#undef IS_21BIT_CONST
/* /*
* atomic_sub_if_positive - conditionally subtract integer from atomic variable * atomic_sub_if_positive - conditionally subtract integer from atomic variable
* @i: integer value to subtract * @i: integer value to subtract
......
@@ -17,48 +17,41 @@
#define ATOMIC_INIT(i) { (i) } #define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i)) #define atomic_set(v,i) (((v)->counter) = (i))
/* These should be written in asm but we do it in C for now. */ /* These should be written in asm but we do it in C for now. */
static inline void atomic_add(int i, volatile atomic_t *v) #define ATOMIC_OP(op, c_op) \
{ static inline void atomic_##op(int i, volatile atomic_t *v) \
unsigned long flags; { \
cris_atomic_save(v, flags); unsigned long flags; \
v->counter += i; cris_atomic_save(v, flags); \
cris_atomic_restore(v, flags); v->counter c_op i; \
cris_atomic_restore(v, flags); \
} \
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, volatile atomic_t *v) \
{ \
unsigned long flags; \
int retval; \
cris_atomic_save(v, flags); \
retval = (v->counter c_op i); \
cris_atomic_restore(v, flags); \
return retval; \
} }
static inline void atomic_sub(int i, volatile atomic_t *v) #define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
{
unsigned long flags;
cris_atomic_save(v, flags);
v->counter -= i;
cris_atomic_restore(v, flags);
}
static inline int atomic_add_return(int i, volatile atomic_t *v) ATOMIC_OPS(add, +=)
{ ATOMIC_OPS(sub, -=)
unsigned long flags;
int retval;
cris_atomic_save(v, flags);
retval = (v->counter += i);
cris_atomic_restore(v, flags);
return retval;
}
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
static inline int atomic_sub_return(int i, volatile atomic_t *v) #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
{
unsigned long flags;
int retval;
cris_atomic_save(v, flags);
retval = (v->counter -= i);
cris_atomic_restore(v, flags);
return retval;
}
static inline int atomic_sub_and_test(int i, volatile atomic_t *v) static inline int atomic_sub_and_test(int i, volatile atomic_t *v)
{ {
......
@@ -31,7 +31,7 @@
  */
 #define ATOMIC_INIT(i)		{ (i) }
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = (i))
 
 #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
......
@@ -94,41 +94,47 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return __oldval; return __oldval;
} }
static inline int atomic_add_return(int i, atomic_t *v) #define ATOMIC_OP(op) \
{ static inline void atomic_##op(int i, atomic_t *v) \
int output; { \
int output; \
__asm__ __volatile__ ( \
"1: %0 = memw_locked(%1);\n" __asm__ __volatile__ ( \
" %0 = add(%0,%2);\n" "1: %0 = memw_locked(%1);\n" \
" memw_locked(%1,P3)=%0;\n" " %0 = "#op "(%0,%2);\n" \
" if !P3 jump 1b;\n" " memw_locked(%1,P3)=%0;\n" \
: "=&r" (output) " if !P3 jump 1b;\n" \
: "r" (&v->counter), "r" (i) : "=&r" (output) \
: "memory", "p3" : "r" (&v->counter), "r" (i) \
); : "memory", "p3" \
return output; ); \
} \
#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
int output; \
\
__asm__ __volatile__ ( \
"1: %0 = memw_locked(%1);\n" \
" %0 = "#op "(%0,%2);\n" \
" memw_locked(%1,P3)=%0;\n" \
" if !P3 jump 1b;\n" \
: "=&r" (output) \
: "r" (&v->counter), "r" (i) \
: "memory", "p3" \
); \
return output; \
} }
#define atomic_add(i, v) atomic_add_return(i, (v)) #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
static inline int atomic_sub_return(int i, atomic_t *v) ATOMIC_OPS(add)
{ ATOMIC_OPS(sub)
int output;
__asm__ __volatile__ (
"1: %0 = memw_locked(%1);\n"
" %0 = sub(%0,%2);\n"
" memw_locked(%1,P3)=%0\n"
" if !P3 jump 1b;\n"
: "=&r" (output)
: "r" (&v->counter), "r" (i)
: "memory", "p3"
);
return output;
}
#define atomic_sub(i, v) atomic_sub_return(i, (v)) #undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
/** /**
* __atomic_add_unless - add unless the number is a given value * __atomic_add_unless - add unless the number is a given value
......
@@ -21,68 +21,100 @@
#define ATOMIC_INIT(i) { (i) } #define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic64_read(v) (*(volatile long *)&(v)->counter) #define atomic64_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i)) #define atomic_set(v,i) (((v)->counter) = (i))
#define atomic64_set(v,i) (((v)->counter) = (i)) #define atomic64_set(v,i) (((v)->counter) = (i))
static __inline__ int #define ATOMIC_OP(op, c_op) \
ia64_atomic_add (int i, atomic_t *v) static __inline__ int \
{ ia64_atomic_##op (int i, atomic_t *v) \
__s32 old, new; { \
CMPXCHG_BUGCHECK_DECL __s32 old, new; \
CMPXCHG_BUGCHECK_DECL \
do { \
CMPXCHG_BUGCHECK(v); do { \
old = atomic_read(v); CMPXCHG_BUGCHECK(v); \
new = old + i; old = atomic_read(v); \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); new = old c_op i; \
return new; } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return new; \
} }
static __inline__ long ATOMIC_OP(add, +)
ia64_atomic64_add (__s64 i, atomic64_t *v) ATOMIC_OP(sub, -)
{
__s64 old, new;
CMPXCHG_BUGCHECK_DECL
do {
CMPXCHG_BUGCHECK(v);
old = atomic64_read(v);
new = old + i;
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
return new;
}
static __inline__ int #undef ATOMIC_OP
ia64_atomic_sub (int i, atomic_t *v)
{
__s32 old, new;
CMPXCHG_BUGCHECK_DECL
do {
CMPXCHG_BUGCHECK(v);
old = atomic_read(v);
new = old - i;
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
return new;
}
static __inline__ long #define atomic_add_return(i,v) \
ia64_atomic64_sub (__s64 i, atomic64_t *v) ({ \
{ int __ia64_aar_i = (i); \
__s64 old, new; (__builtin_constant_p(i) \
CMPXCHG_BUGCHECK_DECL && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
|| (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
do { || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
CMPXCHG_BUGCHECK(v); || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
old = atomic64_read(v); ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
new = old - i; : ia64_atomic_add(__ia64_aar_i, v); \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); })
return new;
#define atomic_sub_return(i,v) \
({ \
int __ia64_asr_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
|| (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
|| (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
|| (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
: ia64_atomic_sub(__ia64_asr_i, v); \
})
#define ATOMIC64_OP(op, c_op) \
static __inline__ long \
ia64_atomic64_##op (__s64 i, atomic64_t *v) \
{ \
__s64 old, new; \
CMPXCHG_BUGCHECK_DECL \
\
do { \
CMPXCHG_BUGCHECK(v); \
old = atomic64_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return new; \
} }
ATOMIC64_OP(add, +)
ATOMIC64_OP(sub, -)
#undef ATOMIC64_OP
#define atomic64_add_return(i,v) \
({ \
long __ia64_aar_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
|| (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
|| (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
|| (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
: ia64_atomic64_add(__ia64_aar_i, v); \
})
#define atomic64_sub_return(i,v) \
({ \
long __ia64_asr_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
|| (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
|| (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
|| (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
: ia64_atomic64_sub(__ia64_asr_i, v); \
})
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -123,30 +155,6 @@ static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define atomic_add_return(i,v) \
({ \
int __ia64_aar_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
|| (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
|| (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
|| (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
: ia64_atomic_add(__ia64_aar_i, v); \
})
#define atomic64_add_return(i,v) \
({ \
long __ia64_aar_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
|| (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
|| (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
|| (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
: ia64_atomic64_add(__ia64_aar_i, v); \
})
/* /*
* Atomically add I to V and return TRUE if the resulting value is * Atomically add I to V and return TRUE if the resulting value is
* negative. * negative.
@@ -163,30 +171,6 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
return atomic64_add_return(i, v) < 0; return atomic64_add_return(i, v) < 0;
} }
#define atomic_sub_return(i,v) \
({ \
int __ia64_asr_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
|| (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
|| (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
|| (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
: ia64_atomic_sub(__ia64_asr_i, v); \
})
#define atomic64_sub_return(i,v) \
({ \
long __ia64_asr_i = (i); \
(__builtin_constant_p(i) \
&& ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
|| (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
|| (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
|| (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
: ia64_atomic64_sub(__ia64_asr_i, v); \
})
#define atomic_dec_return(v) atomic_sub_return(1, (v)) #define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v)) #define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v)) #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
@@ -199,13 +183,13 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0) #define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0) #define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)
#define atomic_add(i,v) atomic_add_return((i), (v)) #define atomic_add(i,v) (void)atomic_add_return((i), (v))
#define atomic_sub(i,v) atomic_sub_return((i), (v)) #define atomic_sub(i,v) (void)atomic_sub_return((i), (v))
#define atomic_inc(v) atomic_add(1, (v)) #define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v)) #define atomic_dec(v) atomic_sub(1, (v))
#define atomic64_add(i,v) atomic64_add_return((i), (v)) #define atomic64_add(i,v) (void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v) atomic64_sub_return((i), (v)) #define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v))
#define atomic64_inc(v) atomic64_add(1, (v)) #define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v)) #define atomic64_dec(v) atomic64_sub(1, (v))
......
@@ -28,7 +28,7 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_read(v)	ACCESS_ONCE((v)->counter)
 
 /**
  * atomic_set - set atomic variable
@@ -39,85 +39,64 @@
*/ */
#define atomic_set(v,i) (((v)->counter) = (i)) #define atomic_set(v,i) (((v)->counter) = (i))
/**
* atomic_add_return - add integer to atomic variable and return it
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and return (@i + @v).
*/
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
unsigned long flags;
int result;
local_irq_save(flags);
__asm__ __volatile__ (
"# atomic_add_return \n\t"
DCACHE_CLEAR("%0", "r4", "%1")
M32R_LOCK" %0, @%1; \n\t"
"add %0, %2; \n\t"
M32R_UNLOCK" %0, @%1; \n\t"
: "=&r" (result)
: "r" (&v->counter), "r" (i)
: "memory"
#ifdef CONFIG_CHIP_M32700_TS1 #ifdef CONFIG_CHIP_M32700_TS1
, "r4" #define __ATOMIC_CLOBBER , "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */ #else
); #define __ATOMIC_CLOBBER
local_irq_restore(flags); #endif
return result; #define ATOMIC_OP(op) \
static __inline__ void atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int result; \
\
local_irq_save(flags); \
__asm__ __volatile__ ( \
"# atomic_" #op " \n\t" \
DCACHE_CLEAR("%0", "r4", "%1") \
M32R_LOCK" %0, @%1; \n\t" \
#op " %0, %2; \n\t" \
M32R_UNLOCK" %0, @%1; \n\t" \
: "=&r" (result) \
: "r" (&v->counter), "r" (i) \
: "memory" \
__ATOMIC_CLOBBER \
); \
local_irq_restore(flags); \
} \
#define ATOMIC_OP_RETURN(op) \
static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int result; \
\
local_irq_save(flags); \
__asm__ __volatile__ ( \
"# atomic_" #op "_return \n\t" \
DCACHE_CLEAR("%0", "r4", "%1") \
M32R_LOCK" %0, @%1; \n\t" \
#op " %0, %2; \n\t" \
M32R_UNLOCK" %0, @%1; \n\t" \
: "=&r" (result) \
: "r" (&v->counter), "r" (i) \
: "memory" \
__ATOMIC_CLOBBER \
); \
local_irq_restore(flags); \
\
return result; \
} }
/** #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
* atomic_sub_return - subtract integer from atomic variable and return it
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and return (@v - @i).
*/
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
unsigned long flags;
int result;
local_irq_save(flags);
__asm__ __volatile__ (
"# atomic_sub_return \n\t"
DCACHE_CLEAR("%0", "r4", "%1")
M32R_LOCK" %0, @%1; \n\t"
"sub %0, %2; \n\t"
M32R_UNLOCK" %0, @%1; \n\t"
: "=&r" (result)
: "r" (&v->counter), "r" (i)
: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
);
local_irq_restore(flags);
return result;
}
/** ATOMIC_OPS(add)
* atomic_add - add integer to atomic variable ATOMIC_OPS(sub)
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
#define atomic_add(i,v) ((void) atomic_add_return((i), (v)))
/** #undef ATOMIC_OPS
* atomic_sub - subtract the atomic variable #undef ATOMIC_OP_RETURN
* @i: integer value to subtract #undef ATOMIC_OP
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v.
*/
#define atomic_sub(i,v) ((void) atomic_sub_return((i), (v)))
/** /**
* atomic_sub_and_test - subtract value from variable and test result * atomic_sub_and_test - subtract value from variable and test result
@@ -151,9 +130,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
: "=&r" (result) : "=&r" (result)
: "r" (&v->counter) : "r" (&v->counter)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 __ATOMIC_CLOBBER
, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
); );
local_irq_restore(flags); local_irq_restore(flags);
@@ -181,9 +158,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
: "=&r" (result) : "=&r" (result)
: "r" (&v->counter) : "r" (&v->counter)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 __ATOMIC_CLOBBER
, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
); );
local_irq_restore(flags); local_irq_restore(flags);
@@ -280,9 +255,7 @@ static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
: "=&r" (tmp) : "=&r" (tmp)
: "r" (addr), "r" (~mask) : "r" (addr), "r" (~mask)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 __ATOMIC_CLOBBER
, "r5"
#endif /* CONFIG_CHIP_M32700_TS1 */
); );
local_irq_restore(flags); local_irq_restore(flags);
} }
@@ -302,9 +275,7 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
: "=&r" (tmp) : "=&r" (tmp)
: "r" (addr), "r" (mask) : "r" (addr), "r" (mask)
: "memory" : "memory"
#ifdef CONFIG_CHIP_M32700_TS1 __ATOMIC_CLOBBER
, "r5"
#endif /* CONFIG_CHIP_M32700_TS1 */
); );
local_irq_restore(flags); local_irq_restore(flags);
} }
......
@@ -17,7 +17,7 @@
 #define ATOMIC_INIT(i)	{ (i) }
 
-#define atomic_read(v)		(*(volatile int *)&(v)->counter)
+#define atomic_read(v)		ACCESS_ONCE((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = i)
 
 /*
@@ -30,16 +30,57 @@
#define ASM_DI "di" #define ASM_DI "di"
#endif #endif
static inline void atomic_add(int i, atomic_t *v) #define ATOMIC_OP(op, c_op, asm_op) \
{ static inline void atomic_##op(int i, atomic_t *v) \
__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i)); { \
__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
} \
#ifdef CONFIG_RMW_INSNS
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
int t, tmp; \
\
__asm__ __volatile__( \
"1: movel %2,%1\n" \
" " #asm_op "l %3,%1\n" \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
: "g" (i), "2" (atomic_read(v))); \
return t; \
} }
static inline void atomic_sub(int i, atomic_t *v) #else
{
__asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i)); #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned long flags; \
int t; \
\
local_irq_save(flags); \
t = (v->counter c_op i); \
local_irq_restore(flags); \
\
return t; \
} }
#endif /* CONFIG_RMW_INSNS */
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
ATOMIC_OP_RETURN(op, c_op, asm_op)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
static inline void atomic_inc(atomic_t *v) static inline void atomic_inc(atomic_t *v)
{ {
__asm__ __volatile__("addql #1,%0" : "+m" (*v)); __asm__ __volatile__("addql #1,%0" : "+m" (*v));
@@ -76,67 +117,11 @@ static inline int atomic_inc_and_test(atomic_t *v)
#ifdef CONFIG_RMW_INSNS #ifdef CONFIG_RMW_INSNS
static inline int atomic_add_return(int i, atomic_t *v)
{
int t, tmp;
__asm__ __volatile__(
"1: movel %2,%1\n"
" addl %3,%1\n"
" casl %2,%1,%0\n"
" jne 1b"
: "+m" (*v), "=&d" (t), "=&d" (tmp)
: "g" (i), "2" (atomic_read(v)));
return t;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
int t, tmp;
__asm__ __volatile__(
"1: movel %2,%1\n"
" subl %3,%1\n"
" casl %2,%1,%0\n"
" jne 1b"
: "+m" (*v), "=&d" (t), "=&d" (tmp)
: "g" (i), "2" (atomic_read(v)));
return t;
}
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#else /* !CONFIG_RMW_INSNS */ #else /* !CONFIG_RMW_INSNS */
static inline int atomic_add_return(int i, atomic_t * v)
{
unsigned long flags;
int t;
local_irq_save(flags);
t = atomic_read(v);
t += i;
atomic_set(v, t);
local_irq_restore(flags);
return t;
}
static inline int atomic_sub_return(int i, atomic_t * v)
{
unsigned long flags;
int t;
local_irq_save(flags);
t = atomic_read(v);
t -= i;
atomic_set(v, t);
local_irq_restore(flags);
return t;
}
static inline int atomic_cmpxchg(atomic_t *v, int old, int new) static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{ {
unsigned long flags; unsigned long flags;
......
@@ -27,85 +27,56 @@ static inline int atomic_read(const atomic_t *v)
return temp; return temp;
} }
static inline void atomic_add(int i, atomic_t *v) #define ATOMIC_OP(op) \
{ static inline void atomic_##op(int i, atomic_t *v) \
int temp; { \
int temp; \
asm volatile ( \
"1: LNKGETD %0, [%1]\n" asm volatile ( \
" ADD %0, %0, %2\n" "1: LNKGETD %0, [%1]\n" \
" LNKSETD [%1], %0\n" " " #op " %0, %0, %2\n" \
" DEFR %0, TXSTAT\n" " LNKSETD [%1], %0\n" \
" ANDT %0, %0, #HI(0x3f000000)\n" " DEFR %0, TXSTAT\n" \
" CMPT %0, #HI(0x02000000)\n" " ANDT %0, %0, #HI(0x3f000000)\n" \
" BNZ 1b\n" " CMPT %0, #HI(0x02000000)\n" \
: "=&d" (temp) " BNZ 1b\n" \
: "da" (&v->counter), "bd" (i) : "=&d" (temp) \
: "cc"); : "da" (&v->counter), "bd" (i) \
: "cc"); \
} \
#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
int result, temp; \
\
smp_mb(); \
\
asm volatile ( \
"1: LNKGETD %1, [%2]\n" \
" " #op " %1, %1, %3\n" \
" LNKSETD [%2], %1\n" \
" DEFR %0, TXSTAT\n" \
" ANDT %0, %0, #HI(0x3f000000)\n" \
" CMPT %0, #HI(0x02000000)\n" \
" BNZ 1b\n" \
: "=&d" (temp), "=&da" (result) \
: "da" (&v->counter), "bd" (i) \
: "cc"); \
\
smp_mb(); \
\
return result; \
} }
static inline void atomic_sub(int i, atomic_t *v) #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
{
int temp;
asm volatile ( ATOMIC_OPS(add)
"1: LNKGETD %0, [%1]\n" ATOMIC_OPS(sub)
" SUB %0, %0, %2\n"
" LNKSETD [%1], %0\n"
" DEFR %0, TXSTAT\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"
" BNZ 1b\n"
: "=&d" (temp)
: "da" (&v->counter), "bd" (i)
: "cc");
}
static inline int atomic_add_return(int i, atomic_t *v) #undef ATOMIC_OPS
{ #undef ATOMIC_OP_RETURN
int result, temp; #undef ATOMIC_OP
smp_mb();
asm volatile (
"1: LNKGETD %1, [%2]\n"
" ADD %1, %1, %3\n"
" LNKSETD [%2], %1\n"
" DEFR %0, TXSTAT\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"
" BNZ 1b\n"
: "=&d" (temp), "=&da" (result)
: "da" (&v->counter), "bd" (i)
: "cc");
smp_mb();
return result;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
int result, temp;
smp_mb();
asm volatile (
"1: LNKGETD %1, [%2]\n"
" SUB %1, %1, %3\n"
" LNKSETD [%2], %1\n"
" DEFR %0, TXSTAT\n"
" ANDT %0, %0, #HI(0x3f000000)\n"
" CMPT %0, #HI(0x02000000)\n"
" BNZ 1b\n"
: "=&d" (temp), "=&da" (result)
: "da" (&v->counter), "bd" (i)
: "cc");
smp_mb();
return result;
}
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{ {
......
@@ -37,55 +37,41 @@ static inline int atomic_set(atomic_t *v, int i)
return i; return i;
} }
static inline void atomic_add(int i, atomic_t *v) #define ATOMIC_OP(op, c_op) \
{ static inline void atomic_##op(int i, atomic_t *v) \
unsigned long flags; { \
unsigned long flags; \
__global_lock1(flags); \
fence(); __global_lock1(flags); \
v->counter += i; fence(); \
__global_unlock1(flags); v->counter c_op i; \
__global_unlock1(flags); \
} \
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long result; \
unsigned long flags; \
\
__global_lock1(flags); \
result = v->counter; \
result c_op i; \
fence(); \
v->counter = result; \
__global_unlock1(flags); \
\
return result; \
} }
static inline void atomic_sub(int i, atomic_t *v) #define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
{
unsigned long flags;
__global_lock1(flags); ATOMIC_OPS(add, +=)
fence(); ATOMIC_OPS(sub, -=)
v->counter -= i;
__global_unlock1(flags);
}
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long result;
unsigned long flags;
__global_lock1(flags); #undef ATOMIC_OPS
result = v->counter; #undef ATOMIC_OP_RETURN
result += i; #undef ATOMIC_OP
fence();
v->counter = result;
__global_unlock1(flags);
return result;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long result;
unsigned long flags;
__global_lock1(flags);
result = v->counter;
result -= i;
fence();
v->counter = result;
__global_unlock1(flags);
return result;
}
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{ {
......
@@ -33,7 +33,6 @@
  * @v: pointer of type atomic_t
  *
  * Atomically reads the value of @v. Note that the guaranteed
- * useful range of an atomic_t is only 24 bits.
  */
 #define atomic_read(v)	(ACCESS_ONCE((v)->counter))
@@ -43,102 +42,62 @@
* @i: required value * @i: required value
* *
* Atomically sets the value of @v to @i. Note that the guaranteed * Atomically sets the value of @v to @i. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/ */
#define atomic_set(v, i) (((v)->counter) = (i)) #define atomic_set(v, i) (((v)->counter) = (i))
/** #define ATOMIC_OP(op) \
* atomic_add_return - add integer to atomic variable static inline void atomic_##op(int i, atomic_t *v) \
* @i: integer value to add { \
* @v: pointer of type atomic_t int retval, status; \
* \
* Atomically adds @i to @v and returns the result asm volatile( \
* Note that the guaranteed useful range of an atomic_t is only 24 bits. "1: mov %4,(_AAR,%3) \n" \
*/ " mov (_ADR,%3),%1 \n" \
static inline int atomic_add_return(int i, atomic_t *v) " " #op " %5,%1 \n" \
{ " mov %1,(_ADR,%3) \n" \
int retval; " mov (_ADR,%3),%0 \n" /* flush */ \
#ifdef CONFIG_SMP " mov (_ASR,%3),%0 \n" \
int status; " or %0,%0 \n" \
" bne 1b \n" \
asm volatile( : "=&r"(status), "=&r"(retval), "=m"(v->counter) \
"1: mov %4,(_AAR,%3) \n" : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \
" mov (_ADR,%3),%1 \n" : "memory", "cc"); \
" add %5,%1 \n" }
" mov %1,(_ADR,%3) \n"
" mov (_ADR,%3),%0 \n" /* flush */
" mov (_ASR,%3),%0 \n"
" or %0,%0 \n"
" bne 1b \n"
: "=&r"(status), "=&r"(retval), "=m"(v->counter)
: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
: "memory", "cc");
#else
unsigned long flags;
flags = arch_local_cli_save(); #define ATOMIC_OP_RETURN(op) \
retval = v->counter; static inline int atomic_##op##_return(int i, atomic_t *v) \
retval += i; { \
v->counter = retval; int retval, status; \
arch_local_irq_restore(flags); \
#endif asm volatile( \
return retval; "1: mov %4,(_AAR,%3) \n" \
" mov (_ADR,%3),%1 \n" \
" " #op " %5,%1 \n" \
" mov %1,(_ADR,%3) \n" \
" mov (_ADR,%3),%0 \n" /* flush */ \
" mov (_ASR,%3),%0 \n" \
" or %0,%0 \n" \
" bne 1b \n" \
: "=&r"(status), "=&r"(retval), "=m"(v->counter) \
: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \
: "memory", "cc"); \
return retval; \
} }
/** #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
* atomic_sub_return - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns the result
* Note that the guaranteed useful range of an atomic_t is only 24 bits.
*/
static inline int atomic_sub_return(int i, atomic_t *v)
{
int retval;
#ifdef CONFIG_SMP
int status;
asm volatile( ATOMIC_OPS(add)
"1: mov %4,(_AAR,%3) \n" ATOMIC_OPS(sub)
" mov (_ADR,%3),%1 \n"
" sub %5,%1 \n"
" mov %1,(_ADR,%3) \n"
" mov (_ADR,%3),%0 \n" /* flush */
" mov (_ASR,%3),%0 \n"
" or %0,%0 \n"
" bne 1b \n"
: "=&r"(status), "=&r"(retval), "=m"(v->counter)
: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
: "memory", "cc");
#else #undef ATOMIC_OPS
unsigned long flags; #undef ATOMIC_OP_RETURN
flags = arch_local_cli_save(); #undef ATOMIC_OP
retval = v->counter;
retval -= i;
v->counter = retval;
arch_local_irq_restore(flags);
#endif
return retval;
}
static inline int atomic_add_negative(int i, atomic_t *v) static inline int atomic_add_negative(int i, atomic_t *v)
{ {
return atomic_add_return(i, v) < 0; return atomic_add_return(i, v) < 0;
} }
static inline void atomic_add(int i, atomic_t *v)
{
atomic_add_return(i, v);
}
static inline void atomic_sub(int i, atomic_t *v)
{
atomic_sub_return(i, v);
}
static inline void atomic_inc(atomic_t *v) static inline void atomic_inc(atomic_t *v)
{ {
atomic_add_return(1, v); atomic_add_return(1, v);
......
...@@ -55,24 +55,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; ...@@ -55,24 +55,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
* are atomic, so a reader never sees inconsistent values. * are atomic, so a reader never sees inconsistent values.
*/ */
/* It's possible to reduce all atomic operations to either static __inline__ void atomic_set(atomic_t *v, int i)
* __atomic_add_return, atomic_set and atomic_read (the latter
* is there only for consistency).
*/
static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
int ret;
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
ret = (v->counter += i);
_atomic_spin_unlock_irqrestore(v, flags);
return ret;
}
static __inline__ void atomic_set(atomic_t *v, int i)
{ {
unsigned long flags; unsigned long flags;
_atomic_spin_lock_irqsave(v, flags); _atomic_spin_lock_irqsave(v, flags);
...@@ -84,7 +67,7 @@ static __inline__ void atomic_set(atomic_t *v, int i) ...@@ -84,7 +67,7 @@ static __inline__ void atomic_set(atomic_t *v, int i)
static __inline__ int atomic_read(const atomic_t *v) static __inline__ int atomic_read(const atomic_t *v)
{ {
return (*(volatile int *)&(v)->counter); return ACCESS_ONCE((v)->counter);
} }
/* exported interface */ /* exported interface */
...@@ -115,16 +98,43 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -115,16 +98,43 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c; return c;
} }
#define ATOMIC_OP(op, c_op) \
static __inline__ void atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
_atomic_spin_lock_irqsave(v, flags); \
v->counter c_op i; \
_atomic_spin_unlock_irqrestore(v, flags); \
} \
#define ATOMIC_OP_RETURN(op, c_op) \
static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
\
_atomic_spin_lock_irqsave(v, flags); \
ret = (v->counter c_op i); \
_atomic_spin_unlock_irqrestore(v, flags); \
\
return ret; \
}
#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define atomic_add(i,v) ((void)(__atomic_add_return( (i),(v)))) #define atomic_inc(v) (atomic_add( 1,(v)))
#define atomic_sub(i,v) ((void)(__atomic_add_return(-((int) (i)),(v)))) #define atomic_dec(v) (atomic_add( -1,(v)))
#define atomic_inc(v) ((void)(__atomic_add_return( 1,(v))))
#define atomic_dec(v) ((void)(__atomic_add_return( -1,(v))))
#define atomic_add_return(i,v) (__atomic_add_return( (i),(v))) #define atomic_inc_return(v) (atomic_add_return( 1,(v)))
#define atomic_sub_return(i,v) (__atomic_add_return(-(i),(v))) #define atomic_dec_return(v) (atomic_add_return( -1,(v)))
#define atomic_inc_return(v) (__atomic_add_return( 1,(v)))
#define atomic_dec_return(v) (__atomic_add_return( -1,(v)))
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
...@@ -148,18 +158,37 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -148,18 +158,37 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
static __inline__ s64 #define ATOMIC64_OP(op, c_op) \
__atomic64_add_return(s64 i, atomic64_t *v) static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
{ { \
s64 ret; unsigned long flags; \
unsigned long flags; \
_atomic_spin_lock_irqsave(v, flags); _atomic_spin_lock_irqsave(v, flags); \
v->counter c_op i; \
_atomic_spin_unlock_irqrestore(v, flags); \
} \
#define ATOMIC64_OP_RETURN(op, c_op) \
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
s64 ret; \
\
_atomic_spin_lock_irqsave(v, flags); \
ret = (v->counter c_op i); \
_atomic_spin_unlock_irqrestore(v, flags); \
\
return ret; \
}
ret = (v->counter += i); #define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)
_atomic_spin_unlock_irqrestore(v, flags); ATOMIC64_OPS(add, +=)
return ret; ATOMIC64_OPS(sub, -=)
}
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static __inline__ void static __inline__ void
atomic64_set(atomic64_t *v, s64 i) atomic64_set(atomic64_t *v, s64 i)
...@@ -175,18 +204,14 @@ atomic64_set(atomic64_t *v, s64 i) ...@@ -175,18 +204,14 @@ atomic64_set(atomic64_t *v, s64 i)
static __inline__ s64 static __inline__ s64
atomic64_read(const atomic64_t *v) atomic64_read(const atomic64_t *v)
{ {
return (*(volatile long *)&(v)->counter); return ACCESS_ONCE((v)->counter);
} }
#define atomic64_add(i,v) ((void)(__atomic64_add_return( ((s64)(i)),(v)))) #define atomic64_inc(v) (atomic64_add( 1,(v)))
#define atomic64_sub(i,v) ((void)(__atomic64_add_return(-((s64)(i)),(v)))) #define atomic64_dec(v) (atomic64_add( -1,(v)))
#define atomic64_inc(v) ((void)(__atomic64_add_return( 1,(v))))
#define atomic64_dec(v) ((void)(__atomic64_add_return( -1,(v))))
#define atomic64_add_return(i,v) (__atomic64_add_return( ((s64)(i)),(v))) #define atomic64_inc_return(v) (atomic64_add_return( 1,(v)))
#define atomic64_sub_return(i,v) (__atomic64_add_return(-((s64)(i)),(v))) #define atomic64_dec_return(v) (atomic64_add_return( -1,(v)))
#define atomic64_inc_return(v) (__atomic64_add_return( 1,(v)))
#define atomic64_dec_return(v) (__atomic64_add_return( -1,(v)))
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
......
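The parisc fallback above (like the sparc32 one later in the series) does not take one global lock per operation; _atomic_spin_lock_irqsave() hashes the counter's address into __atomic_hash[] so that unrelated counters usually end up on different locks. A minimal userspace analogue of that hashed-lock scheme, using pthreads; the names, table size and shift below are invented for illustration:

        #include <pthread.h>
        #include <stdint.h>
        #include <stdio.h>

        #define LOCK_HASH_SIZE  16      /* power of two, like ATOMIC_HASH_SIZE */

        static pthread_mutex_t lock_hash[LOCK_HASH_SIZE];

        /* Pick a lock from the counter's address, like ATOMIC_HASH(v). */
        static pthread_mutex_t *hash_lock(const void *addr)
        {
                uintptr_t a = (uintptr_t)addr;

                /* Discard low bits shared by neighbouring objects. */
                return &lock_hash[(a >> 4) & (LOCK_HASH_SIZE - 1)];
        }

        static int counter_add_return(int i, int *counter)
        {
                pthread_mutex_t *lock = hash_lock(counter);
                int ret;

                pthread_mutex_lock(lock);
                ret = (*counter += i);
                pthread_mutex_unlock(lock);
                return ret;
        }

        int main(void)
        {
                int v = 0;

                for (int i = 0; i < LOCK_HASH_SIZE; i++)
                        pthread_mutex_init(&lock_hash[i], NULL);

                printf("%d\n", counter_add_return(3, &v));      /* prints 3 */
                return 0;
        }

The kernel version additionally disables interrupts (the _irqsave part), so the same counter can also be touched safely from interrupt context.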
...@@ -26,76 +26,53 @@ static __inline__ void atomic_set(atomic_t *v, int i) ...@@ -26,76 +26,53 @@ static __inline__ void atomic_set(atomic_t *v, int i)
__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
} }
static __inline__ void atomic_add(int a, atomic_t *v) #define ATOMIC_OP(op, asm_op) \
{ static __inline__ void atomic_##op(int a, atomic_t *v) \
int t; { \
int t; \
__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_add\n\ __asm__ __volatile__( \
add %0,%2,%0\n" "1: lwarx %0,0,%3 # atomic_" #op "\n" \
PPC405_ERR77(0,%3) #asm_op " %0,%2,%0\n" \
" stwcx. %0,0,%3 \n\ PPC405_ERR77(0,%3) \
bne- 1b" " stwcx. %0,0,%3 \n" \
: "=&r" (t), "+m" (v->counter) " bne- 1b\n" \
: "r" (a), "r" (&v->counter) : "=&r" (t), "+m" (v->counter) \
: "cc"); : "r" (a), "r" (&v->counter) \
: "cc"); \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
{ \
int t; \
\
__asm__ __volatile__( \
PPC_ATOMIC_ENTRY_BARRIER \
"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
#asm_op " %0,%1,%0\n" \
PPC405_ERR77(0,%2) \
" stwcx. %0,0,%2 \n" \
" bne- 1b\n" \
PPC_ATOMIC_EXIT_BARRIER \
: "=&r" (t) \
: "r" (a), "r" (&v->counter) \
: "cc", "memory"); \
\
return t; \
} }
static __inline__ int atomic_add_return(int a, atomic_t *v) #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
{
int t;
__asm__ __volatile__( ATOMIC_OPS(add, add)
PPC_ATOMIC_ENTRY_BARRIER ATOMIC_OPS(sub, subf)
"1: lwarx %0,0,%2 # atomic_add_return\n\
add %0,%1,%0\n"
PPC405_ERR77(0,%2)
" stwcx. %0,0,%2 \n\
bne- 1b"
PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (a), "r" (&v->counter)
: "cc", "memory");
return t; #undef ATOMIC_OPS
} #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
static __inline__ void atomic_sub(int a, atomic_t *v)
{
int t;
__asm__ __volatile__(
"1: lwarx %0,0,%3 # atomic_sub\n\
subf %0,%2,%0\n"
PPC405_ERR77(0,%3)
" stwcx. %0,0,%3 \n\
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (a), "r" (&v->counter)
: "cc");
}
static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
int t;
__asm__ __volatile__(
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # atomic_sub_return\n\
subf %0,%1,%0\n"
PPC405_ERR77(0,%2)
" stwcx. %0,0,%2 \n\
bne- 1b"
PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (a), "r" (&v->counter)
: "cc", "memory");
return t;
}
static __inline__ void atomic_inc(atomic_t *v) static __inline__ void atomic_inc(atomic_t *v)
{ {
int t; int t;
...@@ -289,71 +266,50 @@ static __inline__ void atomic64_set(atomic64_t *v, long i) ...@@ -289,71 +266,50 @@ static __inline__ void atomic64_set(atomic64_t *v, long i)
__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i)); __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
} }
static __inline__ void atomic64_add(long a, atomic64_t *v) #define ATOMIC64_OP(op, asm_op) \
{ static __inline__ void atomic64_##op(long a, atomic64_t *v) \
long t; { \
long t; \
__asm__ __volatile__( \
"1: ldarx %0,0,%3 # atomic64_add\n\ __asm__ __volatile__( \
add %0,%2,%0\n\ "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
stdcx. %0,0,%3 \n\ #asm_op " %0,%2,%0\n" \
bne- 1b" " stdcx. %0,0,%3 \n" \
: "=&r" (t), "+m" (v->counter) " bne- 1b\n" \
: "r" (a), "r" (&v->counter) : "=&r" (t), "+m" (v->counter) \
: "cc"); : "r" (a), "r" (&v->counter) \
: "cc"); \
} }
static __inline__ long atomic64_add_return(long a, atomic64_t *v) #define ATOMIC64_OP_RETURN(op, asm_op) \
{ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
long t; { \
long t; \
__asm__ __volatile__( \
PPC_ATOMIC_ENTRY_BARRIER __asm__ __volatile__( \
"1: ldarx %0,0,%2 # atomic64_add_return\n\ PPC_ATOMIC_ENTRY_BARRIER \
add %0,%1,%0\n\ "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
stdcx. %0,0,%2 \n\ #asm_op " %0,%1,%0\n" \
bne- 1b" " stdcx. %0,0,%2 \n" \
PPC_ATOMIC_EXIT_BARRIER " bne- 1b\n" \
: "=&r" (t) PPC_ATOMIC_EXIT_BARRIER \
: "r" (a), "r" (&v->counter) : "=&r" (t) \
: "cc", "memory"); : "r" (a), "r" (&v->counter) \
: "cc", "memory"); \
return t; \
return t; \
} }
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
long t;
__asm__ __volatile__(
"1: ldarx %0,0,%3 # atomic64_sub\n\
subf %0,%2,%0\n\
stdcx. %0,0,%3 \n\
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (a), "r" (&v->counter)
: "cc");
}
static __inline__ long atomic64_sub_return(long a, atomic64_t *v) ATOMIC64_OPS(add, add)
{ ATOMIC64_OPS(sub, subf)
long t;
__asm__ __volatile__( #undef ATOMIC64_OPS
PPC_ATOMIC_ENTRY_BARRIER #undef ATOMIC64_OP_RETURN
"1: ldarx %0,0,%2 # atomic64_sub_return\n\ #undef ATOMIC64_OP
subf %0,%1,%0\n\
stdcx. %0,0,%2 \n\
bne- 1b"
PPC_ATOMIC_EXIT_BARRIER
: "=&r" (t)
: "r" (a), "r" (&v->counter)
: "cc", "memory");
return t; #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
}
static __inline__ void atomic64_inc(atomic64_t *v) static __inline__ void atomic64_inc(atomic64_t *v)
{ {
......
#ifndef __ASM_SH_ATOMIC_GRB_H #ifndef __ASM_SH_ATOMIC_GRB_H
#define __ASM_SH_ATOMIC_GRB_H #define __ASM_SH_ATOMIC_GRB_H
static inline void atomic_add(int i, atomic_t *v) #define ATOMIC_OP(op) \
{ static inline void atomic_##op(int i, atomic_t *v) \
int tmp; { \
int tmp; \
__asm__ __volatile__ ( \
" .align 2 \n\t" __asm__ __volatile__ ( \
" mova 1f, r0 \n\t" /* r0 = end point */ " .align 2 \n\t" \
" mov r15, r1 \n\t" /* r1 = saved sp */ " mova 1f, r0 \n\t" /* r0 = end point */ \
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */ " mov r15, r1 \n\t" /* r1 = saved sp */ \
" mov.l @%1, %0 \n\t" /* load old value */ " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
" add %2, %0 \n\t" /* add */ " mov.l @%1, %0 \n\t" /* load old value */ \
" mov.l %0, @%1 \n\t" /* store new value */ " " #op " %2, %0 \n\t" /* $op */ \
"1: mov r1, r15 \n\t" /* LOGOUT */ " mov.l %0, @%1 \n\t" /* store new value */ \
: "=&r" (tmp), "1: mov r1, r15 \n\t" /* LOGOUT */ \
"+r" (v) : "=&r" (tmp), \
: "r" (i) "+r" (v) \
: "memory" , "r0", "r1"); : "r" (i) \
} : "memory" , "r0", "r1"); \
} \
static inline void atomic_sub(int i, atomic_t *v)
{
int tmp;
__asm__ __volatile__ (
" .align 2 \n\t"
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" sub %2, %0 \n\t" /* sub */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (v)
: "r" (i)
: "memory" , "r0", "r1");
}
static inline int atomic_add_return(int i, atomic_t *v)
{
int tmp;
__asm__ __volatile__ ( #define ATOMIC_OP_RETURN(op) \
" .align 2 \n\t" static inline int atomic_##op##_return(int i, atomic_t *v) \
" mova 1f, r0 \n\t" /* r0 = end point */ { \
" mov r15, r1 \n\t" /* r1 = saved sp */ int tmp; \
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
" mov.l @%1, %0 \n\t" /* load old value */ __asm__ __volatile__ ( \
" add %2, %0 \n\t" /* add */ " .align 2 \n\t" \
" mov.l %0, @%1 \n\t" /* store new value */ " mova 1f, r0 \n\t" /* r0 = end point */ \
"1: mov r1, r15 \n\t" /* LOGOUT */ " mov r15, r1 \n\t" /* r1 = saved sp */ \
: "=&r" (tmp), " mov #-6, r15 \n\t" /* LOGIN: r15 = size */ \
"+r" (v) " mov.l @%1, %0 \n\t" /* load old value */ \
: "r" (i) " " #op " %2, %0 \n\t" /* $op */ \
: "memory" , "r0", "r1"); " mov.l %0, @%1 \n\t" /* store new value */ \
"1: mov r1, r15 \n\t" /* LOGOUT */ \
return tmp; : "=&r" (tmp), \
"+r" (v) \
: "r" (i) \
: "memory" , "r0", "r1"); \
\
return tmp; \
} }
static inline int atomic_sub_return(int i, atomic_t *v) #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
{
int tmp;
__asm__ __volatile__ ( ATOMIC_OPS(add)
" .align 2 \n\t" ATOMIC_OPS(sub)
" mova 1f, r0 \n\t" /* r0 = end point */
" mov r15, r1 \n\t" /* r1 = saved sp */
" mov #-6, r15 \n\t" /* LOGIN: r15 = size */
" mov.l @%1, %0 \n\t" /* load old value */
" sub %2, %0 \n\t" /* sub */
" mov.l %0, @%1 \n\t" /* store new value */
"1: mov r1, r15 \n\t" /* LOGOUT */
: "=&r" (tmp),
"+r" (v)
: "r" (i)
: "memory", "r0", "r1");
return tmp; #undef ATOMIC_OPS
} #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{ {
......
...@@ -8,49 +8,39 @@ ...@@ -8,49 +8,39 @@
* forward to code at the end of this object's .text section, then * forward to code at the end of this object's .text section, then
* branch back to restart the operation. * branch back to restart the operation.
*/ */
static inline void atomic_add(int i, atomic_t *v)
{
unsigned long flags;
raw_local_irq_save(flags);
v->counter += i;
raw_local_irq_restore(flags);
}
static inline void atomic_sub(int i, atomic_t *v) #define ATOMIC_OP(op, c_op) \
{ static inline void atomic_##op(int i, atomic_t *v) \
unsigned long flags; { \
unsigned long flags; \
raw_local_irq_save(flags); \
v->counter -= i; raw_local_irq_save(flags); \
raw_local_irq_restore(flags); v->counter c_op i; \
raw_local_irq_restore(flags); \
} }
static inline int atomic_add_return(int i, atomic_t *v) #define ATOMIC_OP_RETURN(op, c_op) \
{ static inline int atomic_##op##_return(int i, atomic_t *v) \
unsigned long temp, flags; { \
unsigned long temp, flags; \
raw_local_irq_save(flags); \
temp = v->counter; raw_local_irq_save(flags); \
temp += i; temp = v->counter; \
v->counter = temp; temp c_op i; \
raw_local_irq_restore(flags); v->counter = temp; \
raw_local_irq_restore(flags); \
return temp; \
return temp; \
} }
static inline int atomic_sub_return(int i, atomic_t *v) #define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
{
unsigned long temp, flags;
raw_local_irq_save(flags); ATOMIC_OPS(add, +=)
temp = v->counter; ATOMIC_OPS(sub, -=)
temp -= i;
v->counter = temp;
raw_local_irq_restore(flags);
return temp; #undef ATOMIC_OPS
} #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{ {
......
#ifndef __ASM_SH_ATOMIC_LLSC_H #ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H #define __ASM_SH_ATOMIC_LLSC_H
/*
* To get proper branch prediction for the main line, we must branch
* forward to code at the end of this object's .text section, then
* branch back to restart the operation.
*/
static inline void atomic_add(int i, atomic_t *v)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_add \n"
" add %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
: "=&z" (tmp)
: "r" (i), "r" (&v->counter)
: "t");
}
static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long tmp;
__asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_sub \n"
" sub %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
: "=&z" (tmp)
: "r" (i), "r" (&v->counter)
: "t");
}
/* /*
* SH-4A note: * SH-4A note:
* *
...@@ -42,39 +9,53 @@ static inline void atomic_sub(int i, atomic_t *v) ...@@ -42,39 +9,53 @@ static inline void atomic_sub(int i, atomic_t *v)
* encoding, so the retval is automatically set without having to * encoding, so the retval is automatically set without having to
* do any special work. * do any special work.
*/ */
static inline int atomic_add_return(int i, atomic_t *v) /*
{ * To get proper branch prediction for the main line, we must branch
unsigned long temp; * forward to code at the end of this object's .text section, then
* branch back to restart the operation.
*/
__asm__ __volatile__ ( #define ATOMIC_OP(op) \
"1: movli.l @%2, %0 ! atomic_add_return \n" static inline void atomic_##op(int i, atomic_t *v) \
" add %1, %0 \n" { \
" movco.l %0, @%2 \n" unsigned long tmp; \
" bf 1b \n" \
" synco \n" __asm__ __volatile__ ( \
: "=&z" (temp) "1: movli.l @%2, %0 ! atomic_" #op "\n" \
: "r" (i), "r" (&v->counter) " " #op " %1, %0 \n" \
: "t"); " movco.l %0, @%2 \n" \
" bf 1b \n" \
: "=&z" (tmp) \
: "r" (i), "r" (&v->counter) \
: "t"); \
}
return temp; #define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long temp; \
\
__asm__ __volatile__ ( \
"1: movli.l @%2, %0 ! atomic_" #op "_return \n" \
" " #op " %1, %0 \n" \
" movco.l %0, @%2 \n" \
" bf 1b \n" \
" synco \n" \
: "=&z" (temp) \
: "r" (i), "r" (&v->counter) \
: "t"); \
\
return temp; \
} }
static inline int atomic_sub_return(int i, atomic_t *v) #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
{
unsigned long temp;
__asm__ __volatile__ ( ATOMIC_OPS(add)
"1: movli.l @%2, %0 ! atomic_sub_return \n" ATOMIC_OPS(sub)
" sub %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
" synco \n"
: "=&z" (temp)
: "r" (i), "r" (&v->counter)
: "t");
return temp; #undef ATOMIC_OPS
} #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{ {
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#define ATOMIC_INIT(i) { (i) } #define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i)) #define atomic_set(v,i) ((v)->counter = (i))
#if defined(CONFIG_GUSA_RB) #if defined(CONFIG_GUSA_RB)
......
...@@ -20,23 +20,22 @@ ...@@ -20,23 +20,22 @@
#define ATOMIC_INIT(i) { (i) } #define ATOMIC_INIT(i) { (i) }
int __atomic_add_return(int, atomic_t *); int atomic_add_return(int, atomic_t *);
int atomic_cmpxchg(atomic_t *, int, int); int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
int __atomic_add_unless(atomic_t *, int, int); int __atomic_add_unless(atomic_t *, int, int);
void atomic_set(atomic_t *, int); void atomic_set(atomic_t *, int);
#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v))) #define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v))) #define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v) ((void)__atomic_add_return( 1, (v))) #define atomic_inc(v) ((void)atomic_add_return( 1, (v)))
#define atomic_dec(v) ((void)__atomic_add_return( -1, (v))) #define atomic_dec(v) ((void)atomic_add_return( -1, (v)))
#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v))) #define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v))) #define atomic_inc_return(v) (atomic_add_return( 1, (v)))
#define atomic_inc_return(v) (__atomic_add_return( 1, (v))) #define atomic_dec_return(v) (atomic_add_return( -1, (v)))
#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
......
...@@ -14,33 +14,34 @@ ...@@ -14,33 +14,34 @@
#define ATOMIC_INIT(i) { (i) } #define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic64_read(v) (*(volatile long *)&(v)->counter) #define atomic64_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v, i) (((v)->counter) = i) #define atomic_set(v, i) (((v)->counter) = i)
#define atomic64_set(v, i) (((v)->counter) = i) #define atomic64_set(v, i) (((v)->counter) = i)
void atomic_add(int, atomic_t *); #define ATOMIC_OP(op) \
void atomic64_add(long, atomic64_t *); void atomic_##op(int, atomic_t *); \
void atomic_sub(int, atomic_t *); void atomic64_##op(long, atomic64_t *);
void atomic64_sub(long, atomic64_t *);
int atomic_add_ret(int, atomic_t *); #define ATOMIC_OP_RETURN(op) \
long atomic64_add_ret(long, atomic64_t *); int atomic_##op##_return(int, atomic_t *); \
int atomic_sub_ret(int, atomic_t *); long atomic64_##op##_return(long, atomic64_t *);
long atomic64_sub_ret(long, atomic64_t *);
#define atomic_dec_return(v) atomic_sub_ret(1, v) #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
#define atomic_inc_return(v) atomic_add_ret(1, v) ATOMIC_OPS(add)
#define atomic64_inc_return(v) atomic64_add_ret(1, v) ATOMIC_OPS(sub)
#define atomic_sub_return(i, v) atomic_sub_ret(i, v) #undef ATOMIC_OPS
#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v) #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define atomic_add_return(i, v) atomic_add_ret(i, v) #define atomic_dec_return(v) atomic_sub_return(1, v)
#define atomic64_add_return(i, v) atomic64_add_ret(i, v) #define atomic64_dec_return(v) atomic64_sub_return(1, v)
#define atomic_inc_return(v) atomic_add_return(1, v)
#define atomic64_inc_return(v) atomic64_add_return(1, v)
/* /*
* atomic_inc_and_test - increment and test * atomic_inc_and_test - increment and test
...@@ -53,11 +54,11 @@ long atomic64_sub_ret(long, atomic64_t *); ...@@ -53,11 +54,11 @@ long atomic64_sub_ret(long, atomic64_t *);
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0) #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0) #define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0) #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0) #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
#define atomic_inc(v) atomic_add(1, v) #define atomic_inc(v) atomic_add(1, v)
#define atomic64_inc(v) atomic64_add(1, v) #define atomic64_inc(v) atomic64_add(1, v)
...@@ -65,8 +66,8 @@ long atomic64_sub_ret(long, atomic64_t *); ...@@ -65,8 +66,8 @@ long atomic64_sub_ret(long, atomic64_t *);
#define atomic_dec(v) atomic_sub(1, v) #define atomic_dec(v) atomic_sub(1, v)
#define atomic64_dec(v) atomic64_sub(1, v) #define atomic64_dec(v) atomic64_sub(1, v)
#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
......
...@@ -1138,7 +1138,7 @@ static unsigned long penguins_are_doing_time; ...@@ -1138,7 +1138,7 @@ static unsigned long penguins_are_doing_time;
void smp_capture(void) void smp_capture(void)
{ {
int result = atomic_add_ret(1, &smp_capture_depth); int result = atomic_add_return(1, &smp_capture_depth);
if (result == 1) { if (result == 1) {
int ncpus = num_online_cpus(); int ncpus = num_online_cpus();
......
...@@ -27,18 +27,23 @@ static DEFINE_SPINLOCK(dummy); ...@@ -27,18 +27,23 @@ static DEFINE_SPINLOCK(dummy);
#endif /* SMP */ #endif /* SMP */
int __atomic_add_return(int i, atomic_t *v) #define ATOMIC_OP(op, cop) \
{ int atomic_##op##_return(int i, atomic_t *v) \
int ret; { \
unsigned long flags; int ret; \
spin_lock_irqsave(ATOMIC_HASH(v), flags); unsigned long flags; \
spin_lock_irqsave(ATOMIC_HASH(v), flags); \
ret = (v->counter += i); \
ret = (v->counter cop i); \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
} return ret; \
EXPORT_SYMBOL(__atomic_add_return); } \
EXPORT_SYMBOL(atomic_##op##_return);
ATOMIC_OP(add, +=)
#undef ATOMIC_OP
int atomic_cmpxchg(atomic_t *v, int old, int new) int atomic_cmpxchg(atomic_t *v, int old, int new)
{ {
......
...@@ -14,109 +14,80 @@ ...@@ -14,109 +14,80 @@
* memory barriers, and a second which returns * memory barriers, and a second which returns
* a value and does the barriers. * a value and does the barriers.
*/ */
ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
add %g1, %o0, %g7
cas [%o1], %g1, %g7
cmp %g1, %g7
bne,pn %icc, BACKOFF_LABEL(2f, 1b)
nop
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic_add)
ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */ #define ATOMIC_OP(op) \
BACKOFF_SETUP(%o2) ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
1: lduw [%o1], %g1 BACKOFF_SETUP(%o2); \
sub %g1, %o0, %g7 1: lduw [%o1], %g1; \
cas [%o1], %g1, %g7 op %g1, %o0, %g7; \
cmp %g1, %g7 cas [%o1], %g1, %g7; \
bne,pn %icc, BACKOFF_LABEL(2f, 1b) cmp %g1, %g7; \
nop bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
retl nop; \
nop retl; \
2: BACKOFF_SPIN(%o2, %o3, 1b) nop; \
ENDPROC(atomic_sub) 2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic_##op); \
ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ #define ATOMIC_OP_RETURN(op) \
BACKOFF_SETUP(%o2) ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
1: lduw [%o1], %g1 BACKOFF_SETUP(%o2); \
add %g1, %o0, %g7 1: lduw [%o1], %g1; \
cas [%o1], %g1, %g7 op %g1, %o0, %g7; \
cmp %g1, %g7 cas [%o1], %g1, %g7; \
bne,pn %icc, BACKOFF_LABEL(2f, 1b) cmp %g1, %g7; \
add %g1, %o0, %g1 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
retl op %g1, %o0, %g1; \
sra %g1, 0, %o0 retl; \
2: BACKOFF_SPIN(%o2, %o3, 1b) sra %g1, 0, %o0; \
ENDPROC(atomic_add_ret) 2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic_##op##_return);
ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */ #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
sub %g1, %o0, %g7
cas [%o1], %g1, %g7
cmp %g1, %g7
bne,pn %icc, BACKOFF_LABEL(2f, 1b)
sub %g1, %o0, %g1
retl
sra %g1, 0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic_sub_ret)
ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */ ATOMIC_OPS(add)
BACKOFF_SETUP(%o2) ATOMIC_OPS(sub)
1: ldx [%o1], %g1
add %g1, %o0, %g7
casx [%o1], %g1, %g7
cmp %g1, %g7
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
nop
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_add)
ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */ #undef ATOMIC_OPS
BACKOFF_SETUP(%o2) #undef ATOMIC_OP_RETURN
1: ldx [%o1], %g1 #undef ATOMIC_OP
sub %g1, %o0, %g7
casx [%o1], %g1, %g7
cmp %g1, %g7
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
nop
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_sub)
ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */ #define ATOMIC64_OP(op) \
BACKOFF_SETUP(%o2) ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
1: ldx [%o1], %g1 BACKOFF_SETUP(%o2); \
add %g1, %o0, %g7 1: ldx [%o1], %g1; \
casx [%o1], %g1, %g7 op %g1, %o0, %g7; \
cmp %g1, %g7 casx [%o1], %g1, %g7; \
bne,pn %xcc, BACKOFF_LABEL(2f, 1b) cmp %g1, %g7; \
nop bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
retl nop; \
add %g1, %o0, %o0 retl; \
2: BACKOFF_SPIN(%o2, %o3, 1b) nop; \
ENDPROC(atomic64_add_ret) 2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic64_##op); \
ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */ #define ATOMIC64_OP_RETURN(op) \
BACKOFF_SETUP(%o2) ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
1: ldx [%o1], %g1 BACKOFF_SETUP(%o2); \
sub %g1, %o0, %g7 1: ldx [%o1], %g1; \
casx [%o1], %g1, %g7 op %g1, %o0, %g7; \
cmp %g1, %g7 casx [%o1], %g1, %g7; \
bne,pn %xcc, BACKOFF_LABEL(2f, 1b) cmp %g1, %g7; \
nop bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
retl nop; \
sub %g1, %o0, %o0 retl; \
2: BACKOFF_SPIN(%o2, %o3, 1b) op %g1, %o0, %o0; \
ENDPROC(atomic64_sub_ret) 2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic64_##op##_return);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
BACKOFF_SETUP(%o2) BACKOFF_SETUP(%o2)
......
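In the sparc64 assembly above, a failed cas/casx does not retry immediately: BACKOFF_LABEL()/BACKOFF_SPIN() insert a growing delay so that CPUs fighting over the same counter stop hammering one cache line. The same retry-with-backoff shape written as a plain C compare-and-swap loop; the delay loop and its limit are invented for illustration, and the GCC/Clang __sync builtin stands in for cas:

        /* Add to *counter with compare-and-swap, backing off after a failure. */
        int add_return_backoff(int i, int *counter)
        {
                unsigned int delay = 1;
                int old, seen;

                for (;;) {
                        old = *counter;
                        seen = __sync_val_compare_and_swap(counter, old, old + i);
                        if (seen == old)
                                return old + i;         /* cas succeeded */

                        /* Lost the race: wait a while before retrying. */
                        for (volatile unsigned int n = delay; n; n--)
                                ;
                        if (delay < 4096)
                                delay <<= 1;            /* exponential backoff */
                }
        }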
...@@ -99,14 +99,23 @@ EXPORT_SYMBOL(___copy_in_user); ...@@ -99,14 +99,23 @@ EXPORT_SYMBOL(___copy_in_user);
EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__clear_user);
/* Atomic counter implementation. */ /* Atomic counter implementation. */
EXPORT_SYMBOL(atomic_add); #define ATOMIC_OP(op) \
EXPORT_SYMBOL(atomic_add_ret); EXPORT_SYMBOL(atomic_##op); \
EXPORT_SYMBOL(atomic_sub); EXPORT_SYMBOL(atomic64_##op);
EXPORT_SYMBOL(atomic_sub_ret);
EXPORT_SYMBOL(atomic64_add); #define ATOMIC_OP_RETURN(op) \
EXPORT_SYMBOL(atomic64_add_ret); EXPORT_SYMBOL(atomic_##op##_return); \
EXPORT_SYMBOL(atomic64_sub); EXPORT_SYMBOL(atomic64_##op##_return);
EXPORT_SYMBOL(atomic64_sub_ret);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
EXPORT_SYMBOL(atomic64_dec_if_positive); EXPORT_SYMBOL(atomic64_dec_if_positive);
/* Atomic bit operations. */ /* Atomic bit operations. */
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
*/ */
static inline int atomic_read(const atomic_t *v) static inline int atomic_read(const atomic_t *v)
{ {
return (*(volatile int *)&(v)->counter); return ACCESS_ONCE((v)->counter);
} }
/** /**
...@@ -219,21 +219,6 @@ static inline short int atomic_inc_short(short int *v) ...@@ -219,21 +219,6 @@ static inline short int atomic_inc_short(short int *v)
return *v; return *v;
} }
#ifdef CONFIG_X86_64
/**
* atomic_or_long - OR of two long integers
* @v1: pointer to type unsigned long
* @v2: pointer to type unsigned long
*
* Atomically ORs @v1 and @v2
* Returns the result of the OR
*/
static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
{
asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
}
#endif
/* These are x86-specific, used by some header files */ /* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \ #define atomic_clear_mask(mask, addr) \
asm volatile(LOCK_PREFIX "andl %0,%1" \ asm volatile(LOCK_PREFIX "andl %0,%1" \
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
*/ */
static inline long atomic64_read(const atomic64_t *v) static inline long atomic64_read(const atomic64_t *v)
{ {
return (*(volatile long *)&(v)->counter); return ACCESS_ONCE((v)->counter);
} }
/** /**
......
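The atomic_read()/atomic64_read() changes in the x86 headers above (and in the other architectures) swap the open-coded "*(volatile int *)&(v)->counter" cast for ACCESS_ONCE(), which in kernels of this vintage is defined in <linux/compiler.h> as essentially (*(volatile typeof(x) *)&(x)): the same volatile load, expressed once and by name. What the volatile access buys is protection against the compiler caching or tearing the read; a small standalone illustration, where the names are made up and __typeof__ stands in for the kernel's typeof:

        #define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

        int done;

        void wait_for_done(void)
        {
                /*
                 * Without the volatile access the compiler may load 'done'
                 * once and hoist it out of the loop, spinning forever.
                 * ACCESS_ONCE() forces a fresh load on every iteration;
                 * it does not, by itself, order the access against others.
                 */
                while (!ACCESS_ONCE(done))
                        ;       /* spin */
        }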
...@@ -47,7 +47,7 @@ ...@@ -47,7 +47,7 @@
* *
* Atomically reads the value of @v. * Atomically reads the value of @v.
*/ */
#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_read(v) ACCESS_ONCE((v)->counter)
/** /**
* atomic_set - set atomic variable * atomic_set - set atomic variable
...@@ -58,165 +58,96 @@ ...@@ -58,165 +58,96 @@
*/ */
#define atomic_set(v,i) ((v)->counter = (i)) #define atomic_set(v,i) ((v)->counter = (i))
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
static inline void atomic_add(int i, atomic_t * v)
{
#if XCHAL_HAVE_S32C1I #if XCHAL_HAVE_S32C1I
unsigned long tmp; #define ATOMIC_OP(op) \
int result; static inline void atomic_##op(int i, atomic_t * v) \
{ \
__asm__ __volatile__( unsigned long tmp; \
"1: l32i %1, %3, 0\n" int result; \
" wsr %1, scompare1\n" \
" add %0, %1, %2\n" __asm__ __volatile__( \
" s32c1i %0, %3, 0\n" "1: l32i %1, %3, 0\n" \
" bne %0, %1, 1b\n" " wsr %1, scompare1\n" \
: "=&a" (result), "=&a" (tmp) " " #op " %0, %1, %2\n" \
: "a" (i), "a" (v) " s32c1i %0, %3, 0\n" \
: "memory" " bne %0, %1, 1b\n" \
); : "=&a" (result), "=&a" (tmp) \
#else : "a" (i), "a" (v) \
unsigned int vval; : "memory" \
); \
__asm__ __volatile__( } \
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n" #define ATOMIC_OP_RETURN(op) \
" add %0, %0, %1\n" static inline int atomic_##op##_return(int i, atomic_t * v) \
" s32i %0, %2, 0\n" { \
" wsr a15, ps\n" unsigned long tmp; \
" rsync\n" int result; \
: "=&a" (vval) \
: "a" (i), "a" (v) __asm__ __volatile__( \
: "a15", "memory" "1: l32i %1, %3, 0\n" \
); " wsr %1, scompare1\n" \
#endif " " #op " %0, %1, %2\n" \
} " s32c1i %0, %3, 0\n" \
" bne %0, %1, 1b\n" \
/** " " #op " %0, %0, %2\n" \
* atomic_sub - subtract the atomic variable : "=&a" (result), "=&a" (tmp) \
* @i: integer value to subtract : "a" (i), "a" (v) \
* @v: pointer of type atomic_t : "memory" \
* ); \
* Atomically subtracts @i from @v. \
*/ return result; \
static inline void atomic_sub(int i, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
unsigned long tmp;
int result;
__asm__ __volatile__(
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" sub %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
: "=&a" (result), "=&a" (tmp)
: "a" (i), "a" (v)
: "memory"
);
#else
unsigned int vval;
__asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" sub %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
#endif
} }
/* #else /* XCHAL_HAVE_S32C1I */
* We use atomic_{add|sub}_return to define other functions.
*/ #define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t * v) \
static inline int atomic_add_return(int i, atomic_t * v) { \
{ unsigned int vval; \
#if XCHAL_HAVE_S32C1I \
unsigned long tmp; __asm__ __volatile__( \
int result; " rsil a15, "__stringify(LOCKLEVEL)"\n"\
" l32i %0, %2, 0\n" \
__asm__ __volatile__( " " #op " %0, %0, %1\n" \
"1: l32i %1, %3, 0\n" " s32i %0, %2, 0\n" \
" wsr %1, scompare1\n" " wsr a15, ps\n" \
" add %0, %1, %2\n" " rsync\n" \
" s32c1i %0, %3, 0\n" : "=&a" (vval) \
" bne %0, %1, 1b\n" : "a" (i), "a" (v) \
" add %0, %0, %2\n" : "a15", "memory" \
: "=&a" (result), "=&a" (tmp) ); \
: "a" (i), "a" (v) } \
: "memory"
); #define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t * v) \
return result; { \
#else unsigned int vval; \
unsigned int vval; \
__asm__ __volatile__( \
__asm__ __volatile__( " rsil a15,"__stringify(LOCKLEVEL)"\n" \
" rsil a15,"__stringify(LOCKLEVEL)"\n" " l32i %0, %2, 0\n" \
" l32i %0, %2, 0\n" " " #op " %0, %0, %1\n" \
" add %0, %0, %1\n" " s32i %0, %2, 0\n" \
" s32i %0, %2, 0\n" " wsr a15, ps\n" \
" wsr a15, ps\n" " rsync\n" \
" rsync\n" : "=&a" (vval) \
: "=&a" (vval) : "a" (i), "a" (v) \
: "a" (i), "a" (v) : "a15", "memory" \
: "a15", "memory" ); \
); \
return vval; \
return vval;
#endif
} }
static inline int atomic_sub_return(int i, atomic_t * v) #endif /* XCHAL_HAVE_S32C1I */
{
#if XCHAL_HAVE_S32C1I
unsigned long tmp;
int result;
__asm__ __volatile__( #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
"1: l32i %1, %3, 0\n"
" wsr %1, scompare1\n"
" sub %0, %1, %2\n"
" s32c1i %0, %3, 0\n"
" bne %0, %1, 1b\n"
" sub %0, %0, %2\n"
: "=&a" (result), "=&a" (tmp)
: "a" (i), "a" (v)
: "memory"
);
return result; ATOMIC_OPS(add)
#else ATOMIC_OPS(sub)
unsigned int vval;
__asm__ __volatile__(
" rsil a15,"__stringify(LOCKLEVEL)"\n"
" l32i %0, %2, 0\n"
" sub %0, %0, %1\n"
" s32i %0, %2, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
return vval; #undef ATOMIC_OPS
#endif #undef ATOMIC_OP_RETURN
} #undef ATOMIC_OP
/** /**
* atomic_sub_and_test - subtract value from variable and test result * atomic_sub_and_test - subtract value from variable and test result
......
...@@ -18,14 +18,100 @@ ...@@ -18,14 +18,100 @@
#include <asm/cmpxchg.h> #include <asm/cmpxchg.h>
#include <asm/barrier.h> #include <asm/barrier.h>
/*
* atomic_$op() - $op integer to atomic variable
* @i: integer value to $op
* @v: pointer to the atomic variable
*
* Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
* smp_mb__{before,after}_atomic().
*/
/*
* atomic_$op_return() - $op integer to atomic variable and returns the result
* @i: integer value to $op
* @v: pointer to the atomic variable
*
* Atomically $ops @i to @v. Does imply a full memory barrier.
*/
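Concretely, the convention documented above means a caller that needs ordering around one of the void ops has to ask for it, while the _return forms are already full barriers. A sketch of both calling styles (kernel-flavoured pseudocode; v is some atomic_t and first_user_setup() is a hypothetical helper):

        /* void op: no implied ordering, bracket it if ordering is needed */
        smp_mb__before_atomic();
        atomic_add(1, &v);
        smp_mb__after_atomic();

        /* value-returning op: implies a full memory barrier by itself */
        if (atomic_add_return(1, &v) == 1)
                first_user_setup();     /* hypothetical: we were first */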
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* Force people to define core atomics */
# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \ /* we can build all atomic primitives from cmpxchg */
!defined(atomic_clear_mask) || !defined(atomic_set_mask)
# error "SMP requires a little arch-specific magic" #define ATOMIC_OP(op, c_op) \
# endif static inline void atomic_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
}
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c c_op i; \
}
#else
#include <linux/irqflags.h>
#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
raw_local_irq_save(flags); \
v->counter = v->counter c_op i; \
raw_local_irq_restore(flags); \
}
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
\
raw_local_irq_save(flags); \
ret = (v->counter = v->counter c_op i); \
raw_local_irq_restore(flags); \
\
return ret; \
}
#endif /* CONFIG_SMP */
#ifndef atomic_add_return
ATOMIC_OP_RETURN(add, +)
#endif
#ifndef atomic_sub_return
ATOMIC_OP_RETURN(sub, -)
#endif
#ifndef atomic_clear_mask
ATOMIC_OP(and, &)
#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
#endif #endif
#ifndef atomic_set_mask
#define CONFIG_ARCH_HAS_ATOMIC_OR
ATOMIC_OP(or, |)
#define atomic_set_mask(i, v) atomic_or((i), (v))
#endif
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
/* /*
* Atomic operations that C can't guarantee us. Useful for * Atomic operations that C can't guarantee us. Useful for
* resource counting etc.. * resource counting etc..
...@@ -33,8 +119,6 @@ ...@@ -33,8 +119,6 @@
#define ATOMIC_INIT(i) { (i) } #define ATOMIC_INIT(i) { (i) }
#ifdef __KERNEL__
/** /**
* atomic_read - read atomic variable * atomic_read - read atomic variable
* @v: pointer of type atomic_t * @v: pointer of type atomic_t
...@@ -42,7 +126,7 @@ ...@@ -42,7 +126,7 @@
* Atomically reads the value of @v. * Atomically reads the value of @v.
*/ */
#ifndef atomic_read #ifndef atomic_read
#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_read(v) ACCESS_ONCE((v)->counter)
#endif #endif
/** /**
...@@ -56,52 +140,6 @@ ...@@ -56,52 +140,6 @@
#include <linux/irqflags.h> #include <linux/irqflags.h>
/**
* atomic_add_return - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns the result
*/
#ifndef atomic_add_return
static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long flags;
int temp;
raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
temp = v->counter;
temp += i;
v->counter = temp;
raw_local_irq_restore(flags);
return temp;
}
#endif
/**
* atomic_sub_return - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns the result
*/
#ifndef atomic_sub_return
static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long flags;
int temp;
raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
temp = v->counter;
temp -= i;
v->counter = temp;
raw_local_irq_restore(flags);
return temp;
}
#endif
static inline int atomic_add_negative(int i, atomic_t *v) static inline int atomic_add_negative(int i, atomic_t *v)
{ {
return atomic_add_return(i, v) < 0; return atomic_add_return(i, v) < 0;
...@@ -139,49 +177,11 @@ static inline void atomic_dec(atomic_t *v) ...@@ -139,49 +177,11 @@ static inline void atomic_dec(atomic_t *v)
static inline int __atomic_add_unless(atomic_t *v, int a, int u) static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{ {
int c, old; int c, old;
c = atomic_read(v); c = atomic_read(v);
while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c) while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
c = old; c = old;
return c; return c;
}
/**
* atomic_clear_mask - Atomically clear bits in atomic variable
* @mask: Mask of the bits to be cleared
* @v: pointer of type atomic_t
*
* Atomically clears the bits set in @mask from @v
*/
#ifndef atomic_clear_mask
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
unsigned long flags;
mask = ~mask;
raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
v->counter &= mask;
raw_local_irq_restore(flags);
} }
#endif
/**
* atomic_set_mask - Atomically set bits in atomic variable
* @mask: Mask of the bits to be set
* @v: pointer of type atomic_t
*
* Atomically sets the bits set in @mask in @v
*/
#ifndef atomic_set_mask
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
v->counter |= mask;
raw_local_irq_restore(flags);
}
#endif
#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_ATOMIC_H */ #endif /* __ASM_GENERIC_ATOMIC_H */
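The SMP half of the generic header now builds everything from cmpxchg(): read the counter, compute the new value, and retry the compare-and-swap until no other CPU changed the counter in between, exactly the shape of the ATOMIC_OP_RETURN() loop above. The same loop as a self-contained userspace program, with the GCC/Clang __sync builtin standing in for the kernel's cmpxchg() (illustrative only):

        #include <stdio.h>

        typedef struct { int counter; } atomic_t;

        /* add_return built from compare-and-swap, mirroring ATOMIC_OP_RETURN(add, +) */
        static int atomic_add_return(int i, atomic_t *v)
        {
                int c, old;

                c = v->counter;
                while ((old = __sync_val_compare_and_swap(&v->counter, c, c + i)) != c)
                        c = old;        /* another thread won; retry from its value */

                return c + i;
        }

        int main(void)
        {
                atomic_t v = { .counter = 40 };

                printf("%d\n", atomic_add_return(2, &v));       /* prints 42 */
                return 0;
        }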
...@@ -20,10 +20,22 @@ typedef struct { ...@@ -20,10 +20,22 @@ typedef struct {
extern long long atomic64_read(const atomic64_t *v); extern long long atomic64_read(const atomic64_t *v);
extern void atomic64_set(atomic64_t *v, long long i); extern void atomic64_set(atomic64_t *v, long long i);
extern void atomic64_add(long long a, atomic64_t *v);
extern long long atomic64_add_return(long long a, atomic64_t *v); #define ATOMIC64_OP(op) \
extern void atomic64_sub(long long a, atomic64_t *v); extern void atomic64_##op(long long a, atomic64_t *v);
extern long long atomic64_sub_return(long long a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \
extern long long atomic64_##op##_return(long long a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)
#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
extern long long atomic64_dec_if_positive(atomic64_t *v); extern long long atomic64_dec_if_positive(atomic64_t *v);
extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n); extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
extern long long atomic64_xchg(atomic64_t *v, long long new); extern long long atomic64_xchg(atomic64_t *v, long long new);
......
...@@ -70,53 +70,42 @@ void atomic64_set(atomic64_t *v, long long i) ...@@ -70,53 +70,42 @@ void atomic64_set(atomic64_t *v, long long i)
} }
EXPORT_SYMBOL(atomic64_set); EXPORT_SYMBOL(atomic64_set);
void atomic64_add(long long a, atomic64_t *v) #define ATOMIC64_OP(op, c_op) \
{ void atomic64_##op(long long a, atomic64_t *v) \
unsigned long flags; { \
raw_spinlock_t *lock = lock_addr(v); unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
raw_spin_lock_irqsave(lock, flags); \
v->counter += a; raw_spin_lock_irqsave(lock, flags); \
raw_spin_unlock_irqrestore(lock, flags); v->counter c_op a; \
} raw_spin_unlock_irqrestore(lock, flags); \
EXPORT_SYMBOL(atomic64_add); } \
EXPORT_SYMBOL(atomic64_##op);
long long atomic64_add_return(long long a, atomic64_t *v)
{ #define ATOMIC64_OP_RETURN(op, c_op) \
unsigned long flags; long long atomic64_##op##_return(long long a, atomic64_t *v) \
raw_spinlock_t *lock = lock_addr(v); { \
long long val; unsigned long flags; \
raw_spinlock_t *lock = lock_addr(v); \
raw_spin_lock_irqsave(lock, flags); long long val; \
val = v->counter += a; \
raw_spin_unlock_irqrestore(lock, flags); raw_spin_lock_irqsave(lock, flags); \
return val; val = (v->counter c_op a); \
} raw_spin_unlock_irqrestore(lock, flags); \
EXPORT_SYMBOL(atomic64_add_return); return val; \
} \
void atomic64_sub(long long a, atomic64_t *v) EXPORT_SYMBOL(atomic64_##op##_return);
{
unsigned long flags; #define ATOMIC64_OPS(op, c_op) \
raw_spinlock_t *lock = lock_addr(v); ATOMIC64_OP(op, c_op) \
ATOMIC64_OP_RETURN(op, c_op)
raw_spin_lock_irqsave(lock, flags);
v->counter -= a; ATOMIC64_OPS(add, +=)
raw_spin_unlock_irqrestore(lock, flags); ATOMIC64_OPS(sub, -=)
}
EXPORT_SYMBOL(atomic64_sub); #undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
long long atomic64_sub_return(long long a, atomic64_t *v) #undef ATOMIC64_OP
{
unsigned long flags;
raw_spinlock_t *lock = lock_addr(v);
long long val;
raw_spin_lock_irqsave(lock, flags);
val = v->counter -= a;
raw_spin_unlock_irqrestore(lock, flags);
return val;
}
EXPORT_SYMBOL(atomic64_sub_return);
long long atomic64_dec_if_positive(atomic64_t *v) long long atomic64_dec_if_positive(atomic64_t *v)
{ {
......