Commit 8395c735 authored by David S. Miller

[SPARC64]: Fix spinlock macros.

This undoes bugs introduced by the generic
out-of-line spinlock patches.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ee9e2c8a
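
In the hunks below, the old inline assembly hard-codes global registers such as %g5, %g7 and %g2 as scratch space behind the compiler's back, while the replacement code obtains temporaries through "=&r" output operands and moves each contended spin loop into .subsection 2 so the uncontended path falls straight through. The following is a minimal standalone sketch of that pattern, assuming a sparc64 GCC target and purely illustrative names (my_spinlock_t, my_spin_lock, my_spin_unlock); it restates the locking scheme the patch switches to, and the actual diff follows.

/*
 * Sketch only, not the kernel code itself: an ldstub-based test-and-set
 * spinlock where the scratch register comes from an "=&r" output operand
 * and the contended spin loop lives out of line in .subsection 2.
 */
typedef unsigned char my_spinlock_t;

static inline void my_spin_lock(my_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"	/* atomically set the lock byte, old value lands in tmp */
"	brnz,pn		%0, 2f\n"	/* nonzero: lock was held, go spin out of line */
"	 membar		#StoreLoad | #StoreStore\n"	/* acquire barrier in the delay slot */
"	.subsection	2\n"		/* slow path assembled out of line */
"2:	ldub		[%1], %0\n"	/* plain load while waiting, no atomic traffic */
"	brnz,pt		%0, 2b\n"
"	 membar		#LoadLoad\n"
"	ba,a,pt		%%xcc, 1b\n"	/* lock looks free, retry the ldstub */
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}

static inline void my_spin_unlock(my_spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"	/* release barrier */
"	stb		%%g0, [%0]"	/* clear the lock byte */
	: /* no outputs */
	: "r" (lock)
	: "memory");
}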
@@ -41,56 +41,69 @@ typedef unsigned char spinlock_t;
 do {	membar("#LoadLoad");	\
 } while(*((volatile unsigned char *)lock))
 
-static __inline__ void _raw_spin_lock(spinlock_t *lock)
+static inline void _raw_spin_lock(spinlock_t *lock)
 {
-	__asm__ __volatile__("1:	ldstub		[%0], %%g7\n\t"
-			     "brnz,pn	%%g7, 2f\n\t"
-			     "membar	#StoreLoad | #StoreStore\n\t"
-			     "b		3f\n\t"
-			     "2:	ldub		[%0], %%g7\n\t"
-			     "brnz,pt	%%g7, 2b\n\t"
-			     "membar	#LoadLoad\n\t"
-			     "ba,a,pt	%%xcc, 1b\n\t"
-			     "3:\n\t"
-			     : : "r" (lock) : "memory");
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"1:	ldstub		[%1], %0\n"
+"	brnz,pn		%0, 2f\n"
+"	 membar		#StoreLoad | #StoreStore\n"
+"	.subsection	2\n"
+"2:	ldub		[%1], %0\n"
+"	brnz,pt		%0, 2b\n"
+"	 membar		#LoadLoad\n"
+"	ba,a,pt		%%xcc, 1b\n"
+"	.previous"
+	: "=&r" (tmp)
+	: "r" (lock)
+	: "memory");
 }
 
-static __inline__ int _raw_spin_trylock(spinlock_t *lock)
+static inline int _raw_spin_trylock(spinlock_t *lock)
 {
-	unsigned int result;
-	__asm__ __volatile__("ldstub [%1], %0\n\t"
-			     "membar #StoreLoad | #StoreStore"
-			     : "=r" (result)
-			     : "r" (lock)
-			     : "memory");
-	return (result == 0);
+	unsigned long result;
+
+	__asm__ __volatile__(
+"	ldstub		[%1], %0\n"
+"	membar		#StoreLoad | #StoreStore"
+	: "=r" (result)
+	: "r" (lock)
+	: "memory");
+
+	return (result == 0UL);
 }
 
-static __inline__ void _raw_spin_unlock(spinlock_t *lock)
+static inline void _raw_spin_unlock(spinlock_t *lock)
 {
-	__asm__ __volatile__("membar	#StoreStore | #LoadStore\n\t"
-			     "stb	%%g0, [%0]"
-			     : /* No outputs */
-			     : "r" (lock)
-			     : "memory");
+	__asm__ __volatile__(
+"	membar		#StoreStore | #LoadStore\n"
+"	stb		%%g0, [%0]"
+	: /* No outputs */
+	: "r" (lock)
+	: "memory");
 }
 
-static __inline__ void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
+static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
 {
-	__asm__ __volatile__ ("1:ldstub	[%0], %%g7\n\t"
-			      "brnz,pn	%%g7, 2f\n\t"
-			      "membar	#StoreLoad | #StoreStore\n\t"
-			      "b	4f\n\t"
-			      "2: rdpr	%%pil, %%g2	! Save PIL\n\t"
-			      "wrpr	%1, %%pil	! Set previous PIL\n\t"
-			      "3:ldub	[%0], %%g7	! Spin on lock set\n\t"
-			      "brnz,pt	%%g7, 3b\n\t"
-			      "membar	#LoadLoad\n\t"
-			      "ba,pt	%%xcc, 1b	! Retry lock acquire\n\t"
-			      "wrpr	%%g2, %%pil	! Restore PIL\n\t"
-			      "4:\n\t"
-			      : : "r"(lock), "r"(flags) : "memory");
+	unsigned long tmp1, tmp2;
+
+	__asm__ __volatile__(
+"1:	ldstub		[%2], %0\n"
+"	brnz,pn		%0, 2f\n"
+"	 membar		#StoreLoad | #StoreStore\n"
+"	.subsection	2\n"
+"2:	rdpr		%%pil, %1\n"
+"	wrpr		%3, %%pil\n"
+"3:	ldub		[%2], %0\n"
+"	brnz,pt		%0, 3b\n"
+"	 membar		#LoadLoad\n"
+"	ba,pt		%%xcc, 1b\n"
+"	 wrpr		%1, %%pil\n"
+"	.previous"
+	: "=&r" (tmp1), "=&r" (tmp2)
+	: "r"(lock), "r"(flags)
+	: "memory");
 }
 
 #else /* !(CONFIG_DEBUG_SPINLOCK) */
@@ -131,85 +144,102 @@ typedef unsigned int rwlock_t;
 #define rwlock_init(lp)		do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
 #define rwlock_is_locked(x)	(*(x) != RW_LOCK_UNLOCKED)
 
-static void __inline__ __read_lock(rwlock_t *lock)
+static void inline __read_lock(rwlock_t *lock)
 {
-	__asm__ __volatile__ ("b	1f\n\t"
-			      "99:\n\t"
-			      "ldsw	[%0], %%g5\n\t"
-			      "brlz,pt	%%g5, 99b\n\t"
-			      "membar	#LoadLoad\n\t"
-			      "ba,a,pt	%%xcc, 4f\n\t"
-			      "1:	ldsw	[%0], %%g5\n\t"
-			      "brlz,pn	%%g5, 99b\n\t"
-			      "4:	add	%%g5, 1, %%g7\n\t"
-			      "cas	[%0], %%g5, %%g7\n\t"
-			      "cmp	%%g5, %%g7\n\t"
-			      "bne,pn	%%icc, 1b\n\t"
-			      "membar	#StoreLoad | #StoreStore\n\t"
-			      : : "r"(lock) : "memory");
+	unsigned long tmp1, tmp2;
+
+	__asm__ __volatile__ (
+"1:	ldsw		[%2], %0\n"
+"	brlz,pn		%0, 2f\n"
+"4:	 add		%0, 1, %1\n"
+"	cas		[%2], %0, %1\n"
+"	cmp		%0, %1\n"
+"	bne,pn		%%icc, 1b\n"
+"	 membar		#StoreLoad | #StoreStore\n"
+"	.subsection	2\n"
+"2:	ldsw		[%2], %0\n"
+"	brlz,pt		%0, 2b\n"
+"	 membar		#LoadLoad\n"
+"	ba,a,pt		%%xcc, 4b\n"
+"	.previous"
+	: "=&r" (tmp1), "=&r" (tmp2)
+	: "r" (lock)
+	: "memory");
 }
 
-static void __inline__ __read_unlock(rwlock_t *lock)
+static void inline __read_unlock(rwlock_t *lock)
 {
-	__asm__ __volatile__ ("1:	lduw	[%0], %%g5\n\t"
-			      "sub	%%g5, 1, %%g7\n\t"
-			      "cas	[%0], %%g5, %%g7\n\t"
-			      "cmp	%%g5, %%g7\n\t"
-			      "be,pt	%%xcc, 2f\n\t"
-			      "membar	#StoreLoad | #StoreStore\n\t"
-			      "ba,a,pt	%%xcc, 1b\n\t"
-			      "2:\n\t"
-			      : : "r" (lock) : "memory");
+	unsigned long tmp1, tmp2;
+
+	__asm__ __volatile__(
+"1:	lduw	[%2], %0\n"
+"	sub	%0, 1, %1\n"
+"	cas	[%2], %0, %1\n"
+"	cmp	%0, %1\n"
+"	bne,pn	%%xcc, 1b\n"
+"	 membar	#StoreLoad | #StoreStore"
+	: "=&r" (tmp1), "=&r" (tmp2)
+	: "r" (lock)
+	: "memory");
 }
 
-static void __inline__ __write_lock(rwlock_t *lock)
+static void inline __write_lock(rwlock_t *lock)
 {
-	__asm__ __volatile__ ("sethi	%%hi(0x80000000), %%g2\n\t"
-			      "b	1f\n\t"
-			      "99:\n\t"
-			      "lduw	[%0], %%g5\n\t"
-			      "brnz,pt	%%g5, 99b\n\t"
-			      "membar	#LoadLoad\n\t"
-			      "ba,a,pt	%%xcc, 4f\n\t"
-			      "1:	lduw	[%0], %%g5\n\t"
-			      "brnz,pn	%%g5, 99b\n\t"
-			      "4:	or	%%g5, %%g2, %%g7\n\t"
-			      "cas	[%0], %%g5, %%g7\n\t"
-			      "cmp	%%g5, %%g7\n\t"
-			      "be,pt	%%icc, 2f\n\t"
-			      "membar	#StoreLoad | #StoreStore\n\t"
-			      "ba,a,pt	%%xcc, 1b\n\t"
-			      "2:\n\t"
-			      : : "r"(lock) : "memory");
+	unsigned long mask, tmp1, tmp2;
+
+	mask = 0x80000000UL;
+
+	__asm__ __volatile__(
+"1:	lduw		[%2], %0\n"
+"	brnz,pn		%0, 2f\n"
+"4:	 or		%0, %3, %1\n"
+"	cas		[%2], %0, %1\n"
+"	cmp		%0, %1\n"
+"	bne,pn		%%icc, 1b\n"
+"	 membar		#StoreLoad | #StoreStore\n"
+"	.subsection	2\n"
+"2:	lduw		[%2], %0\n"
+"	brnz,pt		%0, 2b\n"
+"	 membar		#LoadLoad\n"
+"	ba,a,pt		%%xcc, 4b\n"
+"	.previous"
+	: "=&r" (tmp1), "=&r" (tmp2)
+	: "r" (lock), "r" (mask)
+	: "memory");
 }
 
-static void __inline__ __write_unlock(rwlock_t *lock)
+static void inline __write_unlock(rwlock_t *lock)
 {
-	__asm__ __volatile__ ("membar	#LoadStore | #StoreStore\n\t"
-			      "retl\n\t"
-			      "stw	%%g0, [%0]\n\t"
-			      : : "r"(lock) : "memory");
+	__asm__ __volatile__(
+"	membar		#LoadStore | #StoreStore\n"
+"	stw		%%g0, [%0]"
+	: /* no outputs */
+	: "r" (lock)
+	: "memory");
 }
 
-static int __inline__ __write_trylock(rwlock_t *lock)
+static int inline __write_trylock(rwlock_t *lock)
 {
-	__asm__ __volatile__ ("sethi	%%hi(0x80000000), %%g2\n\t"
-			      "1:	lduw	[%0], %%g5\n\t"
-			      "brnz,pn	%%g5, 100f\n\t"
-			      "4:	or	%%g5, %%g2, %%g7\n\t"
-			      "cas	[%0], %%g5, %%g7\n\t"
-			      "cmp	%%g5, %%g7\n\t"
-			      "be,pt	%%icc, 99f\n\t"
-			      "membar	#StoreLoad | #StoreStore\n\t"
-			      "ba,pt	%%xcc, 1b\n\t"
-			      "99:\n\t"
-			      "retl\n\t"
-			      "mov	1, %0\n\t"
-			      "100:\n\t"
-			      "retl\n\t"
-			      "mov	0, %0\n\t"
-			      : : "r"(lock) : "memory");
-	return rwlock_is_locked(lock);
+	unsigned long mask, tmp1, tmp2, result;
+
+	mask = 0x80000000UL;
+
+	__asm__ __volatile__(
+"	mov		0, %2\n"
+"1:	lduw		[%3], %0\n"
+"	brnz,pn		%0, 2f\n"
+"	 or		%0, %4, %1\n"
+"	cas		[%3], %0, %1\n"
+"	cmp		%0, %1\n"
+"	bne,pn		%%icc, 1b\n"
+"	 membar		#StoreLoad | #StoreStore\n"
+"	mov		1, %2\n"
+"2:"
+	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
+	: "r" (lock), "r" (mask)
+	: "memory");
+
+	return result;
 }
 
 #define _raw_read_lock(p)	__read_lock(p)