Commit 54064592 authored by Matthew Wilcox, committed by Linus Torvalds

[PATCH] parisc: spinlock fixes

 - Fix SMP f_list corruption problem.

   This was rather subtle.  It turns out that gcc was doing a small
   amount of reordering around the file_lock because it doesn't see our
   spinlock implementation as being a barrier (see the sketch after this
   list).  To fix this, I:

   - Added the appropriate barriers to all the spinlocks.
   - Removed the open-coded atomic spinlock and redid it as a proper one.

   SMP now seems stable on a 2xA500 and has survived a 10-hour, 35-loop
   make -j 4 kernel compile without showing any problems (previously it
   usually fell over in the first loop).

 - Since we have one or two users who don't want SMP, made the code
   base compile for UP again ...

 - Added the missing parentheses for pointer dereferencing in
   __ldcw_align().
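
   For illustration only (not part of the original commit message): a minimal
   sketch of the compiler-reordering problem from the first item above.  The
   toy_* names are hypothetical and the lock is a stand-in; the point is only
   that without a "memory" clobber (or an explicit barrier such as mb()), gcc
   may cache the protected data in registers and move its accesses across the
   lock and unlock.

/* toy_lock.c -- hypothetical sketch, not the parisc implementation */

static int shared_count;                        /* data the lock should protect    */
static volatile unsigned int lockword = 1;      /* 1 = free, 0 = held (ldcw style) */

/* No compiler barrier: gcc may keep shared_count in a register and move the
 * update outside the "critical section" -- the f_list corruption scenario.
 */
static inline void toy_lock_weak(void)
{
        while (lockword == 0)
                ;
        lockword = 0;
        /* nothing tells the compiler that memory may change under the lock */
}

/* A "memory" clobber acts as a compiler barrier: memory accesses must be
 * completed before the asm and reloaded after it.
 */
static inline void toy_lock_strong(void)
{
        while (lockword == 0)
                ;
        lockword = 0;
        __asm__ __volatile__("" : : : "memory");
}

void toy_increment(void)
{
        toy_lock_strong();
        shared_count++;                         /* stays between lock and unlock */
        __asm__ __volatile__("" : : : "memory");
        lockword = 1;                           /* unlock */
}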
Committed-by: James Bottomley <jejb@parisc-linux.org>
Committed-by: Thibaut Varene <varenet@parisc-linux.org>
parent 261f72ea
@@ -13,8 +13,8 @@
#include <asm/atomic.h>
#ifdef CONFIG_SMP
-atomic_lock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
-        [0 ... (ATOMIC_HASH_SIZE-1)] = (atomic_lock_t) { { 1, 1, 1, 1 } }
+spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+        [0 ... (ATOMIC_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
};
#endif
@@ -23,10 +23,10 @@ unsigned long __xchg64(unsigned long x, unsigned long *ptr)
{
        unsigned long temp, flags;
-       atomic_spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+       _atomic_spin_lock_irqsave(ptr, flags);
        temp = *ptr;
        *ptr = x;
-       atomic_spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+       _atomic_spin_unlock_irqrestore(ptr, flags);
        return temp;
}
#endif
@@ -36,10 +36,10 @@ unsigned long __xchg32(int x, int *ptr)
        unsigned long flags;
        long temp;
-       atomic_spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+       _atomic_spin_lock_irqsave(ptr, flags);
        temp = (long) *ptr;     /* XXX - sign extension wanted? */
        *ptr = x;
-       atomic_spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+       _atomic_spin_unlock_irqrestore(ptr, flags);
        return (unsigned long)temp;
}
@@ -49,10 +49,10 @@ unsigned long __xchg8(char x, char *ptr)
        unsigned long flags;
        long temp;
-       atomic_spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+       _atomic_spin_lock_irqsave(ptr, flags);
        temp = (long) *ptr;     /* XXX - sign extension wanted? */
        *ptr = x;
-       atomic_spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+       _atomic_spin_unlock_irqrestore(ptr, flags);
        return (unsigned long)temp;
}
@@ -63,10 +63,10 @@ unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsi
        unsigned long flags;
        unsigned long prev;
-       atomic_spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+       _atomic_spin_lock_irqsave(ptr, flags);
        if ((prev = *ptr) == old)
                *ptr = new;
-       atomic_spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+       _atomic_spin_unlock_irqrestore(ptr, flags);
        return prev;
}
#endif
@@ -76,9 +76,9 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign
        unsigned long flags;
        unsigned int prev;
-       atomic_spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+       _atomic_spin_lock_irqsave(ptr, flags);
        if ((prev = *ptr) == old)
                *ptr = new;
-       atomic_spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+       _atomic_spin_unlock_irqrestore(ptr, flags);
        return (unsigned long)prev;
}
@@ -14,10 +14,9 @@
 */
#ifdef CONFIG_SMP
+#include <asm/spinlock.h>
#include <asm/cache.h>          /* we use L1_CACHE_BYTES */
-typedef spinlock_t atomic_lock_t;
/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
@@ -25,37 +24,28 @@ typedef spinlock_t atomic_lock_t;
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-extern atomic_lock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
-static inline void atomic_spin_lock(atomic_lock_t *a)
-{
-        while (__ldcw(a) == 0)
-                while (a->lock[0] == 0);
-}
+/* Can't use _raw_spin_lock_irq because of #include problems, so
+ * this is the substitute */
+#define _atomic_spin_lock_irqsave(l,f) do {     \
+        spinlock_t *s = ATOMIC_HASH(l);         \
+        local_irq_save(f);                      \
+        _raw_spin_lock(s);                      \
+} while(0)
+#define _atomic_spin_unlock_irqrestore(l,f) do {        \
+        spinlock_t *s = ATOMIC_HASH(l);                 \
+        _raw_spin_unlock(s);                            \
+        local_irq_restore(f);                           \
+} while(0)
-static inline void atomic_spin_unlock(atomic_lock_t *a)
-{
-        a->lock[0] = 1;
-}
#else
-# define ATOMIC_HASH_SIZE 1
-# define ATOMIC_HASH(a) (0)
-# define atomic_spin_lock(x) (void)(x)
-# define atomic_spin_unlock(x) do { } while(0)
+# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
+# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
-/* copied from <linux/spinlock.h> and modified */
-#define atomic_spin_lock_irqsave(lock, flags) do {      \
-        local_irq_save(flags);                          \
-        atomic_spin_lock(lock);                         \
-} while (0)
-#define atomic_spin_unlock_irqrestore(lock, flags) do { \
-        atomic_spin_unlock(lock);                       \
-        local_irq_restore(flags);                       \
-} while (0)
/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees unconsistent values.
 *
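
Aside (not part of the patch): a standalone sketch of the hashing that
ATOMIC_HASH() performs above -- addresses in the same L1 cacheline map to the
same lock, so at most ATOMIC_HASH_SIZE locks are ever used.  L1_CACHE_BYTES is
assumed to be 64 here purely for the example; the real value comes from
<asm/cache.h>.

/* hash_demo.c -- illustration only */
#include <stdio.h>

#define L1_CACHE_BYTES   64     /* assumed for the demo */
#define ATOMIC_HASH_SIZE 4

static unsigned int hash_index(const void *a)
{
        /* same arithmetic as the ATOMIC_HASH() macro above */
        return (((unsigned long)a) / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1);
}

int main(void)
{
        static int v[32];

        /* v[0] and v[1] share a cacheline, so they hash to the same lock;
         * v[0] and v[16] are 64 bytes apart, so they normally do not. */
        printf("v[0] -> %u, v[1] -> %u, v[16] -> %u\n",
               hash_index(&v[0]), hash_index(&v[1]), hash_index(&v[16]));
        return 0;
}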
@@ -150,22 +140,22 @@ static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
        int ret;
        unsigned long flags;
-       atomic_spin_lock_irqsave(ATOMIC_HASH(v), flags);
+       _atomic_spin_lock_irqsave(v, flags);
        ret = (v->counter += i);
-       atomic_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+       _atomic_spin_unlock_irqrestore(v, flags);
        return ret;
}
static __inline__ void atomic_set(atomic_t *v, int i)
{
        unsigned long flags;
-       atomic_spin_lock_irqsave(ATOMIC_HASH(v), flags);
+       _atomic_spin_lock_irqsave(v, flags);
        v->counter = i;
-       atomic_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+       _atomic_spin_unlock_irqrestore(v, flags);
}
static __inline__ int atomic_read(const atomic_t *v)
@@ -38,9 +38,9 @@ static __inline__ void set_bit(int nr, volatile unsigned long * address)
        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
-       atomic_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+       _atomic_spin_lock_irqsave(addr, flags);
        *addr |= mask;
-       atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+       _atomic_spin_unlock_irqrestore(addr, flags);
}
static __inline__ void __set_bit(int nr, volatile unsigned long * address)
@@ -61,9 +61,9 @@ static __inline__ void clear_bit(int nr, volatile unsigned long * address)
        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
-       atomic_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+       _atomic_spin_lock_irqsave(addr, flags);
        *addr &= ~mask;
-       atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+       _atomic_spin_unlock_irqrestore(addr, flags);
}
static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * address)
@@ -84,9 +84,9 @@ static __inline__ void change_bit(int nr, volatile unsigned long * address)
        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
-       atomic_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+       _atomic_spin_lock_irqsave(addr, flags);
        *addr ^= mask;
-       atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+       _atomic_spin_unlock_irqrestore(addr, flags);
}
static __inline__ void __change_bit(int nr, volatile unsigned long * address)
@@ -108,10 +108,10 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * address)
        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
-       atomic_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+       _atomic_spin_lock_irqsave(addr, flags);
        oldbit = (*addr & mask) ? 1 : 0;
        *addr |= mask;
-       atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+       _atomic_spin_unlock_irqrestore(addr, flags);
        return oldbit;
}
@@ -139,10 +139,10 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addres
        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
-       atomic_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+       _atomic_spin_lock_irqsave(addr, flags);
        oldbit = (*addr & mask) ? 1 : 0;
        *addr &= ~mask;
-       atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+       _atomic_spin_unlock_irqrestore(addr, flags);
        return oldbit;
}
@@ -170,10 +170,10 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addre
        addr += (nr >> SHIFT_PER_LONG);
        mask = 1L << CHOP_SHIFTCOUNT(nr);
-       atomic_spin_lock_irqsave(ATOMIC_HASH(addr), flags);
+       _atomic_spin_lock_irqsave(addr, flags);
        oldbit = (*addr & mask) ? 1 : 0;
        *addr ^= mask;
-       atomic_spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
+       _atomic_spin_unlock_irqrestore(addr, flags);
        return oldbit;
}
@@ -27,21 +27,35 @@ static inline int spin_is_locked(spinlock_t *x)
static inline void _raw_spin_lock(spinlock_t *x)
{
-       volatile unsigned int *a = __ldcw_align(x);
+       volatile unsigned int *a;
+       mb();
+       a = __ldcw_align(x);
        while (__ldcw(a) == 0)
                while (*a == 0);
+       mb();
}
static inline void _raw_spin_unlock(spinlock_t *x)
{
-       volatile unsigned int *a = __ldcw_align(x);
+       volatile unsigned int *a;
+       mb();
+       a = __ldcw_align(x);
        *a = 1;
+       mb();
}
static inline int _raw_spin_trylock(spinlock_t *x)
{
-       volatile unsigned int *a = __ldcw_align(x);
-       return __ldcw(a) != 0;
+       volatile unsigned int *a;
+       int ret;
+       mb();
+       a = __ldcw_align(x);
+       ret = __ldcw(a) != 0;
+       mb();
+       return ret;
}
#define spin_lock_own(LOCK, LOCATION)   ((void)0)
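
Aside (not part of the patch): a portable C11 analogue of the ldcw-based lock
above, for readers unfamiliar with PA-RISC.  ldcw atomically loads a word and
writes zero to it, so a nonzero return value means the caller took the lock;
atomic_exchange() stands in for ldcw here and the fences stand in for mb().
The toy_* names are hypothetical.

/* toy_ldcw_lock.c -- illustration only */
#include <stdatomic.h>

typedef struct {
        atomic_uint word;       /* 1 = free, 0 = held; initialise to 1 */
} toy_lock_t;

static inline void toy_lock(toy_lock_t *x)
{
        atomic_thread_fence(memory_order_seq_cst);      /* mb()               */
        while (atomic_exchange(&x->word, 0) == 0)       /* ldcw: grab or fail */
                while (atomic_load(&x->word) == 0)      /* spin on plain read */
                        ;
        atomic_thread_fence(memory_order_seq_cst);      /* mb()               */
}

static inline void toy_unlock(toy_lock_t *x)
{
        atomic_thread_fence(memory_order_seq_cst);      /* mb()   */
        atomic_store(&x->word, 1);                      /* *a = 1 */
}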
@@ -80,7 +80,7 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
#define mtctl(gr, cr) \
        __asm__ __volatile__("mtctl %0,%1" \
                : /* no outputs */ \
-               : "r" (gr), "i" (cr))
+               : "r" (gr), "i" (cr) : "memory")
/* these are here to de-mystefy the calling code, and to provide hooks */
/* which I needed for debugging EIEM problems -PB */
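
Aside (not part of the patch): what the "memory" clobber added to mtctl/mtsp
buys.  The hypothetical sketch below uses an empty asm body because only the
constraint list matters for the compiler's view of ordering: without "memory",
gcc may assume memory is unchanged across the asm and reuse previously loaded
values; with it, gcc must reload them.

/* toy_clobber.c -- illustration only */
static int status;      /* imagine this may change once the asm has executed */

int poll_without_clobber(void)
{
        int before = status;
        __asm__ __volatile__("" : : "r" (1));            /* no "memory" clobber      */
        return before + status;                          /* status may not be reread */
}

int poll_with_clobber(void)
{
        int before = status;
        __asm__ __volatile__("" : : "r" (1) : "memory"); /* compiler barrier         */
        return before + status;                          /* status is reread here    */
}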
@@ -102,7 +102,7 @@ static inline void set_eiem(unsigned long val)
#define mtsp(gr, cr) \
        __asm__ __volatile__("mtsp %0,%1" \
                : /* no outputs */ \
-               : "r" (gr), "i" (cr))
+               : "r" (gr), "i" (cr) : "memory")
/*
@@ -154,7 +154,7 @@ static inline void set_eiem(unsigned long val)
   for the semaphore. */
#define __PA_LDCW_ALIGNMENT 16
#define __ldcw_align(a) ({ \
-       unsigned long __ret = (unsigned long) a; \
+       unsigned long __ret = (unsigned long) &(a)->lock[0]; \
        __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1); \
        (volatile unsigned int *) __ret; \
})
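
Aside (not part of the patch): what __ldcw_align() computes, as a standalone
sketch.  ldcw needs a 16-byte aligned operand, so the spinlock carries an array
of lock words and the macro rounds up to the aligned word inside it; the change
above makes the macro take the address of lock[0] explicitly.  The toy_* names
are hypothetical.

/* toy_align.c -- illustration only */
#include <stdio.h>

#define TOY_LDCW_ALIGNMENT 16UL         /* __PA_LDCW_ALIGNMENT */

struct toy_spinlock {
        volatile unsigned int lock[4];  /* 16 bytes of candidate lock words */
};

static volatile unsigned int *toy_ldcw_align(struct toy_spinlock *a)
{
        unsigned long ret = (unsigned long) &a->lock[0];        /* the fixed form */

        /* round up to the next 16-byte boundary, as the macro above does */
        ret = (ret + TOY_LDCW_ALIGNMENT - 1) & ~(TOY_LDCW_ALIGNMENT - 1);
        return (volatile unsigned int *) ret;
}

int main(void)
{
        struct toy_spinlock l;

        printf("lock struct at %p, aligned lock word at %p\n",
               (void *) &l, (void *) toy_ldcw_align(&l));
        return 0;
}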