Commit 6e071852 authored by Matthew Wilcox's avatar Matthew Wilcox Committed by Matthew Wilcox

[PARISC] Improve rwlock implementation

Rewrite rwlock implementation to avoid various deadlocks in the current
scheme.
Signed-off-by: Matthew Wilcox <matthew@wil.cx>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
parent 9c2c5457
...@@ -56,50 +56,70 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x) ...@@ -56,50 +56,70 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
} }
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * The spinlock is held by the writer, preventing any readers or other
 * writers from grabbing the rwlock.  Readers use the lock to serialise their
 * access to the counter (which records how many readers currently hold the
 * lock).  Linux rwlocks are unfair to writers; they can be starved for
 * an indefinite time by readers.  They can also be taken in interrupt context,
 * so we have to disable interrupts when acquiring the spin lock to be sure
 * that an interrupting reader doesn't get an inconsistent view of the lock.
 */
/*
 * Acquire the rwlock for reading: hold the internal spinlock just long
 * enough to bump the reader count.  Interrupts are disabled around the
 * critical section so an interrupt-context lock attempt on this CPU
 * cannot deadlock against a half-finished update of the counter.
 */
static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_spin_lock(&rw->lock);

	rw->counter++;

	__raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}
/*
 * Release a read hold: decrement the reader count under the internal
 * spinlock, with interrupts disabled for the same reason as in
 * __raw_read_lock.
 */
static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_spin_lock(&rw->lock);

	rw->counter--;

	__raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}
/* write_lock is less trivial. We optimistically grab the lock and check static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
* if we surprised any readers. If so we release the lock and wait till {
* they're all gone before trying again unsigned long flags;
* retry:
* Also note that we don't use the _irqsave / _irqrestore suffixes here. local_irq_save(flags);
* If we're called with interrupts enabled and we've got readers (or other if (__raw_spin_trylock(&rw->lock)) {
* writers) in interrupt handlers someone fucked up and we'd dead-lock rw->counter++;
* sooner or later anyway. prumpf */ __raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
return 1;
}
local_irq_restore(flags);
/* If write-locked, we fail to acquire the lock */
if (rw->counter < 0)
return 0;
/* Wait until we have a realistic chance at the lock */
while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
cpu_relax();
goto retry;
}
/*
 * Acquire the rwlock for writing.  We take the internal spinlock and keep
 * holding it for the duration of the write hold; the counter is set to -1
 * to mark the write-locked state for readers.  If readers are present we
 * must back off completely (drop the spinlock and re-enable interrupts)
 * and busy-wait, otherwise an interrupt-context reader on this CPU could
 * deadlock against us.
 */
static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long flags;
retry:
	local_irq_save(flags);
	__raw_spin_lock(&rw->lock);

	if (rw->counter != 0) {
		/* Release the lock while readers drain, then try again. */
		__raw_spin_unlock(&rw->lock);
		local_irq_restore(flags);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	rw->counter = -1; /* mark as write-locked */
	mb();             /* order the counter store before the critical section */
	local_irq_restore(flags);
}
/*
 * Release a write hold: clear the write-locked marker and drop the
 * internal spinlock that __raw_write_lock left held.
 */
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	rw->counter = 0;
	__raw_spin_unlock(&rw->lock);
}
/*
 * Try to acquire the rwlock for writing without blocking.
 *
 * Returns 1 on success (spinlock held, counter marked -1), 0 if the
 * spinlock could not be taken immediately or readers currently hold the
 * lock.  Interrupts are disabled across the attempt so an interrupting
 * reader cannot observe a half-taken lock.
 */
static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long flags;
	int result = 0;

	local_irq_save(flags);
	if (__raw_spin_trylock(&rw->lock)) {
		if (rw->counter == 0) {
			rw->counter = -1;
			result = 1;
		} else {
			/* Read-locked.  Oh well. */
			__raw_spin_unlock(&rw->lock);
		}
	}
	local_irq_restore(flags);

	return result;
}
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment