Commit e5122ad4 authored by Anton Blanchard

ppc64: semaphore fixes based on report by ever watchful Olaf Hering

parent 64553593
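
The diff below follows two patterns. In the sleeper loops of __down() and __down_interruptible(), the open-coded "tsk->state = ..." stores are replaced with the <linux/sched.h> helpers: __set_task_state() (a plain store) before the task is added to the wait queue and when it goes back to TASK_RUNNING, and set_task_state() (store plus memory barrier) when the state is set back to sleeping inside the wait loop, where a concurrent waker could otherwise observe things out of order. In the semaphore.h and rwsem.h fast paths, the explicit smp_wmb()/smp_mb() calls are dropped, presumably because the value-returning atomics used there (atomic_dec_return(), atomic_add_return(), cmpxchg(), __sem_update_count()) already act as memory barriers on ppc64. For reference, a minimal sketch of the two task-state helpers as they looked in the 2.6-era <linux/sched.h>, quoted from memory (treat the exact bodies as an assumption, they are not part of this commit):

/*
 * __set_task_state() is a plain assignment; set_task_state() stores the
 * value and then issues a full memory barrier via set_mb().
 */
#define __set_task_state(tsk, state_value)              \
        do { (tsk)->state = (state_value); } while (0)

#define set_task_state(tsk, state_value)                \
        set_mb((tsk)->state, (state_value))

Which helper each call site gets, together with relying on the barrier behaviour of the ppc64 atomics instead of the open-coded barriers, appears to be the substance of the fix. __down_interruptible() also now removes itself from the wait queue before going back to TASK_RUNNING, matching the order already used in __down().
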
@@ -75,9 +75,8 @@ void __down(struct semaphore *sem)
         struct task_struct *tsk = current;
         DECLARE_WAITQUEUE(wait, tsk);
 
-        tsk->state = TASK_UNINTERRUPTIBLE;
+        __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
         add_wait_queue_exclusive(&sem->wait, &wait);
-        smp_wmb();
 
         /*
          * Try to get the semaphore. If the count is > 0, then we've
@@ -87,10 +86,10 @@ void __down(struct semaphore *sem)
          */
        while (__sem_update_count(sem, -1) <= 0) {
                schedule();
-               tsk->state = TASK_UNINTERRUPTIBLE;
+               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }
        remove_wait_queue(&sem->wait, &wait);
-       tsk->state = TASK_RUNNING;
+       __set_task_state(tsk, TASK_RUNNING);
 
        /*
         * If there are any more sleepers, wake one of them up so
@@ -106,9 +105,8 @@ int __down_interruptible(struct semaphore * sem)
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
 
-       tsk->state = TASK_INTERRUPTIBLE;
+       __set_task_state(tsk, TASK_INTERRUPTIBLE);
        add_wait_queue_exclusive(&sem->wait, &wait);
-       smp_wmb();
 
        while (__sem_update_count(sem, -1) <= 0) {
                if (signal_pending(current)) {
@@ -122,10 +120,11 @@ int __down_interruptible(struct semaphore * sem)
                        break;
                }
                schedule();
-               tsk->state = TASK_INTERRUPTIBLE;
+               set_task_state(tsk, TASK_INTERRUPTIBLE);
        }
-       tsk->state = TASK_RUNNING;
        remove_wait_queue(&sem->wait, &wait);
+       __set_task_state(tsk, TASK_RUNNING);
+
        wake_up(&sem->wait);
        return retval;
 }
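
Both wait loops above spin on __sem_update_count(sem, -1). A C-level restatement of that arch helper's semantics may help when reading them; the real helper in this file is an atomic lwarx/stwcx. loop, so this sketch (including the name sem_update_count_sketch) is an illustration of the intended behaviour, not code from the patch:

#include <asm/atomic.h>

/*
 * Illustrative only: clamp the old count at zero, add incr, and return
 * the old count.  With incr == -1, a positive old count means we got the
 * semaphore; otherwise the count is left at -1 ("there are sleepers") and
 * the caller goes to sleep.  The real helper does all of this atomically.
 */
static inline int sem_update_count_sketch(atomic_t *count, int incr)
{
        int old_count = atomic_read(count);
        int new_count = (old_count > 0 ? old_count : 0) + incr;

        atomic_set(count, new_count);   /* not atomic as a whole -- sketch only */
        return old_count;
}
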
 /*
- * include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
+ * include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
  * in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
  * by Paul Mackerras <paulus@samba.org>.
  *
@@ -74,9 +74,7 @@ static inline void init_rwsem(struct rw_semaphore *sem)
  */
 static inline void __down_read(struct rw_semaphore *sem)
 {
-       if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
-               smp_wmb();
-       else
+       if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
                rwsem_down_read_failed(sem);
 }
 
@@ -87,7 +85,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
-                       smp_wmb();
                        return 1;
                }
        }
@@ -103,9 +100,7 @@ static inline void __down_write(struct rw_semaphore *sem)
 
        tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                (atomic_t *)(&sem->count));
-       if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
-               smp_wmb();
-       else
+       if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
 }
 
@@ -115,7 +110,6 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 
        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
-       smp_wmb();
        return tmp == RWSEM_UNLOCKED_VALUE;
 }
 
@@ -126,9 +120,8 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
        int tmp;
 
-       smp_wmb();
        tmp = atomic_dec_return((atomic_t *)(&sem->count));
-       if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
+       if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                rwsem_wake(sem);
 }
 
@@ -137,9 +130,8 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-       smp_wmb();
-       if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
-                             (atomic_t *)(&sem->count)) < 0)
+       if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+                             (atomic_t *)(&sem->count)) < 0))
                rwsem_wake(sem);
 }
 
@@ -158,7 +150,6 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 {
        int tmp;
 
-       smp_wmb();
        tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
@@ -169,7 +160,6 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
  */
 static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 {
-       smp_mb();
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
 }
 
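
The rwsem hunks above all manipulate sem->count with the bias constants defined near the top of this header: the low 16 bits count active holders, and waiters drive the count negative, which is what makes a check like "tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0" mean "no active holders left, but somebody is queued". For reference, the usual values, quoted from memory of the asm-i386/rwsem.h this file says it was adapted from (treat the exact values as an assumption, not text from this commit):

#define RWSEM_UNLOCKED_VALUE            0x00000000
#define RWSEM_ACTIVE_BIAS               0x00000001
#define RWSEM_ACTIVE_MASK               0x0000ffff
#define RWSEM_WAITING_BIAS              (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

So a reader adds 1, a writer adds the waiting bias plus one active count, and a negative count with a zero low half means only waiters remain.
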
@@ -82,9 +82,8 @@ static inline void down(struct semaphore * sem)
        /*
         * Try to get the semaphore, take the slow path if we fail.
         */
-       if (atomic_dec_return(&sem->count) < 0)
+       if (unlikely(atomic_dec_return(&sem->count) < 0))
                __down(sem);
-       smp_wmb();
 }
 
 static inline int down_interruptible(struct semaphore * sem)
@@ -96,23 +95,18 @@ static inline int down_interruptible(struct semaphore * sem)
 #endif
 
        might_sleep();
-       if (atomic_dec_return(&sem->count) < 0)
+       if (unlikely(atomic_dec_return(&sem->count) < 0))
                ret = __down_interruptible(sem);
-       smp_wmb();
        return ret;
 }
 
 static inline int down_trylock(struct semaphore * sem)
 {
-       int ret;
-
 #ifdef WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
 #endif
 
-       ret = atomic_dec_if_positive(&sem->count) < 0;
-       smp_wmb();
-       return ret;
+       return atomic_dec_if_positive(&sem->count) < 0;
 }
 
 static inline void up(struct semaphore * sem)
@@ -121,8 +115,7 @@ static inline void up(struct semaphore * sem)
        CHECK_MAGIC(sem->__magic);
 #endif
 
-       smp_wmb();
-       if (atomic_inc_return(&sem->count) <= 0)
+       if (unlikely(atomic_inc_return(&sem->count) <= 0))
                __up(sem);
 }
 