Commit 84b23f9b authored by Davidlohr Bueso, committed by Ingo Molnar

locking/rwsem: Return void in __rwsem_mark_wake()

We currently return a rw_semaphore structure, which is the
same lock we passed as the function's argument in the first
place. While several functions follow this pattern, their
callers actually use the returned lock, for example for
things like ERR_PTR. This is not the case for
__rwsem_mark_wake(); in addition, this function is really
about the lock waiters (which we know exist at this point),
so it's somewhat odd to be returning the sem structure.
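
(Illustrative note, not part of the original patch: a minimal userspace C
sketch of the two conventions the message contrasts; every name below is
hypothetical.)

	#include <stdio.h>

	struct lock { int waiters; };

	/*
	 * Convention the message alludes to: return the object you were
	 * handed, so callers can consume the result (e.g. in an
	 * ERR_PTR()-style scheme). Only worthwhile if callers actually
	 * use the returned pointer.
	 */
	static struct lock *mark_wake_returning(struct lock *l)
	{
		l->waiters = 0;
		return l;
	}

	/* Convention the patch adopts: called purely for its effect. */
	static void mark_wake_void(struct lock *l)
	{
		l->waiters = 0;
	}

	int main(void)
	{
		struct lock l = { .waiters = 2 };

		/* Old call sites: sem = __rwsem_mark_wake(sem, ...);
		 * the result was never consumed. */
		struct lock *same = mark_wake_returning(&l);
		(void)same;

		/* New call sites: __rwsem_mark_wake(sem, ...); */
		mark_wake_void(&l);

		printf("waiters = %d\n", l.waiters);
		return 0;
	}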
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman.Long@hp.com
Cc: dave@stgolabs.net
Cc: jason.low2@hpe.com
Cc: wanpeng.li@hotmail.com
Link: http://lkml.kernel.org/r/1470384285-32163-2-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3942a9bd
@@ -121,16 +121,17 @@ enum rwsem_wake_type {
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only marked woken if downgrading is false
  */
-static struct rw_semaphore *
-__rwsem_mark_wake(struct rw_semaphore *sem,
-		  enum rwsem_wake_type wake_type, struct wake_q_head *wake_q)
+static void __rwsem_mark_wake(struct rw_semaphore *sem,
+			      enum rwsem_wake_type wake_type,
+			      struct wake_q_head *wake_q)
 {
 	struct rwsem_waiter *waiter;
 	struct task_struct *tsk;
 	struct list_head *next;
-	long oldcount, woken, loop, adjustment;
+	long loop, oldcount, woken = 0, adjustment = 0;
 
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
+
 	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
 		if (wake_type == RWSEM_WAKE_ANY) {
 			/*
@@ -142,19 +143,19 @@ __rwsem_mark_wake(struct rw_semaphore *sem,
 			 */
 			wake_q_add(wake_q, waiter->task);
 		}
-		goto out;
+
+		return;
 	}
 
-	/* Writers might steal the lock before we grant it to the next reader.
+	/*
+	 * Writers might steal the lock before we grant it to the next reader.
 	 * We prefer to do the first reader grant before counting readers
 	 * so we can bail out early if a writer stole the lock.
 	 */
-	adjustment = 0;
 	if (wake_type != RWSEM_WAKE_READ_OWNED) {
 		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
 		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
 		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
 			/*
 			 * If the count is still less than RWSEM_WAITING_BIAS
@@ -164,7 +165,8 @@ __rwsem_mark_wake(struct rw_semaphore *sem,
 			 */
 			if (atomic_long_add_return(-adjustment, &sem->count) <
 			    RWSEM_WAITING_BIAS)
-				goto out;
+				return;
+
 			/* Last active locker left. Retry waking readers. */
 			goto try_reader_grant;
 		}
@@ -176,11 +178,11 @@ __rwsem_mark_wake(struct rw_semaphore *sem,
 		rwsem_set_reader_owned(sem);
 	}
 
-	/* Grant an infinite number of read locks to the readers at the front
+	/*
+	 * Grant an infinite number of read locks to the readers at the front
 	 * of the queue. Note we increment the 'active part' of the count by
 	 * the number of readers before waking any processes up.
 	 */
-	woken = 0;
 	do {
 		woken++;
@@ -219,9 +221,6 @@ __rwsem_mark_wake(struct rw_semaphore *sem,
 	sem->wait_list.next = next;
 	next->prev = &sem->wait_list;
-
-out:
-	return sem;
 }
 
 /*
@@ -255,7 +254,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 	if (count == RWSEM_WAITING_BIAS ||
 	    (count > RWSEM_WAITING_BIAS &&
 	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
-		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 
 	raw_spin_unlock_irq(&sem->wait_lock);
 	wake_up_q(&wake_q);
@@ -505,7 +504,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 		if (count > RWSEM_WAITING_BIAS) {
 			WAKE_Q(wake_q);
 
-			sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
+			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
 			/*
 			 * The wakeup is normally called _after_ the wait_lock
 			 * is released, but given that we are proactively waking
@@ -616,7 +615,7 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
+		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 
 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 	wake_up_q(&wake_q);
@@ -640,7 +639,7 @@ struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
+		__rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
 
 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 	wake_up_q(&wake_q);
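(Added context, not part of the original commit: every call site above follows
the wake_q idiom of collecting wakeups while holding sem->wait_lock and issuing
them only after the unlock. A rough userspace analog using pthreads is sketched
below; the kernel's real wake_q_add()/wake_up_q() machinery differs, and all
names here are made up.)

	#include <pthread.h>
	#include <stdio.h>

	#define MAX_WAKE 8

	/*
	 * Deferred-wakeup list: remember whom to wake while the lock is
	 * held, signal them only after it is dropped, keeping the
	 * critical section short and avoiding waking a thread straight
	 * into a lock we still hold.
	 */
	struct wake_list {
		pthread_cond_t *cond[MAX_WAKE];
		int n;
	};

	static void wake_list_add(struct wake_list *wl, pthread_cond_t *c)
	{
		if (wl->n < MAX_WAKE)
			wl->cond[wl->n++] = c;	/* record only; no signalling yet */
	}

	static void wake_list_fire(struct wake_list *wl)
	{
		for (int i = 0; i < wl->n; i++)
			pthread_cond_signal(wl->cond[i]);	/* lock already dropped */
		wl->n = 0;
	}

	static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t waiter_a = PTHREAD_COND_INITIALIZER;
	static pthread_cond_t waiter_b = PTHREAD_COND_INITIALIZER;

	int main(void)
	{
		struct wake_list wl = { .n = 0 };
		int queued;

		pthread_mutex_lock(&wait_lock);
		wake_list_add(&wl, &waiter_a);	/* ~ __rwsem_mark_wake() filling the wake_q */
		wake_list_add(&wl, &waiter_b);
		queued = wl.n;
		pthread_mutex_unlock(&wait_lock);	/* ~ raw_spin_unlock_irq() */

		wake_list_fire(&wl);		/* ~ wake_up_q() */
		printf("deferred %d wakeups until after unlock\n", queued);
		return 0;
	}

The comment retained in __rwsem_down_write_failed_common() makes the same
point: the wakeup is normally issued only after wait_lock is released.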