Commit 0aa1125f authored by Kirill Tkhai, committed by Ingo Molnar

locking/rwsem-spinlock: Add killable versions of __down_read()

Rename __down_read() to __down_read_common() and teach it
to abort waiting when a signal is pending and a killable
state argument was passed.

Note that we shouldn't wake anybody up on the EINTR path, as:

We check signal_pending_state() after the (!waiter.task)
test and under the spinlock, so the current task cannot
have been woken up yet. That can only happen in two cases:
a writer owns the sem, or a writer is the first waiter of
the sem.

If a writer owns the sem, no one else may work with it in
parallel; it will wake somebody up when it calls up_write()
or downgrade_write().

If a writer is the first waiter, it will be woken up when
the last active reader releases the sem and sem->count
becomes 0.
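
(For reference, the counter convention of the spinlock-based
rwsem, as documented in include/linux/rwsem-spinlock.h of this
era, is roughly the following; it is reproduced here only to
make the "sem->count becomes 0" argument above easier to follow:)

	/*
	 * sem->count in the spinlock-based rw_semaphore:
	 *   0   - no active readers or writers
	 *   > 0 - number of active readers
	 *   -1  - one active writer
	 */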

Also note that set_current_state() may be moved down next to
schedule() (after the !waiter.task check), as all assignments
in this type of semaphore (including the wake-up) occur under
the spinlock, so we can't miss anything.
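
For context on the state argument: the abort decision is
delegated to signal_pending_state(). Its definition in kernels
of this era (include/linux/sched/signal.h) is roughly the sketch
below; with TASK_UNINTERRUPTIBLE it always returns 0, so the
plain __down_read() wrapper keeps its old uninterruptible
behaviour, while TASK_KILLABLE lets only fatal signals abort
the wait:

	static inline int signal_pending_state(long state, struct task_struct *p)
	{
		/* Plain TASK_UNINTERRUPTIBLE: never abort the sleep */
		if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
			return 0;
		if (!signal_pending(p))
			return 0;

		/* TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE): fatal signals only */
		return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
	}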
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: arnd@arndb.de
Cc: avagin@virtuozzo.com
Cc: davem@davemloft.net
Cc: fenghua.yu@intel.com
Cc: gorcunov@virtuozzo.com
Cc: heiko.carstens@de.ibm.com
Cc: hpa@zytor.com
Cc: ink@jurassic.park.msu.ru
Cc: mattst88@gmail.com
Cc: rth@twiddle.net
Cc: schwidefsky@de.ibm.com
Cc: tony.luck@intel.com
Link: http://lkml.kernel.org/r/149789533283.9059.9829416940494747182.stgit@localhost.localdomain
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 50972fe7
@@ -32,6 +32,7 @@ struct rw_semaphore {
 #define RWSEM_UNLOCKED_VALUE		0x00000000
 
 extern void __down_read(struct rw_semaphore *sem);
+extern int __must_check __down_read_killable(struct rw_semaphore *sem);
 extern int __down_read_trylock(struct rw_semaphore *sem);
 extern void __down_write(struct rw_semaphore *sem);
 extern int __must_check __down_write_killable(struct rw_semaphore *sem);
...
@@ -126,7 +126,7 @@ __rwsem_wake_one_writer(struct rw_semaphore *sem)
 /*
  * get a read lock on the semaphore
  */
-void __sched __down_read(struct rw_semaphore *sem)
+int __sched __down_read_common(struct rw_semaphore *sem, int state)
 {
 	struct rwsem_waiter waiter;
 	unsigned long flags;
@@ -140,8 +140,6 @@ void __sched __down_read(struct rw_semaphore *sem)
 		goto out;
 	}
 
-	set_current_state(TASK_UNINTERRUPTIBLE);
-
 	/* set up my own style of waitqueue */
 	waiter.task = current;
 	waiter.type = RWSEM_WAITING_FOR_READ;
@@ -149,20 +147,41 @@ void __sched __down_read(struct rw_semaphore *sem)
 	list_add_tail(&waiter.list, &sem->wait_list);
 
-	/* we don't need to touch the semaphore struct anymore */
-	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
 	/* wait to be given the lock */
 	for (;;) {
 		if (!waiter.task)
 			break;
+		if (signal_pending_state(state, current))
+			goto out_nolock;
+		set_current_state(state);
+		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		schedule();
-		set_current_state(TASK_UNINTERRUPTIBLE);
+		raw_spin_lock_irqsave(&sem->wait_lock, flags);
 	}
 
-	__set_current_state(TASK_RUNNING);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  out:
-	;
+	return 0;
+
+out_nolock:
+	/*
+	 * We didn't take the lock, so that there is a writer, which
+	 * is owner or the first waiter of the sem. If it's a waiter,
+	 * it will be woken by current owner. Not need to wake anybody.
+	 */
+	list_del(&waiter.list);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+	return -EINTR;
+}
+
+void __sched __down_read(struct rw_semaphore *sem)
+{
+	__down_read_common(sem, TASK_UNINTERRUPTIBLE);
+}
+
+int __sched __down_read_killable(struct rw_semaphore *sem)
+{
+	return __down_read_common(sem, TASK_KILLABLE);
+}
+
 /*
...
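
Finally, a caller-side sketch of how the killable variant is meant
to be consumed, assuming the higher-level down_read_killable()
wrapper (added separately on top of this primitive) that forwards
to __down_read_killable() and returns -EINTR when the task is
killed. The function and the lock used below are purely
illustrative:

	int dump_something(struct mm_struct *mm)
	{
		/* Hypothetical caller: give up instead of blocking a killed task. */
		if (down_read_killable(&mm->mmap_sem))
			return -EINTR;

		/* ... read-side critical section ... */

		up_read(&mm->mmap_sem);
		return 0;
	}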