Commit 5136bab9 authored by Nick Piggin, committed by Linus Torvalds

[PATCH] lindent rwsem

Run Lindent over rwsem.c and rwsem-spinlock.c and fix a few things up by hand.
Also add a couple of comments for the memory barriers, and add the __sched
annotation that was left out of rwsem-spinlock.c.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 099bb7c0
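
The memory-barrier comments matter because of the wake-up handshake both files use: the waker loads waiter->task, issues mb(), stores NULL to waiter->task, and only then calls wake_up_process(); once ->task is NULL, the waiter (whose rwsem_waiter lives on its own stack) may return and free that stack frame. Below is a minimal userspace sketch of that ordering, using C11 atomics in place of the kernel's mb() -- all names here are illustrative, not from the patch:

#include <stdatomic.h>
#include <stddef.h>

struct task;                            /* stand-in for task_struct */

struct waiter {
        _Atomic(struct task *) task;    /* non-NULL while still queued */
};

/* Waker side: mirrors "tsk = waiter->task; mb(); waiter->task = NULL;
 * wake_up_process(tsk);" from __rwsem_do_wake(). */
struct task *grant_lock(struct waiter *w)
{
        struct task *tsk = atomic_load_explicit(&w->task, memory_order_relaxed);

        /* Full barrier: all prior semaphore updates must be visible before
         * the waiter can observe ->task == NULL, and *w must not be touched
         * afterwards -- the waiter's stack frame may already be gone. */
        atomic_store_explicit(&w->task, NULL, memory_order_seq_cst);
        return tsk;     /* still safe: the queueing path took its own
                         * reference with get_task_struct() */
}

/* Waiter side: mirrors the wait loop ("if (!waiter.task) break;"); the
 * kernel sleeps in TASK_UNINTERRUPTIBLE instead of spinning. */
void wait_for_grant(struct waiter *w)
{
        while (atomic_load_explicit(&w->task, memory_order_seq_cst) != NULL)
                ;
}

The get_task_struct() reference taken when queueing is what keeps tsk itself valid after the handshake, even though the waiter structure may not be.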
@@ -1,5 +1,5 @@
-/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic spinlock
- * implementation
+/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
+ * generic spinlock implementation
  *
  * Copyright (c) 2001 David Howells (dhowells@redhat.com).
  * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
@@ -10,9 +10,9 @@
 #include <linux/module.h>

 struct rwsem_waiter {
         struct list_head list;
         struct task_struct *task;
         unsigned int flags;
 #define RWSEM_WAITING_FOR_READ  0x00000001
 #define RWSEM_WAITING_FOR_WRITE 0x00000002
 };
@@ -22,7 +22,8 @@ void rwsemtrace(struct rw_semaphore *sem, const char *str)
 {
         if (sem->debug)
                 printk("[%d] %s({%d,%d})\n",
-                       current->pid,str,sem->activity,list_empty(&sem->wait_list)?0:1);
+                       current->pid, str, sem->activity,
+                       list_empty(&sem->wait_list) ? 0 : 1);
 }
 #endif
@@ -40,7 +41,7 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
 }

 /*
- * handle the lock being released whilst there are processes blocked on it that can now run
+ * handle the lock release when processes blocked on it that can now run
  * - if we come here, then:
  *   - the 'active count' _reached_ zero
  *   - the 'waiting count' is non-zero
@@ -48,15 +49,16 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
         struct rwsem_waiter *waiter;
         struct task_struct *tsk;
         int woken;

-        rwsemtrace(sem,"Entering __rwsem_do_wake");
+        rwsemtrace(sem, "Entering __rwsem_do_wake");

-        waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

         if (!wakewrite) {
                 if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
@@ -64,14 +66,16 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
                         goto dont_wake_writers;
         }

-        /* if we are allowed to wake writers try to grant a single write lock if there's a
-         * writer at the front of the queue
-         * - we leave the 'waiting count' incremented to signify potential contention
+        /* if we are allowed to wake writers try to grant a single write lock
+         * if there's a writer at the front of the queue
+         * - we leave the 'waiting count' incremented to signify potential
+         *   contention
          */
         if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
                 sem->activity = -1;
                 list_del(&waiter->list);
                 tsk = waiter->task;
+                /* Don't touch waiter after ->task has been NULLed */
                 mb();
                 waiter->task = NULL;
                 wake_up_process(tsk);
@@ -79,10 +83,10 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
                 goto out;
         }

-        /* grant an infinite number of read locks to the readers at the front of the queue */
+        /* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
         woken = 0;
-        while (waiter->flags&RWSEM_WAITING_FOR_READ) {
+        while (waiter->flags & RWSEM_WAITING_FOR_READ) {
                 struct list_head *next = waiter->list.next;

                 list_del(&waiter->list);
@@ -94,27 +98,28 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
                 woken++;
                 if (list_empty(&sem->wait_list))
                         break;
-                waiter = list_entry(next,struct rwsem_waiter,list);
+                waiter = list_entry(next, struct rwsem_waiter, list);
         }

         sem->activity += woken;

 out:
-        rwsemtrace(sem,"Leaving __rwsem_do_wake");
+        rwsemtrace(sem, "Leaving __rwsem_do_wake");
         return sem;
 }

 /*
  * wake a single writer
  */
-static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
+static inline struct rw_semaphore *
+__rwsem_wake_one_writer(struct rw_semaphore *sem)
 {
         struct rwsem_waiter *waiter;
         struct task_struct *tsk;

         sem->activity = -1;

-        waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
         list_del(&waiter->list);

         tsk = waiter->task;
@@ -128,16 +133,16 @@ static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *
 /*
  * get a read lock on the semaphore
  */
-void fastcall __down_read(struct rw_semaphore *sem)
+void fastcall __sched __down_read(struct rw_semaphore *sem)
 {
         struct rwsem_waiter waiter;
         struct task_struct *tsk;

-        rwsemtrace(sem,"Entering __down_read");
+        rwsemtrace(sem, "Entering __down_read");

         spin_lock(&sem->wait_lock);

-        if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                 /* granted */
                 sem->activity++;
                 spin_unlock(&sem->wait_lock);
@@ -145,14 +150,14 @@ void fastcall __down_read(struct rw_semaphore *sem)
         }

         tsk = current;
-        set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

         /* set up my own style of waitqueue */
         waiter.task = tsk;
         waiter.flags = RWSEM_WAITING_FOR_READ;
         get_task_struct(tsk);

-        list_add_tail(&waiter.list,&sem->wait_list);
+        list_add_tail(&waiter.list, &sem->wait_list);

         /* we don't need to touch the semaphore struct anymore */
         spin_unlock(&sem->wait_lock);
@@ -168,7 +173,7 @@ void fastcall __down_read(struct rw_semaphore *sem)
         tsk->state = TASK_RUNNING;

 out:
-        rwsemtrace(sem,"Leaving __down_read");
+        rwsemtrace(sem, "Leaving __down_read");
 }

 /*
@@ -177,11 +182,11 @@ void fastcall __down_read(struct rw_semaphore *sem)
 int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
         int ret = 0;
-        rwsemtrace(sem,"Entering __down_read_trylock");
+        rwsemtrace(sem, "Entering __down_read_trylock");

         spin_lock(&sem->wait_lock);

-        if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                 /* granted */
                 sem->activity++;
                 ret = 1;
@@ -189,24 +194,24 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)

         spin_unlock(&sem->wait_lock);

-        rwsemtrace(sem,"Leaving __down_read_trylock");
+        rwsemtrace(sem, "Leaving __down_read_trylock");
         return ret;
 }

 /*
  * get a write lock on the semaphore
- * - note that we increment the waiting count anyway to indicate an exclusive lock
+ * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write(struct rw_semaphore *sem)
 {
         struct rwsem_waiter waiter;
         struct task_struct *tsk;

-        rwsemtrace(sem,"Entering __down_write");
+        rwsemtrace(sem, "Entering __down_write");

         spin_lock(&sem->wait_lock);

-        if (sem->activity==0 && list_empty(&sem->wait_list)) {
+        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                 /* granted */
                 sem->activity = -1;
                 spin_unlock(&sem->wait_lock);
@@ -214,14 +219,14 @@ void fastcall __down_write(struct rw_semaphore *sem)
         }

         tsk = current;
-        set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

         /* set up my own style of waitqueue */
         waiter.task = tsk;
         waiter.flags = RWSEM_WAITING_FOR_WRITE;
         get_task_struct(tsk);

-        list_add_tail(&waiter.list,&sem->wait_list);
+        list_add_tail(&waiter.list, &sem->wait_list);

         /* we don't need to touch the semaphore struct anymore */
         spin_unlock(&sem->wait_lock);
@@ -237,7 +242,7 @@ void fastcall __down_write(struct rw_semaphore *sem)
         tsk->state = TASK_RUNNING;

 out:
-        rwsemtrace(sem,"Leaving __down_write");
+        rwsemtrace(sem, "Leaving __down_write");
 }

 /*
@@ -246,11 +251,11 @@ void fastcall __down_write(struct rw_semaphore *sem)
 int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
         int ret = 0;
-        rwsemtrace(sem,"Entering __down_write_trylock");
+        rwsemtrace(sem, "Entering __down_write_trylock");

         spin_lock(&sem->wait_lock);

-        if (sem->activity==0 && list_empty(&sem->wait_list)) {
+        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                 /* granted */
                 sem->activity = -1;
                 ret = 1;
@@ -258,7 +263,7 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)

         spin_unlock(&sem->wait_lock);

-        rwsemtrace(sem,"Leaving __down_write_trylock");
+        rwsemtrace(sem, "Leaving __down_write_trylock");
         return ret;
 }
@@ -267,16 +272,16 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
  */
 void fastcall __up_read(struct rw_semaphore *sem)
 {
-        rwsemtrace(sem,"Entering __up_read");
+        rwsemtrace(sem, "Entering __up_read");

         spin_lock(&sem->wait_lock);

-        if (--sem->activity==0 && !list_empty(&sem->wait_list))
+        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                 sem = __rwsem_wake_one_writer(sem);

         spin_unlock(&sem->wait_lock);

-        rwsemtrace(sem,"Leaving __up_read");
+        rwsemtrace(sem, "Leaving __up_read");
 }

 /*
@@ -284,7 +289,7 @@ void fastcall __up_read(struct rw_semaphore *sem)
  */
 void fastcall __up_write(struct rw_semaphore *sem)
 {
-        rwsemtrace(sem,"Entering __up_write");
+        rwsemtrace(sem, "Entering __up_write");

         spin_lock(&sem->wait_lock);
@@ -294,7 +299,7 @@ void fastcall __up_write(struct rw_semaphore *sem)
         spin_unlock(&sem->wait_lock);

-        rwsemtrace(sem,"Leaving __up_write");
+        rwsemtrace(sem, "Leaving __up_write");
 }

 /*
@@ -303,17 +308,17 @@ void fastcall __up_write(struct rw_semaphore *sem)
  */
 void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
-        rwsemtrace(sem,"Entering __downgrade_write");
+        rwsemtrace(sem, "Entering __downgrade_write");

         spin_lock(&sem->wait_lock);

         sem->activity = 1;
         if (!list_empty(&sem->wait_list))
-                sem = __rwsem_do_wake(sem,0);
+                sem = __rwsem_do_wake(sem, 0);

         spin_unlock(&sem->wait_lock);

-        rwsemtrace(sem,"Leaving __downgrade_write");
+        rwsemtrace(sem, "Leaving __downgrade_write");
 }

 EXPORT_SYMBOL(init_rwsem);
... (remainder of rwsem-spinlock.c unchanged; the diff continues in rwsem.c)
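
Everything in the spinlock variant above hinges on one field: sem->activity is the number of active readers when positive, -1 while a writer holds the lock, and 0 when free, all protected by sem->wait_lock. Here is a hedged userspace model of the two trylock paths (the toy_* names are mine, not the kernel's):

#include <pthread.h>

struct toy_rwsem {
        int activity;                   /* >0: readers, -1: writer, 0: free */
        pthread_mutex_t wait_lock;      /* plays the role of sem->wait_lock */
        int waiters;                    /* stand-in for !list_empty(&sem->wait_list) */
};

/* mirrors __down_read_trylock(): readers may share, but never jump the queue */
int toy_down_read_trylock(struct toy_rwsem *sem)
{
        int ret = 0;

        pthread_mutex_lock(&sem->wait_lock);
        if (sem->activity >= 0 && sem->waiters == 0) {
                sem->activity++;        /* granted: one more reader */
                ret = 1;
        }
        pthread_mutex_unlock(&sem->wait_lock);
        return ret;
}

/* mirrors __down_write_trylock(): a writer needs the lock completely idle */
int toy_down_write_trylock(struct toy_rwsem *sem)
{
        int ret = 0;

        pthread_mutex_lock(&sem->wait_lock);
        if (sem->activity == 0 && sem->waiters == 0) {
                sem->activity = -1;     /* granted: exclusive holder */
                ret = 1;
        }
        pthread_mutex_unlock(&sem->wait_lock);
        return ret;
}

Release follows the same convention: __up_read() decrements activity and wakes one queued writer when it hits zero, while __downgrade_write() rewrites -1 to 1 and wakes only the readers at the head of the queue.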
@@ -9,9 +9,9 @@
 #include <linux/module.h>

 struct rwsem_waiter {
         struct list_head list;
         struct task_struct *task;
         unsigned int flags;
 #define RWSEM_WAITING_FOR_READ  0x00000001
 #define RWSEM_WAITING_FOR_WRITE 0x00000002
 };
@@ -20,31 +20,32 @@ struct rwsem_waiter {
 #undef rwsemtrace
 void rwsemtrace(struct rw_semaphore *sem, const char *str)
 {
-        printk("sem=%p\n",sem);
-        printk("(sem)=%08lx\n",sem->count);
+        printk("sem=%p\n", sem);
+        printk("(sem)=%08lx\n", sem->count);
         if (sem->debug)
-                printk("[%d] %s({%08lx})\n",current->pid,str,sem->count);
+                printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
 }
 #endif

 /*
- * handle the lock being released whilst there are processes blocked on it that can now run
+ * handle the lock release when processes blocked on it that can now run
  * - if we come here from up_xxxx(), then:
- *   - the 'active part' of the count (&0x0000ffff) had reached zero (but may have changed)
- *   - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so)
+ *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
+ *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
  *   - there must be someone on the queue
  * - the spinlock must be held by the caller
  * - woken process blocks are discarded from the list after having task zeroed
  * - writers are only woken if downgrading is false
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
 {
         struct rwsem_waiter *waiter;
         struct task_struct *tsk;
         struct list_head *next;
         signed long oldcount, woken, loop;

-        rwsemtrace(sem,"Entering __rwsem_do_wake");
+        rwsemtrace(sem, "Entering __rwsem_do_wake");

         if (downgrading)
                 goto dont_wake_writers;
@@ -53,19 +54,24 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
          * if we can transition the active part of the count from 0 -> 1
          */
 try_again:
-        oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
+        oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
+                                                - RWSEM_ACTIVE_BIAS;
         if (oldcount & RWSEM_ACTIVE_MASK)
                 goto undo;

-        waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

-        /* try to grant a single write lock if there's a writer at the front of the queue
-         * - note we leave the 'active part' of the count incremented by 1 and the waiting part
-         *   incremented by 0x00010000
+        /* try to grant a single write lock if there's a writer at the front
+         * of the queue - note we leave the 'active part' of the count
+         * incremented by 1 and the waiting part incremented by 0x00010000
          */
         if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
                 goto readers_only;

+        /* We must be careful not to touch 'waiter' after we set ->task = NULL.
+         * It is allocated on the waiter's stack and may become invalid at
+         * any time after that point (due to a wakeup from another source).
+         */
         list_del(&waiter->list);
         tsk = waiter->task;
         mb();
@@ -76,7 +82,7 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
         /* don't want to wake any writers */
 dont_wake_writers:
-        waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
         if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
                 goto out;
@@ -90,23 +96,25 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
         do {
                 woken++;
-                if (waiter->list.next==&sem->wait_list)
+                if (waiter->list.next == &sem->wait_list)
                         break;
-                waiter = list_entry(waiter->list.next,struct rwsem_waiter,list);
+                waiter = list_entry(waiter->list.next,
+                                        struct rwsem_waiter, list);
         } while (waiter->flags & RWSEM_WAITING_FOR_READ);

         loop = woken;
         woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
         if (!downgrading)
-                woken -= RWSEM_ACTIVE_BIAS; /* we'd already done one increment
-                                             * earlier */
-        rwsem_atomic_add(woken,sem);
+                /* we'd already done one increment earlier */
+                woken -= RWSEM_ACTIVE_BIAS;
+
+        rwsem_atomic_add(woken, sem);

         next = sem->wait_list.next;
-        for (; loop>0; loop--) {
-                waiter = list_entry(next,struct rwsem_waiter,list);
+        for (; loop > 0; loop--) {
+                waiter = list_entry(next, struct rwsem_waiter, list);
                 next = waiter->list.next;
                 tsk = waiter->task;
                 mb();
@@ -119,12 +127,12 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
         next->prev = &sem->wait_list;

 out:
-        rwsemtrace(sem,"Leaving __rwsem_do_wake");
+        rwsemtrace(sem, "Leaving __rwsem_do_wake");
         return sem;

         /* undo the change to count, but check for a transition 1->0 */
 undo:
-        if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem)!=0)
+        if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
                 goto out;
         goto try_again;
 }
@@ -132,28 +140,26 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
 /*
  * wait for a lock to be granted
  */
-static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore *sem,
-                                                struct rwsem_waiter *waiter,
-                                                signed long adjustment)
+static inline struct rw_semaphore *
+rwsem_down_failed_common(struct rw_semaphore *sem,
+        struct rwsem_waiter *waiter, signed long adjustment)
 {
         struct task_struct *tsk = current;
         signed long count;

-        set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

         /* set up my own style of waitqueue */
         spin_lock(&sem->wait_lock);
         waiter->task = tsk;
         get_task_struct(tsk);

-        list_add_tail(&waiter->list,&sem->wait_list);
+        list_add_tail(&waiter->list, &sem->wait_list);

-        /* note that we're now waiting on the lock, but no longer actively read-locking */
-        count = rwsem_atomic_update(adjustment,sem);
+        /* we're now waiting on the lock, but no longer actively read-locking */
+        count = rwsem_atomic_update(adjustment, sem);

-        /* if there are no longer active locks, wake the front queued process(es) up
-         * - it might even be this process, since the waker takes a more active part
-         */
+        /* if there are no active locks, wake the front queued process(es) up */
         if (!(count & RWSEM_ACTIVE_MASK))
                 sem = __rwsem_do_wake(sem, 0);
@@ -175,42 +181,45 @@ static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore
 /*
  * wait for the read lock to be granted
  */
-struct rw_semaphore fastcall __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall __sched *
+rwsem_down_read_failed(struct rw_semaphore *sem)
 {
         struct rwsem_waiter waiter;

-        rwsemtrace(sem,"Entering rwsem_down_read_failed");
+        rwsemtrace(sem, "Entering rwsem_down_read_failed");

         waiter.flags = RWSEM_WAITING_FOR_READ;
-        rwsem_down_failed_common(sem,&waiter,RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS);
+        rwsem_down_failed_common(sem, &waiter,
+                                RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);

-        rwsemtrace(sem,"Leaving rwsem_down_read_failed");
+        rwsemtrace(sem, "Leaving rwsem_down_read_failed");
         return sem;
 }

 /*
  * wait for the write lock to be granted
  */
-struct rw_semaphore fastcall __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall __sched *
+rwsem_down_write_failed(struct rw_semaphore *sem)
 {
         struct rwsem_waiter waiter;

-        rwsemtrace(sem,"Entering rwsem_down_write_failed");
+        rwsemtrace(sem, "Entering rwsem_down_write_failed");

         waiter.flags = RWSEM_WAITING_FOR_WRITE;
-        rwsem_down_failed_common(sem,&waiter,-RWSEM_ACTIVE_BIAS);
+        rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);

-        rwsemtrace(sem,"Leaving rwsem_down_write_failed");
+        rwsemtrace(sem, "Leaving rwsem_down_write_failed");
         return sem;
 }

 /*
  * handle waking up a waiter on the semaphore
- * - up_read/up_write has decremented the active part of the count if we come here
+ * - up_read/up_write has decremented the active part of count if we come here
  */
 struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 {
-        rwsemtrace(sem,"Entering rwsem_wake");
+        rwsemtrace(sem, "Entering rwsem_wake");

         spin_lock(&sem->wait_lock);
@@ -220,19 +229,19 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
         spin_unlock(&sem->wait_lock);

-        rwsemtrace(sem,"Leaving rwsem_wake");
+        rwsemtrace(sem, "Leaving rwsem_wake");
         return sem;
 }

 /*
  * downgrade a write lock into a read lock
- * - caller incremented waiting part of count, and discovered it to be still negative
+ * - caller incremented waiting part of count and discovered it still negative
  * - just wake up any readers at the front of the queue
  */
 struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
-        rwsemtrace(sem,"Entering rwsem_downgrade_wake");
+        rwsemtrace(sem, "Entering rwsem_downgrade_wake");

         spin_lock(&sem->wait_lock);
@@ -242,7 +251,7 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
         spin_unlock(&sem->wait_lock);

-        rwsemtrace(sem,"Leaving rwsem_downgrade_wake");
+        rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
         return sem;
 }
... (remainder of rwsem.c unchanged)
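
The count arithmetic in the rwsem.c hunks is easier to follow with the bias constants written out. The values below are the usual ones from the architecture's rwsem.h of that era, not part of this diff, so treat them as an assumption: active holders live in the low 16 bits of count, waiters (negatively) in the high bits. A small worked example of the reader-wakeup math in __rwsem_do_wake():

#include <assert.h>

/* Assumed constants (asm/rwsem.h of the era; not shown in this diff) */
#define RWSEM_ACTIVE_BIAS       0x00000001L
#define RWSEM_ACTIVE_MASK       0x0000ffffL
#define RWSEM_WAITING_BIAS      (-0x00010000L)

int main(void)
{
        /* Three readers queued while a writer held the lock: each one's
         * down_read() fast path added RWSEM_ACTIVE_BIAS, then its slow path
         * added RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS, netting
         * RWSEM_WAITING_BIAS apiece.  The writer has just released, so the
         * active part of the count is back to zero. */
        signed long count = 3 * RWSEM_WAITING_BIAS;
        long woken = 3;

        /* try_again: transition the active part of the count from 0 -> 1 */
        long oldcount = count;
        count += RWSEM_ACTIVE_BIAS;
        assert(!(oldcount & RWSEM_ACTIVE_MASK));        /* no undo needed */

        /* the rwsem_atomic_add(woken, sem) step */
        count += woken * (RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS)
                        - RWSEM_ACTIVE_BIAS;

        /* three active readers, nobody left waiting */
        assert(count == 3 * RWSEM_ACTIVE_BIAS);
        return 0;
}

This is why woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS converts queued readers into active holders in a single rwsem_atomic_add(), and why the non-downgrading waker subtracts the one RWSEM_ACTIVE_BIAS it had already added at try_again.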