Commit baa896b3 authored by William Lee Irwin III's avatar William Lee Irwin III Committed by Linus Torvalds

[PATCH] consolidate bit waiting code patterns

Consolidate bit waiting code patterns for page waitqueues using
__wait_on_bit() and __wait_on_bit_lock().
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent fd4d36bf
...@@ -139,6 +139,8 @@ void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *k ...@@ -139,6 +139,8 @@ void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *k
extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode)); extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)); extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int)); void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
int FASTCALL(__wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, void *, int, int (*)(void *), unsigned));
int FASTCALL(__wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, void *, int, int (*)(void *), unsigned));
#define wake_up(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL) #define wake_up(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL) #define wake_up_nr(x, nr) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
......
...@@ -143,6 +143,43 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg) ...@@ -143,6 +143,43 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
} }
EXPORT_SYMBOL(wake_bit_function); EXPORT_SYMBOL(wake_bit_function);
/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the actions passed to __wait_on_bit() and __wait_on_bit_lock()
 * are permitted to return nonzero codes.  A nonzero return code halts
 * waiting and is propagated back to the caller.
 */
/*
 * __wait_on_bit - sleep on @wq until @bit of @word clears.
 * @wq:     hashed waitqueue for this word
 * @q:      caller-allocated wait descriptor (filters wakeups by word/bit)
 * @word:   address of the flags word containing the bit
 * @bit:    bit number within @word to wait on
 * @action: caller-supplied function invoked while the bit is set
 *          (e.g. one that schedules); its nonzero return aborts the
 *          wait and is returned to the caller
 * @mode:   task state to sleep in (e.g. TASK_UNINTERRUPTIBLE)
 *
 * Returns 0, or the nonzero code produced by @action.
 *
 * NOTE(review): only a single prepare/test/finish cycle is performed
 * here -- re-checking the bit after wakeup appears to be left to the
 * caller; confirm against the call sites.
 */
int __sched fastcall
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
void *word, int bit, int (*action)(void *), unsigned mode)
{
int ret = 0;
/*
 * Queue ourselves on @wq and set the task state BEFORE re-testing
 * the bit, so a concurrent __wake_up_bit() cannot be missed.
 */
prepare_to_wait(wq, &q->wait, mode);
if (test_bit(bit, word))
ret = (*action)(word);
/* Dequeue from @wq and restore TASK_RUNNING. */
finish_wait(wq, &q->wait);
return ret;
}
/*
 * __wait_on_bit_lock - sleep on @wq until @bit of @word can be
 * atomically set (i.e. acquire a bit lock).
 * @wq:     hashed waitqueue for this word
 * @q:      caller-allocated wait descriptor
 * @word:   address of the flags word containing the bit
 * @bit:    bit number to acquire
 * @action: caller-supplied function invoked while the bit is held by
 *          someone else; a nonzero return aborts the wait
 * @mode:   task state to sleep in
 *
 * Returns 0 with the bit set (lock acquired), or the nonzero code
 * from @action -- in which case the bit was NOT acquired, since the
 * loop breaks before retrying test_and_set_bit().
 */
int __sched fastcall
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
void *word, int bit, int (*action)(void *), unsigned mode)
{
int ret = 0;
/* Loop until we win the race to set the bit. */
while (test_and_set_bit(bit, word)) {
/*
 * Exclusive wait: only one waiter is woken per wakeup, since
 * only one can take the bit lock anyway.  Queue before the
 * re-test to avoid missing a concurrent wakeup.
 */
prepare_to_wait_exclusive(wq, &q->wait, mode);
if (test_bit(bit, word)) {
if ((ret = (*action)(word)))
break;
}
}
finish_wait(wq, &q->wait);
return ret;
}
void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit) void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{ {
struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit); struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
......
...@@ -131,9 +131,12 @@ void remove_from_page_cache(struct page *page) ...@@ -131,9 +131,12 @@ void remove_from_page_cache(struct page *page)
spin_unlock_irq(&mapping->tree_lock); spin_unlock_irq(&mapping->tree_lock);
} }
static inline int sync_page(struct page *page) static int sync_page(void *word)
{ {
struct address_space *mapping; struct address_space *mapping;
struct page *page;
page = container_of((page_flags_t *)word, struct page, flags);
/* /*
* FIXME, fercrissake. What is this barrier here for? * FIXME, fercrissake. What is this barrier here for?
...@@ -141,7 +144,8 @@ static inline int sync_page(struct page *page) ...@@ -141,7 +144,8 @@ static inline int sync_page(struct page *page)
smp_mb(); smp_mb();
mapping = page_mapping(page); mapping = page_mapping(page);
if (mapping && mapping->a_ops && mapping->a_ops->sync_page) if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
return mapping->a_ops->sync_page(page); mapping->a_ops->sync_page(page);
io_schedule();
return 0; return 0;
} }
...@@ -367,19 +371,19 @@ static wait_queue_head_t *page_waitqueue(struct page *page) ...@@ -367,19 +371,19 @@ static wait_queue_head_t *page_waitqueue(struct page *page)
return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)]; return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
} }
static inline void wake_up_page(struct page *page, int bit)
{
__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}
void fastcall wait_on_page_bit(struct page *page, int bit_nr) void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{ {
wait_queue_head_t *waitqueue = page_waitqueue(page);
DEFINE_WAIT_BIT(wait, &page->flags, bit_nr); DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
prepare_to_wait(waitqueue, &wait.wait, TASK_UNINTERRUPTIBLE); if (test_bit(bit_nr, &page->flags))
if (test_bit(bit_nr, &page->flags)) { __wait_on_bit(page_waitqueue(page), &wait, wait.key.flags,
sync_page(page); bit_nr, sync_page, TASK_UNINTERRUPTIBLE);
io_schedule();
}
finish_wait(waitqueue, &wait.wait);
} }
EXPORT_SYMBOL(wait_on_page_bit); EXPORT_SYMBOL(wait_on_page_bit);
/** /**
...@@ -403,7 +407,7 @@ void fastcall unlock_page(struct page *page) ...@@ -403,7 +407,7 @@ void fastcall unlock_page(struct page *page)
if (!TestClearPageLocked(page)) if (!TestClearPageLocked(page))
BUG(); BUG();
smp_mb__after_clear_bit(); smp_mb__after_clear_bit();
__wake_up_bit(page_waitqueue(page), &page->flags, PG_locked); wake_up_page(page, PG_locked);
} }
EXPORT_SYMBOL(unlock_page); EXPORT_SYMBOL(unlock_page);
...@@ -419,7 +423,7 @@ void end_page_writeback(struct page *page) ...@@ -419,7 +423,7 @@ void end_page_writeback(struct page *page)
BUG(); BUG();
smp_mb__after_clear_bit(); smp_mb__after_clear_bit();
} }
__wake_up_bit(page_waitqueue(page), &page->flags, PG_writeback); wake_up_page(page, PG_writeback);
} }
EXPORT_SYMBOL(end_page_writeback); EXPORT_SYMBOL(end_page_writeback);
...@@ -434,19 +438,11 @@ EXPORT_SYMBOL(end_page_writeback); ...@@ -434,19 +438,11 @@ EXPORT_SYMBOL(end_page_writeback);
*/ */
void fastcall __lock_page(struct page *page) void fastcall __lock_page(struct page *page)
{ {
wait_queue_head_t *wqh = page_waitqueue(page);
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
while (TestSetPageLocked(page)) { __wait_on_bit_lock(page_waitqueue(page), &wait, wait.key.flags,
prepare_to_wait_exclusive(wqh, &wait.wait, TASK_UNINTERRUPTIBLE); PG_locked, sync_page, TASK_UNINTERRUPTIBLE);
if (PageLocked(page)) {
sync_page(page);
io_schedule();
}
}
finish_wait(wqh, &wait.wait);
} }
EXPORT_SYMBOL(__lock_page); EXPORT_SYMBOL(__lock_page);
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment