Commit 8a1270cd authored by Tejun Heo, committed by Jens Axboe

writeback: remove wb_writeback_work->single_wait/done

wb_writeback_work->single_wait/done are used for the wait mechanism
for synchronous wb_work (wb_writeback_work) items, which are issued
when bdi_split_work_to_wbs() fails to allocate memory for asynchronous
wb_work items; however, there's no reason to use a separate wait
mechanism for this.  bdi_split_work_to_wbs() can simply use an
on-stack fallback wb_work item and a separate wb_completion to wait
for it.

This patch removes wb_work->single_wait/done and the related code and
makes bdi_split_work_to_wbs() use an on-stack fallback wb_work and
wb_completion instead.

Signed-off-by: Tejun Heo <tj@kernel.org>
Suggested-by: Jan Kara <jack@suse.cz>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 1ed8d48c
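
For readers coming to this patch cold: the wb_completion mechanism the fallback path now reuses is an on-stack counter primed to 1 (the waiter's reference). wb_queue_work() increments it once per queued item, each completed item decrements it and wakes bdi->wb_waitq, and wb_wait_for_completion() drops the initial reference and sleeps until the count reaches zero. Below is a minimal userspace sketch of that counting pattern, not the kernel API; every demo_* name is invented for illustration (build with cc -pthread).

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Illustrative analogue of wb_completion: outstanding items + 1. */
	struct demo_completion {
		int cnt;
		pthread_mutex_t lock;
		pthread_cond_t waitq;
	};

	#define DEMO_COMPLETION_ONSTACK(name)					\
		struct demo_completion name = { 1, PTHREAD_MUTEX_INITIALIZER,	\
						PTHREAD_COND_INITIALIZER }

	/* cf. atomic_inc(&work->done->cnt) in wb_queue_work() */
	static void demo_get(struct demo_completion *c)
	{
		pthread_mutex_lock(&c->lock);
		c->cnt++;
		pthread_mutex_unlock(&c->lock);
	}

	/* cf. atomic_dec_and_test() + wake_up_all() in wb_do_writeback() */
	static void demo_put(struct demo_completion *c)
	{
		pthread_mutex_lock(&c->lock);
		if (--c->cnt == 0)
			pthread_cond_broadcast(&c->waitq);
		pthread_mutex_unlock(&c->lock);
	}

	/* cf. wb_wait_for_completion(): drop the initial ref, wait for zero */
	static void demo_wait_for_completion(struct demo_completion *c)
	{
		pthread_mutex_lock(&c->lock);
		c->cnt--;			/* the waiter's own reference */
		while (c->cnt)
			pthread_cond_wait(&c->waitq, &c->lock);
		pthread_mutex_unlock(&c->lock);
	}

	static void *demo_worker(void *arg)
	{
		usleep(1000);			/* stand-in for wb_writeback() */
		demo_put(arg);			/* signal this work item is done */
		return NULL;
	}

	int main(void)
	{
		DEMO_COMPLETION_ONSTACK(done);
		pthread_t tid[3];

		for (int i = 0; i < 3; i++) {	/* "queue" three work items */
			demo_get(&done);
			pthread_create(&tid[i], NULL, demo_worker, &done);
		}
		demo_wait_for_completion(&done); /* returns after all three put */
		puts("all work items completed");
		for (int i = 0; i < 3; i++)
			pthread_join(tid[i], NULL);
		return 0;
	}
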
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -53,8 +53,6 @@ struct wb_writeback_work {
 	unsigned int for_background:1;
 	unsigned int for_sync:1;	/* sync(2) WB_SYNC_ALL writeback */
 	unsigned int auto_free:1;	/* free on completion */
-	unsigned int single_wait:1;
-	unsigned int single_done:1;
 	enum wb_reason reason;		/* why was writeback initiated? */
 
 	struct list_head list;		/* pending work list */
@@ -181,11 +179,8 @@ static void wb_queue_work(struct bdi_writeback *wb,
 	trace_writeback_queue(wb->bdi, work);
 	spin_lock_bh(&wb->work_lock);
-	if (!test_bit(WB_registered, &wb->state)) {
-		if (work->single_wait)
-			work->single_done = 1;
+	if (!test_bit(WB_registered, &wb->state))
 		goto out_unlock;
-	}
 	if (work->done)
 		atomic_inc(&work->done->cnt);
 	list_add_tail(&work->list, &wb->work_list);
@@ -737,32 +732,6 @@ int inode_congested(struct inode *inode, int cong_bits)
 }
 EXPORT_SYMBOL_GPL(inode_congested);
 
-/**
- * wb_wait_for_single_work - wait for completion of a single bdi_writeback_work
- * @bdi: bdi the work item was issued to
- * @work: work item to wait for
- *
- * Wait for the completion of @work which was issued to one of @bdi's
- * bdi_writeback's.  The caller must have set @work->single_wait before
- * issuing it.  This wait operates independently from
- * wb_wait_for_completion() and also disables automatic freeing of @work.
- */
-static void wb_wait_for_single_work(struct backing_dev_info *bdi,
-				    struct wb_writeback_work *work)
-{
-	if (WARN_ON_ONCE(!work->single_wait))
-		return;
-	wait_event(bdi->wb_waitq, work->single_done);
-
-	/*
-	 * Paired with smp_wmb() in wb_do_writeback() and ensures that all
-	 * modifications to @work prior to assertion of ->single_done are
-	 * visible to the caller once this function returns.
-	 */
-	smp_rmb();
-}
-
 /**
  * wb_split_bdi_pages - split nr_pages to write according to bandwidth
  * @wb: target bdi_writeback to split @nr_pages to
@@ -791,38 +760,6 @@ static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
 	return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
 }
 
-/**
- * wb_clone_and_queue_work - clone a wb_writeback_work and issue it to a wb
- * @wb: target bdi_writeback
- * @base_work: source wb_writeback_work
- *
- * Try to make a clone of @base_work and issue it to @wb.  If cloning
- * succeeds, %true is returned; otherwise, @base_work is issued directly
- * and %false is returned.  In the latter case, the caller is required to
- * wait for @base_work's completion using wb_wait_for_single_work().
- *
- * A clone is auto-freed on completion.  @base_work never is.
- */
-static bool wb_clone_and_queue_work(struct bdi_writeback *wb,
-				    struct wb_writeback_work *base_work)
-{
-	struct wb_writeback_work *work;
-
-	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (work) {
-		*work = *base_work;
-		work->auto_free = 1;
-		work->single_wait = 0;
-	} else {
-		work = base_work;
-		work->auto_free = 0;
-		work->single_wait = 1;
-	}
-	work->single_done = 0;
-	wb_queue_work(wb, work);
-	return work != base_work;
-}
-
 /**
  * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
  * @bdi: target backing_dev_info
@@ -838,7 +775,6 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 				  struct wb_writeback_work *base_work,
 				  bool skip_if_busy)
 {
-	long nr_pages = base_work->nr_pages;
 	int next_memcg_id = 0;
 	struct bdi_writeback *wb;
 	struct wb_iter iter;
@@ -850,17 +786,39 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 restart:
 	rcu_read_lock();
 	bdi_for_each_wb(wb, bdi, &iter, next_memcg_id) {
+		DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done);
+		struct wb_writeback_work fallback_work;
+		struct wb_writeback_work *work;
+		long nr_pages;
+
 		if (!wb_has_dirty_io(wb) ||
 		    (skip_if_busy && writeback_in_progress(wb)))
 			continue;
 
-		base_work->nr_pages = wb_split_bdi_pages(wb, nr_pages);
-		if (!wb_clone_and_queue_work(wb, base_work)) {
-			next_memcg_id = wb->memcg_css->id + 1;
-			rcu_read_unlock();
-			wb_wait_for_single_work(bdi, base_work);
-			goto restart;
+		nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
+
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+		if (work) {
+			*work = *base_work;
+			work->nr_pages = nr_pages;
+			work->auto_free = 1;
+			wb_queue_work(wb, work);
+			continue;
 		}
+
+		/* alloc failed, execute synchronously using on-stack fallback */
+		work = &fallback_work;
+		*work = *base_work;
+		work->nr_pages = nr_pages;
+		work->auto_free = 0;
+		work->done = &fallback_work_done;
+
+		wb_queue_work(wb, work);
+
+		next_memcg_id = wb->memcg_css->id + 1;
+		rcu_read_unlock();
+		wb_wait_for_completion(bdi, &fallback_work_done);
+		goto restart;
 	}
 	rcu_read_unlock();
 }
@@ -902,8 +860,6 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 	if (bdi_has_dirty_io(bdi) &&
 	    (!skip_if_busy || !writeback_in_progress(&bdi->wb))) {
 		base_work->auto_free = 0;
-		base_work->single_wait = 0;
-		base_work->single_done = 0;
 		wb_queue_work(&bdi->wb, base_work);
 	}
 }
@@ -1794,26 +1750,14 @@ static long wb_do_writeback(struct bdi_writeback *wb)
 	set_bit(WB_writeback_running, &wb->state);
 	while ((work = get_next_work_item(wb)) != NULL) {
 		struct wb_completion *done = work->done;
-		bool need_wake_up = false;
 
 		trace_writeback_exec(wb->bdi, work);
 		wrote += wb_writeback(wb, work);
 
-		if (work->single_wait) {
-			WARN_ON_ONCE(work->auto_free);
-			/* paired w/ rmb in wb_wait_for_single_work() */
-			smp_wmb();
-			work->single_done = 1;
-			need_wake_up = true;
-		} else if (work->auto_free) {
+		if (work->auto_free)
 			kfree(work);
-		}
 
-		if (done && atomic_dec_and_test(&done->cnt))
-			need_wake_up = true;
-
-		if (need_wake_up)
+		if (done && atomic_dec_and_test(&done->cnt))
 			wake_up_all(&wb->bdi->wb_waitq);
 	}