Commit 6585027a authored by Jan Kara, committed by Linus Torvalds

writeback: integrated background writeback work

Check whether background writeback is needed after finishing each work item.

When the bdi flusher thread finishes some work, check whether any kind of
background writeback needs to be done (either because dirty_background_ratio
is exceeded or because we need to start flushing old inodes).  If so, just do
background writeback.

This way, bdi_start_background_writeback() just needs to wake up the
flusher thread.  It will do background writeback as soon as there is no
other work.
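
For illustration only, below is a minimal user-space sketch of that ordering:
explicit work items are drained first, and background writeback runs only once
the work list is empty.  The names (work_item, do_writeback) and the page
counts are made up for the demo; the real kernel code is in the diff below.

#include <stdbool.h>
#include <stdio.h>

struct work_item {
	long nr_pages;
	struct work_item *next;
};

static struct work_item *work_list;	/* explicit work queued by callers */
static long nr_dirty = 1200;		/* made-up numbers for the demo */
static long bg_thresh = 1000;

static bool over_bground_thresh(void)
{
	return nr_dirty > bg_thresh;
}

static long writeback(long nr_pages)
{
	long done = nr_pages < nr_dirty ? nr_pages : nr_dirty;

	nr_dirty -= done;
	return done;
}

/* Mirrors the ordering in wb_do_writeback(): explicit work first,
 * background flush only once the work list is empty. */
static long do_writeback(void)
{
	long wrote = 0;

	while (work_list) {
		struct work_item *w = work_list;

		work_list = w->next;
		wrote += writeback(w->nr_pages);
	}
	if (over_bground_thresh())
		wrote += writeback(nr_dirty - bg_thresh);
	return wrote;
}

int main(void)
{
	long wrote = do_writeback();

	printf("wrote %ld pages, %ld still dirty\n", wrote, nr_dirty);
	return 0;
}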

This is a preparatory patch for the next patch which stops background
writeback as soon as there is other work to do.
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jan Engelhardt <jengelh@medozas.de>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b44129b3
@@ -84,13 +84,9 @@ static inline struct inode *wb_inode(struct list_head *head)
 	return list_entry(head, struct inode, i_wb_list);
 }
 
-static void bdi_queue_work(struct backing_dev_info *bdi,
-		struct wb_writeback_work *work)
+/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
+static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
 {
-	trace_writeback_queue(bdi, work);
-
-	spin_lock_bh(&bdi->wb_lock);
-	list_add_tail(&work->list, &bdi->work_list);
 	if (bdi->wb.task) {
 		wake_up_process(bdi->wb.task);
 	} else {
@@ -98,15 +94,26 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
 		 * The bdi thread isn't there, wake up the forker thread which
 		 * will create and run it.
 		 */
-		trace_writeback_nothread(bdi, work);
 		wake_up_process(default_backing_dev_info.wb.task);
 	}
+}
+
+static void bdi_queue_work(struct backing_dev_info *bdi,
+			   struct wb_writeback_work *work)
+{
+	trace_writeback_queue(bdi, work);
+
+	spin_lock_bh(&bdi->wb_lock);
+	list_add_tail(&work->list, &bdi->work_list);
+	if (!bdi->wb.task)
+		trace_writeback_nothread(bdi, work);
+	bdi_wakeup_flusher(bdi);
 	spin_unlock_bh(&bdi->wb_lock);
 }
 
 static void
 __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-		bool range_cyclic, bool for_background)
+		      bool range_cyclic)
 {
 	struct wb_writeback_work *work;
 
@@ -126,7 +133,6 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 	work->sync_mode	= WB_SYNC_NONE;
 	work->nr_pages	= nr_pages;
 	work->range_cyclic = range_cyclic;
-	work->for_background = for_background;
 
 	bdi_queue_work(bdi, work);
 }
@@ -144,7 +150,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
  */
 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
 {
-	__bdi_start_writeback(bdi, nr_pages, true, false);
+	__bdi_start_writeback(bdi, nr_pages, true);
 }
 
 /**
@@ -152,13 +158,20 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
  * @bdi: the backing device to write from
  *
  * Description:
- *   This does WB_SYNC_NONE background writeback. The IO is only
- *   started when this function returns, we make no guarentees on
- *   completion. Caller need not hold sb s_umount semaphore.
+ *   This makes sure WB_SYNC_NONE background writeback happens. When
+ *   this function returns, it is only guaranteed that for given BDI
+ *   some IO is happening if we are over background dirty threshold.
+ *   Caller need not hold sb s_umount semaphore.
  */
 void bdi_start_background_writeback(struct backing_dev_info *bdi)
 {
-	__bdi_start_writeback(bdi, LONG_MAX, true, true);
+	/*
+	 * We just wake up the flusher thread. It will perform background
+	 * writeback as soon as there is no other work to do.
+	 */
+	spin_lock_bh(&bdi->wb_lock);
+	bdi_wakeup_flusher(bdi);
+	spin_unlock_bh(&bdi->wb_lock);
 }
 
 /*
@@ -718,6 +731,23 @@ static unsigned long get_nr_dirty_pages(void)
 		get_nr_dirty_inodes();
 }
 
+static long wb_check_background_flush(struct bdi_writeback *wb)
+{
+	if (over_bground_thresh()) {
+
+		struct wb_writeback_work work = {
+			.nr_pages	= LONG_MAX,
+			.sync_mode	= WB_SYNC_NONE,
+			.for_background	= 1,
+			.range_cyclic	= 1,
+		};
+
+		return wb_writeback(wb, &work);
+	}
+
+	return 0;
+}
+
 static long wb_check_old_data_flush(struct bdi_writeback *wb)
 {
 	unsigned long expired;
@@ -787,6 +817,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
 	 * Check for periodic writeback, kupdated() style
 	 */
 	wrote += wb_check_old_data_flush(wb);
+	wrote += wb_check_background_flush(wb);
 	clear_bit(BDI_writeback_running, &wb->bdi->state);
 
 	return wrote;
@@ -873,7 +904,7 @@ void wakeup_flusher_threads(long nr_pages)
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		if (!bdi_has_dirty_io(bdi))
 			continue;
-		__bdi_start_writeback(bdi, nr_pages, false, false);
+		__bdi_start_writeback(bdi, nr_pages, false);
 	}
 	rcu_read_unlock();
 }