Commit 3a2e9a5a authored by Wu Fengguang's avatar Wu Fengguang Committed by Jens Axboe

writeback: balance_dirty_pages() shall write more than dirtied pages

Some filesystems may choose to write much more than ratelimit_pages
before calling balance_dirty_pages_ratelimited_nr(). So it is safer to
determine the number of pages to write based on the real number of dirtied pages.

Otherwise it is possible that
  loop {
    btrfs_file_write():     dirty 1024 pages
    balance_dirty_pages():  write up to 48 pages (= ratelimit_pages * 1.5)
  }
in which the writeback rate cannot keep up with dirty rate, and the
dirty pages go all the way beyond dirty_thresh.

The increased write_chunk may make the dirtier more bumpy.
So filesystems should take care not to dirty too much at
a time (eg. > 4MB) without checking the ratelimit.
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent a5989bdc
...@@ -44,12 +44,15 @@ static long ratelimit_pages = 32; ...@@ -44,12 +44,15 @@ static long ratelimit_pages = 32;
/* /*
* When balance_dirty_pages decides that the caller needs to perform some * When balance_dirty_pages decides that the caller needs to perform some
* non-background writeback, this is how many pages it will attempt to write. * non-background writeback, this is how many pages it will attempt to write.
* It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably * It should be somewhat larger than dirtied pages to ensure that reasonably
* large amounts of I/O are submitted. * large amounts of I/O are submitted.
*/ */
static inline long sync_writeback_pages(void) static inline long sync_writeback_pages(unsigned long dirtied)
{ {
return ratelimit_pages + ratelimit_pages / 2; if (dirtied < ratelimit_pages)
dirtied = ratelimit_pages;
return dirtied + dirtied / 2;
} }
/* The following parameters are exported via /proc/sys/vm */ /* The following parameters are exported via /proc/sys/vm */
...@@ -477,7 +480,8 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty, ...@@ -477,7 +480,8 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
* If we're over `background_thresh' then pdflush is woken to perform some * If we're over `background_thresh' then pdflush is woken to perform some
* writeout. * writeout.
*/ */
static void balance_dirty_pages(struct address_space *mapping) static void balance_dirty_pages(struct address_space *mapping,
unsigned long write_chunk)
{ {
long nr_reclaimable, bdi_nr_reclaimable; long nr_reclaimable, bdi_nr_reclaimable;
long nr_writeback, bdi_nr_writeback; long nr_writeback, bdi_nr_writeback;
...@@ -485,7 +489,6 @@ static void balance_dirty_pages(struct address_space *mapping) ...@@ -485,7 +489,6 @@ static void balance_dirty_pages(struct address_space *mapping)
unsigned long dirty_thresh; unsigned long dirty_thresh;
unsigned long bdi_thresh; unsigned long bdi_thresh;
unsigned long pages_written = 0; unsigned long pages_written = 0;
unsigned long write_chunk = sync_writeback_pages();
unsigned long pause = 1; unsigned long pause = 1;
struct backing_dev_info *bdi = mapping->backing_dev_info; struct backing_dev_info *bdi = mapping->backing_dev_info;
...@@ -640,9 +643,10 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, ...@@ -640,9 +643,10 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
p = &__get_cpu_var(bdp_ratelimits); p = &__get_cpu_var(bdp_ratelimits);
*p += nr_pages_dirtied; *p += nr_pages_dirtied;
if (unlikely(*p >= ratelimit)) { if (unlikely(*p >= ratelimit)) {
ratelimit = sync_writeback_pages(*p);
*p = 0; *p = 0;
preempt_enable(); preempt_enable();
balance_dirty_pages(mapping); balance_dirty_pages(mapping, ratelimit);
return; return;
} }
preempt_enable(); preempt_enable();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment