Commit 49ea7eb6 authored by Mel Gorman, committed by Linus Torvalds

mm: vmscan: immediately reclaim end-of-LRU dirty pages when writeback completes

When direct reclaim encounters a dirty page, the page is recycled around the
LRU for another full cycle before it can be reclaimed.  This patch marks the
page PageReclaim, as deactivate_page() does, so that the page is reclaimed
almost immediately after it is cleaned.  This avoids reclaiming clean pages
that are younger than a dirty page encountered at the end of the LRU, which
may well have been a use-once page.
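
For context: the PageReclaim bit set by this patch is consumed when writeback
completes.  end_page_writeback() tests and clears the bit and rotates the page
to the tail of the inactive LRU, where reclaim will find it first.  A lightly
annotated sketch of that path, as end_page_writeback() looked in mm/filemap.c
around this kernel (simplified; the comment is mine):

	/*
	 * Writeback on a page has finished.  If reclaim tagged the page
	 * with PageReclaim while it was under writeback, rotate it to
	 * the tail of the inactive LRU so it is reclaimed promptly.
	 */
	void end_page_writeback(struct page *page)
	{
		if (TestClearPageReclaim(page))
			rotate_reclaimable_page(page);

		if (!test_clear_page_writeback(page))
			BUG();

		smp_mb__after_clear_bit();
		wake_up_page(page, PG_writeback);
	}
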
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <jweiner@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Alex Elder <aelder@sgi.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 92df3a72
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -100,7 +100,7 @@ enum zone_stat_item {
 	NR_UNSTABLE_NFS,	/* NFS unstable pages */
 	NR_BOUNCE,
 	NR_VMSCAN_WRITE,
-	NR_VMSCAN_WRITE_SKIP,
+	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
 	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -866,7 +866,15 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			 */
 			if (page_is_file_cache(page) &&
 					(!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
-				inc_zone_page_state(page, NR_VMSCAN_WRITE_SKIP);
+				/*
+				 * Immediately reclaim when written back.
+				 * Similar in principle to deactivate_page()
+				 * except we already have the page isolated
+				 * and know it's dirty
+				 */
+				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
+				SetPageReclaim(page);
+
 				goto keep_locked;
 			}
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -702,7 +702,7 @@ const char * const vmstat_text[] = {
 	"nr_unstable",
 	"nr_bounce",
 	"nr_vmscan_write",
-	"nr_vmscan_write_skip",
+	"nr_vmscan_immediate_reclaim",
 	"nr_writeback_temp",
 	"nr_isolated_anon",
 	"nr_isolated_file",
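
Note: with the rename above, the counter appears in /proc/vmstat as
nr_vmscan_immediate_reclaim (summed over zones).  A steadily rising value
suggests reclaim is repeatedly finding dirty pages at the tail of the LRU
and deferring them to writeback completion rather than writing them back
itself.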