Commit f84f6e2b authored by Mel Gorman, committed by Linus Torvalds

mm: vmscan: do not writeback filesystem pages in kswapd except in high priority

It is preferable that no dirty pages are dispatched for cleaning from the
page reclaim path.  At normal priorities, this patch therefore prevents
kswapd from writing back filesystem pages.

However, page reclaim does have a requirement that pages be freed in a
particular zone.  If it is failing to make sufficient progress (reclaiming
< SWAP_CLUSTER_MAX at any priority), the priority is raised to
scan more pages.  A priority of DEF_PRIORITY - 3 is considered to be the
point where kswapd is getting into trouble reclaiming pages.  If this
priority is reached, kswapd will dispatch pages for writing.
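
For reference, the scan priority in this era of mm/vmscan.c counts down
from DEF_PRIORITY (12, per include/linux/swap.h) towards 0 as pressure
rises, so "raising" the priority means a lower number.  A minimal
userspace sketch of the gate this patch adds; the helper may_writeback()
is hypothetical, not a kernel function:

#include <stdio.h>
#include <stdbool.h>

#define DEF_PRIORITY 12	/* mirrors include/linux/swap.h; counts down */

/* Hypothetical helper mirroring the patched check in shrink_page_list():
 * file pages are written back from reclaim only by kswapd, and only once
 * the scan priority has dropped below DEF_PRIORITY - 2. */
static bool may_writeback(bool is_kswapd, int priority)
{
	return is_kswapd && priority < DEF_PRIORITY - 2;
}

int main(void)
{
	for (int priority = DEF_PRIORITY; priority >= 0; priority--)
		printf("priority %2d: kswapd writeback %s\n", priority,
		       may_writeback(true, priority) ? "allowed" : "skipped");
	return 0;
}

Compiled and run, this prints "skipped" for priorities 12 down to 10 and
"allowed" from 9 downwards.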
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Alex Elder <aelder@sgi.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 966dbde2
mm/vmscan.c
@@ -750,7 +750,8 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
                                       struct zone *zone,
-                                      struct scan_control *sc)
+                                      struct scan_control *sc,
+                                      int priority)
 {
         LIST_HEAD(ret_pages);
         LIST_HEAD(free_pages);
@@ -856,9 +857,11 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                         /*
                          * Only kswapd can writeback filesystem pages to
-                         * avoid risk of stack overflow
+                         * avoid risk of stack overflow but do not writeback
+                         * unless under significant pressure.
                          */
-                        if (page_is_file_cache(page) && !current_is_kswapd()) {
+                        if (page_is_file_cache(page) &&
+                            (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
                                 inc_zone_page_state(page, NR_VMSCAN_WRITE_SKIP);
                                 goto keep_locked;
                         }
@@ -1509,12 +1512,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
         spin_unlock_irq(&zone->lru_lock);
 
-        nr_reclaimed = shrink_page_list(&page_list, zone, sc);
+        nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority);
 
         /* Check if we should syncronously wait for writeback */
         if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
                 set_reclaim_mode(priority, sc, true);
-                nr_reclaimed += shrink_page_list(&page_list, zone, sc);
+                nr_reclaimed += shrink_page_list(&page_list, zone, sc, priority);
         }
 
         local_irq_disable();
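
Note how the code and the changelog line up: writeback is skipped while
priority >= DEF_PRIORITY - 2, so kswapd first dispatches pages for writing
once the scan priority reaches DEF_PRIORITY - 3.  The priority argument is
threaded through both shrink_page_list() call sites in
shrink_inactive_list(), including the synchronous retry taken when
should_reclaim_stall() fires, so the retried pass applies the same gate as
the first.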