Commit b2e18757 authored by Mel Gorman, committed by Linus Torvalds

mm, vmscan: begin reclaiming pages on a per-node basis

This patch makes reclaim decisions on a per-node basis.  A reclaimer
knows what zone is required by the allocation request and skips pages
from higher zones.  In many cases this will be ok because it's a
GFP_HIGHMEM request of some description.  On 64-bit, ZONE_DMA32 requests
will cause some problems but 32-bit devices on 64-bit platforms are
increasingly rare.  Historically it would have been a major problem on
32-bit with big Highmem:Lowmem ratios but such configurations are also
now rare and even where they exist, they are not encouraged.  If it
really becomes a problem, it'll manifest as very low reclaim
efficiencies.

Link: http://lkml.kernel.org/r/1467970510-21195-6-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0f661148
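
For illustration only, here is a minimal userspace C sketch of the behaviour the patch adds to isolate_lru_pages(): pages whose zone index is above sc->reclaim_idx are diverted to a side list during the scan and spliced back to the head of the source list afterwards. This is not kernel code; the fake_page type, the hand-rolled singly linked lists, and all names are invented for the example.

/*
 * Userspace model of the skip-and-splice logic in this patch.
 * "zone_idx" stands in for page_zonenum(page); the threshold stands in
 * for sc->reclaim_idx. Build with any C99 compiler.
 */
#include <stdio.h>

struct fake_page {
	int zone_idx;
	struct fake_page *next;
};

/* Pop entries from *src; keep eligible ones, divert the rest to *skipped. */
static struct fake_page *isolate(struct fake_page **src, int reclaim_idx,
				 struct fake_page **skipped)
{
	struct fake_page *taken = NULL;

	while (*src) {
		struct fake_page *page = *src;
		*src = page->next;

		if (page->zone_idx > reclaim_idx) {
			/* models list_move(&page->lru, &pages_skipped) */
			page->next = *skipped;
			*skipped = page;
			continue;
		}
		page->next = taken;
		taken = page;
	}
	return taken;
}

/* Models list_splice(&pages_skipped, src): skipped pages go back to the head. */
static void splice_back(struct fake_page **src, struct fake_page *skipped)
{
	while (skipped) {
		struct fake_page *page = skipped;

		skipped = skipped->next;
		page->next = *src;
		*src = page;
	}
}

int main(void)
{
	struct fake_page pages[5] = {
		{ .zone_idx = 0 }, { .zone_idx = 2 }, { .zone_idx = 1 },
		{ .zone_idx = 3 }, { .zone_idx = 0 },
	};
	struct fake_page *lru = NULL, *skipped = NULL, *taken;
	int i;

	for (i = 0; i < 5; i++) {
		pages[i].next = lru;
		lru = &pages[i];
	}

	/* Reclaim on behalf of a request limited to zone index 1. */
	taken = isolate(&lru, 1, &skipped);
	splice_back(&lru, skipped);

	for (; taken; taken = taken->next)
		printf("reclaiming page from zone %d\n", taken->zone_idx);
	return 0;
}
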
@@ -84,6 +84,9 @@ struct scan_control {
 	/* Scan (total_size >> priority) pages at once */
 	int priority;
 
+	/* The highest zone to isolate pages for reclaim from */
+	enum zone_type reclaim_idx;
+
 	unsigned int may_writepage:1;
 
 	/* Can mapped pages be reclaimed? */
@@ -1392,6 +1395,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 	unsigned long nr_taken = 0;
 	unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
 	unsigned long scan, nr_pages;
+	LIST_HEAD(pages_skipped);
 
 	for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
 					!list_empty(src); scan++) {
@@ -1402,6 +1406,11 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
 		VM_BUG_ON_PAGE(!PageLRU(page), page);
 
+		if (page_zonenum(page) > sc->reclaim_idx) {
+			list_move(&page->lru, &pages_skipped);
+			continue;
+		}
+
 		switch (__isolate_lru_page(page, mode)) {
 		case 0:
 			nr_pages = hpage_nr_pages(page);
@@ -1420,6 +1429,15 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		}
 	}
 
+	/*
+	 * Splice any skipped pages to the start of the LRU list. Note that
+	 * this disrupts the LRU order when reclaiming for lower zones but
+	 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
+	 * scanning would soon rescan the same pages to skip and put the
+	 * system at risk of premature OOM.
+	 */
+	if (!list_empty(&pages_skipped))
+		list_splice(&pages_skipped, src);
 	*nr_scanned = scan;
 	trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
 				    nr_taken, mode, is_file_lru(lru));
@@ -1589,7 +1607,7 @@ static int current_may_throttle(void)
 }
 
 /*
- * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
+ * shrink_inactive_list() is a helper for shrink_node().  It returns the number
  * of reclaimed pages
  */
 static noinline_for_stack unsigned long
@@ -2401,12 +2419,13 @@ static inline bool should_continue_reclaim(struct zone *zone,
 	}
 }
 
-static bool shrink_zone(struct zone *zone, struct scan_control *sc,
-			bool is_classzone)
+static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc,
+			enum zone_type classzone_idx)
 {
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long nr_reclaimed, nr_scanned;
 	bool reclaimable = false;
+	struct zone *zone = &pgdat->node_zones[classzone_idx];
 
 	do {
 		struct mem_cgroup *root = sc->target_mem_cgroup;
@@ -2438,7 +2457,7 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
 			shrink_zone_memcg(zone, memcg, sc, &lru_pages);
 			zone_lru_pages += lru_pages;
 
-			if (memcg && is_classzone)
+			if (!global_reclaim(sc))
 				shrink_slab(sc->gfp_mask, zone_to_nid(zone),
 					    memcg, sc->nr_scanned - scanned,
 					    lru_pages);
@@ -2469,7 +2488,7 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
 		 * Shrink the slab caches in the same proportion that
 		 * the eligible LRU pages were scanned.
 		 */
-		if (global_reclaim(sc) && is_classzone)
+		if (global_reclaim(sc))
 			shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL,
 				    sc->nr_scanned - nr_scanned,
 				    zone_lru_pages);
@@ -2553,7 +2572,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 	unsigned long nr_soft_reclaimed;
 	unsigned long nr_soft_scanned;
 	gfp_t orig_mask;
-	enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
+	enum zone_type classzone_idx;
 
 	/*
 	 * If the number of buffer_heads in the machine exceeds the maximum
@@ -2561,17 +2580,23 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 	 * highmem pages could be pinning lowmem pages storing buffer_heads
 	 */
 	orig_mask = sc->gfp_mask;
-	if (buffer_heads_over_limit)
+	if (buffer_heads_over_limit) {
 		sc->gfp_mask |= __GFP_HIGHMEM;
+		sc->reclaim_idx = classzone_idx = gfp_zone(sc->gfp_mask);
+	}
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
-					gfp_zone(sc->gfp_mask), sc->nodemask) {
-		enum zone_type classzone_idx;
-
+					sc->reclaim_idx, sc->nodemask) {
 		if (!populated_zone(zone))
 			continue;
 
-		classzone_idx = requested_highidx;
+		/*
+		 * Note that reclaim_idx does not change as it is the highest
+		 * zone reclaimed from which for empty zones is a no-op but
+		 * classzone_idx is used by shrink_node to test if the slabs
+		 * should be shrunk on a given node.
+		 */
+		classzone_idx = sc->reclaim_idx;
 		while (!populated_zone(zone->zone_pgdat->node_zones +
 				       classzone_idx))
 			classzone_idx--;
@@ -2600,8 +2625,8 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		 */
 		if (IS_ENABLED(CONFIG_COMPACTION) &&
 		    sc->order > PAGE_ALLOC_COSTLY_ORDER &&
-		    zonelist_zone_idx(z) <= requested_highidx &&
-		    compaction_ready(zone, sc->order, requested_highidx)) {
+		    zonelist_zone_idx(z) <= classzone_idx &&
+		    compaction_ready(zone, sc->order, classzone_idx)) {
 			sc->compaction_ready = true;
 			continue;
 		}
@@ -2621,7 +2646,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 			/* need some check for avoid more shrink_zone() */
 		}
 
-		shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
+		shrink_node(zone->zone_pgdat, sc, classzone_idx);
 	}
 
 	/*
@@ -2847,6 +2872,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 	struct scan_control sc = {
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
 		.gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
+		.reclaim_idx = gfp_zone(gfp_mask),
 		.order = order,
 		.nodemask = nodemask,
 		.priority = DEF_PRIORITY,
@@ -2886,6 +2912,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
 		.target_mem_cgroup = memcg,
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
+		.reclaim_idx = MAX_NR_ZONES - 1,
 		.may_swap = !noswap,
 	};
 	unsigned long lru_pages;
@@ -2924,6 +2951,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
 		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
+		.reclaim_idx = MAX_NR_ZONES - 1,
 		.target_mem_cgroup = memcg,
 		.priority = DEF_PRIORITY,
 		.may_writepage = !laptop_mode,
@@ -3118,7 +3146,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
 					balance_gap, classzone_idx))
 		return true;
 
-	shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
+	shrink_node(zone->zone_pgdat, sc, classzone_idx);
 
 	/* TODO: ANOMALY */
 	clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
@@ -3167,6 +3195,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 	unsigned long nr_soft_scanned;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
+		.reclaim_idx = MAX_NR_ZONES - 1,
 		.order = order,
 		.priority = DEF_PRIORITY,
 		.may_writepage = !laptop_mode,
@@ -3237,15 +3266,14 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 			sc.may_writepage = 1;
 
 		/*
-		 * Now scan the zone in the dma->highmem direction, stopping
-		 * at the last zone which needs scanning.
-		 *
-		 * We do this because the page allocator works in the opposite
-		 * direction.  This prevents the page allocator from allocating
-		 * pages behind kswapd's direction of progress, which would
-		 * cause too much scanning of the lower zones.
+		 * Continue scanning in the highmem->dma direction stopping at
+		 * the last zone which needs scanning. This may reclaim lowmem
+		 * pages that are not necessary for zone balancing but it
+		 * preserves LRU ordering. It is assumed that the bulk of
+		 * allocation requests can use arbitrary zones with the
+		 * possible exception of big highmem:lowmem configurations.
 		 */
-		for (i = 0; i <= end_zone; i++) {
+		for (i = end_zone; i >= 0; i--) {
 			struct zone *zone = pgdat->node_zones + i;
 
 			if (!populated_zone(zone))
@@ -3256,6 +3284,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 				continue;
 
 			sc.nr_scanned = 0;
+			sc.reclaim_idx = i;
 			nr_soft_scanned = 0;
 			/*
@@ -3513,6 +3542,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 	struct scan_control sc = {
 		.nr_to_reclaim = nr_to_reclaim,
 		.gfp_mask = GFP_HIGHUSER_MOVABLE,
+		.reclaim_idx = MAX_NR_ZONES - 1,
 		.priority = DEF_PRIORITY,
 		.may_writepage = 1,
 		.may_unmap = 1,
@@ -3704,6 +3734,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_unmap = !!(zone_reclaim_mode & RECLAIM_UNMAP),
 		.may_swap = 1,
+		.reclaim_idx = zone_idx(zone),
 	};
 
 	cond_resched();
@@ -3723,7 +3754,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * priorities until we have enough memory freed.
 	 */
 	do {
-		shrink_zone(zone, &sc, true);
+		shrink_node(zone->zone_pgdat, &sc, zone_idx(zone));
 	} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
 }