Commit 0aeb2339 authored by Johannes Weiner, committed by Linus Torvalds

vmscan: remove all_unreclaimable scan control

This scan control is abused to communicate a return value from
shrink_zones().  Write this idiomatically and remove the knob.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 142762bd
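The pattern, reduced to a standalone sketch for readers outside the kernel tree: rather than having the worker loop record "I made progress" in a field of the shared scan-control structure for the caller to read back, the loop simply returns its progress. The names below (scan_candidates, struct reclaim_state) are hypothetical stand-ins for shrink_zones() and struct scan_control; this is an illustration of the idiom, not kernel code.

/* Minimal, self-contained illustration of returning progress from the
 * scanning loop instead of signalling it through a shared struct field.
 * All identifiers here are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

struct reclaim_state {
	int pages_reclaimed;	/* stands in for sc->nr_reclaimed */
};

/* Returns nonzero if at least one candidate was actually scanned. */
static int scan_candidates(const bool *reclaimable, int n,
			   struct reclaim_state *state)
{
	int progress = 0;

	for (int i = 0; i < n; i++) {
		if (!reclaimable[i])
			continue;	/* skip, like an unreclaimable zone */
		state->pages_reclaimed++;
		progress = 1;
	}
	return progress;
}

int main(void)
{
	bool zones[] = { false, true, false };
	struct reclaim_state state = { 0 };

	/* The caller consumes the return value directly, the way
	 * do_try_to_free_pages() uses shrink_zones()'s result below. */
	if (scan_candidates(zones, 3, &state))
		printf("made progress: %d page(s)\n", state.pages_reclaimed);
	else
		printf("no progress, all candidates unreclaimable\n");
	return 0;
}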
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -73,8 +73,6 @@ struct scan_control {
 
 	int swappiness;
 
-	int all_unreclaimable;
-
 	int order;
 
 	/*
@@ -1716,14 +1714,14 @@ static void shrink_zone(int priority, struct zone *zone,
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static void shrink_zones(int priority, struct zonelist *zonelist,
+static int shrink_zones(int priority, struct zonelist *zonelist,
 					struct scan_control *sc)
 {
 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 	struct zoneref *z;
 	struct zone *zone;
+	int progress = 0;
 
-	sc->all_unreclaimable = 1;
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
 					sc->nodemask) {
 		if (!populated_zone(zone))
@@ -1739,19 +1737,19 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
 
 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
 				continue;	/* Let kswapd poll it */
-			sc->all_unreclaimable = 0;
 		} else {
 			/*
 			 * Ignore cpuset limitation here. We just want to reduce
 			 * # of used pages by us regardless of memory shortage.
 			 */
-			sc->all_unreclaimable = 0;
 			mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
 							priority);
 		}
 
 		shrink_zone(priority, zone, sc);
+		progress = 1;
 	}
+	return progress;
 }
 
 /*
@@ -1805,7 +1803,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		sc->nr_scanned = 0;
 		if (!priority)
 			disable_swap_token();
-		shrink_zones(priority, zonelist, sc);
+		ret = shrink_zones(priority, zonelist, sc);
 		/*
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
@@ -1842,7 +1840,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
 	/* top priority shrink_zones still had more to do? don't OOM, then */
-	if (!sc->all_unreclaimable && scanning_global_lru(sc))
+	if (ret && scanning_global_lru(sc))
 		ret = sc->nr_reclaimed;
 out:
 	/*