Commit de3b0150 authored by Johannes Weiner's avatar Johannes Weiner Committed by Linus Torvalds

mm: vmscan: simplify lruvec_lru_size()

Patch series "mm: vmscan: cgroup-related cleanups".

Here are 8 patches that clean up the reclaim code's interaction with
cgroups a bit. They're not supposed to change any behavior, just make
the implementation easier to understand and work with.

This patch (of 8):

This function currently takes the node or lruvec size and subtracts the
zones that are excluded by the classzone index of the allocation.  It uses
four different types of counters to do this.

Just add up the eligible zones.

[cai@lca.pw: fix an undefined behavior for zone id]
  Link: http://lkml.kernel.org/r/20191108204407.1435-1-cai@lca.pw
[akpm@linux-foundation.org: deal with the MAX_NR_ZONES special case. per Qian Cai]
  Link: http://lkml.kernel.org/r/64E60F6F-7582-427B-8DD5-EF97B1656F5A@lca.pw
Link: http://lkml.kernel.org/r/20191022144803.302233-2-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cb16556d
...@@ -351,32 +351,21 @@ unsigned long zone_reclaimable_pages(struct zone *zone) ...@@ -351,32 +351,21 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
*/ */
unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx) unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
{ {
unsigned long lru_size = 0; unsigned long size = 0;
int zid; int zid;
if (!mem_cgroup_disabled()) { for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
for (zid = 0; zid < MAX_NR_ZONES; zid++)
lru_size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
} else
lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
unsigned long size;
if (!managed_zone(zone)) if (!managed_zone(zone))
continue; continue;
if (!mem_cgroup_disabled()) if (!mem_cgroup_disabled())
size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid); size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
else else
size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid], size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
NR_ZONE_LRU_BASE + lru);
lru_size -= min(size, lru_size);
} }
return size;
return lru_size;
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment