Commit 2b487e59 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: push down mem_cgroup_node_nr_lru_pages()

mem_cgroup_node_nr_lru_pages() is just a convenience wrapper around
lruvec_page_state() that takes bitmasks of lru indexes and aggregates the
counts for those.

Replace callsites where the bitmask is simple enough with direct
lruvec_page_state() calls.

This removes the last extern user of mem_cgroup_node_nr_lru_pages(), so
make that function private again, too.

Link: http://lkml.kernel.org/r/20190228163020.24100-5-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <guro@fb.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 22796c84
...@@ -501,9 +501,6 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); ...@@ -501,9 +501,6 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int zid, int nr_pages); int zid, int nr_pages);
unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask);
static inline static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
enum lru_list lru, int zone_idx) enum lru_list lru, int zone_idx)
...@@ -954,13 +951,6 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, ...@@ -954,13 +951,6 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
return 0; return 0;
} }
static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask)
{
return 0;
}
static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{ {
return 0; return 0;
......
...@@ -725,7 +725,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, ...@@ -725,7 +725,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
__this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages); __this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
} }
unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask) int nid, unsigned int lru_mask)
{ {
struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg); struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
...@@ -1425,11 +1425,15 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, ...@@ -1425,11 +1425,15 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg, static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
int nid, bool noswap) int nid, bool noswap)
{ {
if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE)) struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
if (lruvec_page_state(lruvec, NR_INACTIVE_FILE) ||
lruvec_page_state(lruvec, NR_ACTIVE_FILE))
return true; return true;
if (noswap || !total_swap_pages) if (noswap || !total_swap_pages)
return false; return false;
if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON)) if (lruvec_page_state(lruvec, NR_INACTIVE_ANON) ||
lruvec_page_state(lruvec, NR_ACTIVE_ANON))
return true; return true;
return false; return false;
......
...@@ -426,10 +426,11 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker, ...@@ -426,10 +426,11 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
#ifdef CONFIG_MEMCG #ifdef CONFIG_MEMCG
if (sc->memcg) { if (sc->memcg) {
struct lruvec *lruvec; struct lruvec *lruvec;
int i;
pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
LRU_ALL);
lruvec = mem_cgroup_lruvec(NODE_DATA(sc->nid), sc->memcg); lruvec = mem_cgroup_lruvec(NODE_DATA(sc->nid), sc->memcg);
for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
pages += lruvec_page_state(lruvec, NR_LRU_BASE + i);
pages += lruvec_page_state(lruvec, NR_SLAB_RECLAIMABLE); pages += lruvec_page_state(lruvec, NR_SLAB_RECLAIMABLE);
pages += lruvec_page_state(lruvec, NR_SLAB_UNRECLAIMABLE); pages += lruvec_page_state(lruvec, NR_SLAB_UNRECLAIMABLE);
} else } else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment