Commit d79154bb authored by Hugh Dickins, committed by Linus Torvalds

memcg: replace mem and mem_cont stragglers

Replace mem and mem_cont stragglers in memcontrol.c by memcg.
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Kirill A. Shutemov <kirill@shutemov.name>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 31a79235
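The one non-mechanical hunk below is the struct mem_cgroup_per_zone rename, whose comment notes that container_of cannot be used to reach the owning memcg. A minimal sketch of why, simplified from the structs visible in this diff: the constants are illustrative, and the nodeinfo field is flattened here (the kernel of this era reaches it as memcg->info.nodeinfo, as the alloc_mem_cgroup_per_zone_info hunk shows).

    #define MAX_NR_ZONES    4       /* illustrative value, not the kernel's */
    #define MAX_NUMNODES    64      /* illustrative value, not the kernel's */

    struct mem_cgroup;              /* forward declaration */

    /* Each per-zone entry keeps an explicit back pointer to its memcg... */
    struct mem_cgroup_per_zone {
            struct mem_cgroup *memcg;       /* the renamed back pointer */
    };

    /* ...because it is embedded in a per-node block... */
    struct mem_cgroup_per_node {
            struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
    };

    /*
     * ...and the per-node blocks are separate allocations reached through
     * pointers, so no container_of() chain leads from a per-zone entry
     * back to the enclosing struct mem_cgroup.
     */
    struct mem_cgroup {
            struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
    };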
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -144,7 +144,7 @@ struct mem_cgroup_per_zone {
 	unsigned long long	usage_in_excess;/* Set to the value by which */
 						/* the soft limit is exceeded*/
 	bool			on_tree;
-	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
+	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
 						/* use container_of	   */
 };
 /* Macro for accessing counter */
@@ -612,9 +612,9 @@ __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 	 * we will to add it back at the end of reclaim to its correct
 	 * position in the tree.
 	 */
-	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
-	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
-	    !css_tryget(&mz->mem->css))
+	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
+	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
+	    !css_tryget(&mz->memcg->css))
 		goto retry;
 done:
 	return mz;
@@ -1772,22 +1772,22 @@ static DEFINE_SPINLOCK(memcg_oom_lock);
 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
 
 struct oom_wait_info {
-	struct mem_cgroup *mem;
+	struct mem_cgroup *memcg;
 	wait_queue_t	wait;
 };
 
 static int memcg_oom_wake_function(wait_queue_t *wait,
 	unsigned mode, int sync, void *arg)
 {
-	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg,
-			  *oom_wait_memcg;
+	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
+	struct mem_cgroup *oom_wait_memcg;
 	struct oom_wait_info *oom_wait_info;
 
 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
-	oom_wait_memcg = oom_wait_info->mem;
+	oom_wait_memcg = oom_wait_info->memcg;
 
 	/*
-	 * Both of oom_wait_info->mem and wake_mem are stable under us.
+	 * Both of oom_wait_info->memcg and wake_memcg are stable under us.
 	 * Then we can use css_is_ancestor without taking care of RCU.
 	 */
 	if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
@@ -1816,7 +1816,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 	struct oom_wait_info owait;
 	bool locked, need_to_kill;
 
-	owait.mem = memcg;
+	owait.memcg = memcg;
 	owait.wait.flags = 0;
 	owait.wait.func = memcg_oom_wake_function;
 	owait.wait.private = current;
@@ -3549,7 +3549,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 			break;
 
 		nr_scanned = 0;
-		reclaimed = mem_cgroup_soft_reclaim(mz->mem, zone,
+		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
 						    gfp_mask, &nr_scanned);
 		nr_reclaimed += reclaimed;
 		*total_scanned += nr_scanned;
@@ -3576,13 +3576,13 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 				next_mz =
 				__mem_cgroup_largest_soft_limit_node(mctz);
 				if (next_mz == mz)
-					css_put(&next_mz->mem->css);
+					css_put(&next_mz->memcg->css);
 				else /* next_mz == NULL or other memcg */
 					break;
 			} while (1);
 		}
-		__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
-		excess = res_counter_soft_limit_excess(&mz->mem->res);
+		__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
+		excess = res_counter_soft_limit_excess(&mz->memcg->res);
 		/*
 		 * One school of thought says that we should not add
 		 * back the node to the tree if reclaim returns 0.
@@ -3592,9 +3592,9 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 		 * term TODO.
 		 */
 		/* If excess == 0, no tree ops */
-		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
+		__mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
 		spin_unlock(&mctz->lock);
-		css_put(&mz->mem->css);
+		css_put(&mz->memcg->css);
 		loop++;
 		/*
 		 * Could not reclaim anything and there are no more
@@ -3607,7 +3607,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 			break;
 	} while (!nr_reclaimed);
 	if (next_mz)
-		css_put(&next_mz->mem->css);
+		css_put(&next_mz->memcg->css);
 	return nr_reclaimed;
 }
@@ -4098,38 +4098,38 @@ static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
 	unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
 	unsigned long node_nr;
 	struct cgroup *cont = m->private;
-	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
-	total_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL);
+	total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
 	seq_printf(m, "total=%lu", total_nr);
 	for_each_node_state(nid, N_HIGH_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid, LRU_ALL);
+		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
 		seq_printf(m, " N%d=%lu", nid, node_nr);
 	}
 	seq_putc(m, '\n');
 
-	file_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_FILE);
+	file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
 	seq_printf(m, "file=%lu", file_nr);
 	for_each_node_state(nid, N_HIGH_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
+		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
 				LRU_ALL_FILE);
 		seq_printf(m, " N%d=%lu", nid, node_nr);
 	}
 	seq_putc(m, '\n');
 
-	anon_nr = mem_cgroup_nr_lru_pages(mem_cont, LRU_ALL_ANON);
+	anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
 	seq_printf(m, "anon=%lu", anon_nr);
 	for_each_node_state(nid, N_HIGH_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
+		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
 				LRU_ALL_ANON);
 		seq_printf(m, " N%d=%lu", nid, node_nr);
 	}
 	seq_putc(m, '\n');
 
-	unevictable_nr = mem_cgroup_nr_lru_pages(mem_cont, BIT(LRU_UNEVICTABLE));
+	unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
 	seq_printf(m, "unevictable=%lu", unevictable_nr);
 	for_each_node_state(nid, N_HIGH_MEMORY) {
-		node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid,
+		node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
 				BIT(LRU_UNEVICTABLE));
 		seq_printf(m, " N%d=%lu", nid, node_nr);
 	}
@@ -4141,12 +4141,12 @@ static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
 				 struct cgroup_map_cb *cb)
 {
-	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
+	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 	struct mcs_total_stat mystat;
 	int i;
 
 	memset(&mystat, 0, sizeof(mystat));
-	mem_cgroup_get_local_stat(mem_cont, &mystat);
+	mem_cgroup_get_local_stat(memcg, &mystat);
 
 	for (i = 0; i < NR_MCS_STAT; i++) {
@@ -4158,14 +4158,14 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
 	/* Hierarchical information */
 	{
 		unsigned long long limit, memsw_limit;
-		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
+		memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
 		cb->fill(cb, "hierarchical_memory_limit", limit);
 		if (do_swap_account)
 			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
 	}
 
 	memset(&mystat, 0, sizeof(mystat));
-	mem_cgroup_get_total_stat(mem_cont, &mystat);
+	mem_cgroup_get_total_stat(memcg, &mystat);
 
 	for (i = 0; i < NR_MCS_STAT; i++) {
 		if (i == MCS_SWAP && !do_swap_account)
 			continue;
@@ -4181,7 +4181,7 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
 		for_each_online_node(nid)
 			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
-				mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
+				mz = mem_cgroup_zoneinfo(memcg, nid, zid);
 				recent_rotated[0] +=
 					mz->reclaim_stat.recent_rotated[0];
@@ -4758,7 +4758,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 		INIT_LIST_HEAD(&mz->lruvec.lists[l]);
 		mz->usage_in_excess = 0;
 		mz->on_tree = false;
-		mz->mem = memcg;
+		mz->memcg = memcg;
 	}
 	memcg->info.nodeinfo[node] = pn;
 	return 0;
@@ -4771,29 +4771,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
-	struct mem_cgroup *mem;
+	struct mem_cgroup *memcg;
 	int size = sizeof(struct mem_cgroup);
 
 	/* Can be very big if MAX_NUMNODES is very big */
 	if (size < PAGE_SIZE)
-		mem = kzalloc(size, GFP_KERNEL);
+		memcg = kzalloc(size, GFP_KERNEL);
 	else
-		mem = vzalloc(size);
+		memcg = vzalloc(size);
 
-	if (!mem)
+	if (!memcg)
 		return NULL;
 
-	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
-	if (!mem->stat)
+	memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
+	if (!memcg->stat)
 		goto out_free;
-	spin_lock_init(&mem->pcp_counter_lock);
-	return mem;
+	spin_lock_init(&memcg->pcp_counter_lock);
+	return memcg;
 
 out_free:
 	if (size < PAGE_SIZE)
-		kfree(mem);
+		kfree(memcg);
 	else
-		vfree(mem);
+		vfree(memcg);
 	return NULL;
 }