Commit 871789d4 authored by Chris Down, committed by Linus Torvalds

mm, memcg: rename ambiguously named memory.stat counters and functions

I spent literally an hour trying to work out why an earlier version of
my memory.events aggregation code didn't work properly, only to find
out I was using memcg->events instead of memcg->memory_events, which is
fairly confusing.

This naming seems in need of reworking, so make it harder to do the
wrong thing by using vmevents instead of events, which makes it clearer
that these are vm counters rather than memcg-specific counters.

There are also a few other inconsistent names in both the percpu and
aggregated structs, so these are all cleaned up to be more coherent and
easy to understand.

This commit contains code cleanup only: there are no logic changes.

[akpm@linux-foundation.org: fix it for preceding changes]
Link: http://lkml.kernel.org/r/20190208224319.GA23801@chrisdown.name
Signed-off-by: Chris Down <chris@chrisdown.name>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Dennis Zhou <dennis@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b09e8936
@@ -94,8 +94,8 @@ enum mem_cgroup_events_target {
 	MEM_CGROUP_NTARGETS,
 };
 
-struct mem_cgroup_stat_cpu {
-	long count[MEMCG_NR_STAT];
+struct memcg_vmstats_percpu {
+	long stat[MEMCG_NR_STAT];
 	unsigned long events[NR_VM_EVENT_ITEMS];
 	unsigned long nr_page_events;
 	unsigned long targets[MEM_CGROUP_NTARGETS];
@@ -274,12 +274,12 @@ struct mem_cgroup {
 	struct task_struct	*move_lock_task;
 
 	/* memory.stat */
-	struct mem_cgroup_stat_cpu __percpu *stat_cpu;
+	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
 
 	MEMCG_PADDING(_pad2_);
 
-	atomic_long_t		stat[MEMCG_NR_STAT];
-	atomic_long_t		events[NR_VM_EVENT_ITEMS];
+	atomic_long_t		vmstats[MEMCG_NR_STAT];
+	atomic_long_t		vmevents[NR_VM_EVENT_ITEMS];
 	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
 
 	unsigned long		socket_pressure;
@@ -557,7 +557,7 @@ void unlock_page_memcg(struct page *page);
 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
 					     int idx)
 {
-	long x = atomic_long_read(&memcg->stat[idx]);
+	long x = atomic_long_read(&memcg->vmstats[idx]);
 #ifdef CONFIG_SMP
 	if (x < 0)
 		x = 0;
@@ -574,12 +574,12 @@ static inline void __mod_memcg_state(struct mem_cgroup *memcg,
 	if (mem_cgroup_disabled())
 		return;
 
-	x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
+	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
-		atomic_long_add(x, &memcg->stat[idx]);
+		atomic_long_add(x, &memcg->vmstats[idx]);
 		x = 0;
 	}
-	__this_cpu_write(memcg->stat_cpu->count[idx], x);
+	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
 }
 
 /* idx can be of type enum memcg_stat_item or node_stat_item */
@@ -717,12 +717,12 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
 	if (mem_cgroup_disabled())
 		return;
 
-	x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
+	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
 	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
-		atomic_long_add(x, &memcg->events[idx]);
+		atomic_long_add(x, &memcg->vmevents[idx]);
 		x = 0;
 	}
-	__this_cpu_write(memcg->stat_cpu->events[idx], x);
+	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
 }
 
 static inline void count_memcg_events(struct mem_cgroup *memcg,
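The renamed percpu fields above implement a simple write-batching scheme: each CPU accumulates a local delta and only folds it into the shared atomic counter once that delta exceeds MEMCG_CHARGE_BATCH. The following user-space C sketch illustrates the same pattern outside the kernel; it is only an illustration, and the names CHARGE_BATCH, NR_CPUS, mod_state() and read_state_exact() are made-up stand-ins, not kernel APIs.

/*
 * Minimal user-space sketch (not kernel code) of the batching used by
 * __mod_memcg_state()/__count_memcg_events(): per-CPU deltas accumulate
 * locally and are folded into the shared atomic only in batches.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define CHARGE_BATCH	32	/* stand-in for MEMCG_CHARGE_BATCH */
#define NR_CPUS		4

static atomic_long vmstat;		/* aggregated counter, like memcg->vmstats[idx] */
static long percpu_stat[NR_CPUS];	/* per-CPU remainder, like vmstats_percpu->stat[idx] */

/* Add 'val' on behalf of 'cpu'; flush to the shared atomic only in batches. */
static void mod_state(int cpu, long val)
{
	long x = val + percpu_stat[cpu];

	if (labs(x) > CHARGE_BATCH) {
		atomic_fetch_add(&vmstat, x);
		x = 0;
	}
	percpu_stat[cpu] = x;
}

/* Cheap read: may lag behind by up to NR_CPUS * CHARGE_BATCH. */
static long read_state(void)
{
	return atomic_load(&vmstat);
}

/* "Exact" read folds the per-CPU remainders back in. */
static long read_state_exact(void)
{
	long x = atomic_load(&vmstat);

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		x += percpu_stat[cpu];
	return x;
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		mod_state(i % NR_CPUS, 1);

	/* prints approx=0 exact=100: the deltas never hit the batch size */
	printf("approx=%ld exact=%ld\n", read_state(), read_state_exact());
	return 0;
}

The cheap read tolerating per-CPU remainders is why the diff below also carries an exact variant (memcg_exact_page_state()) that sums the remainders back in. The remaining hunks apply the same rename in the memcg statistics code.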
@@ -690,7 +690,7 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
 				      int event)
 {
-	return atomic_long_read(&memcg->events[event]);
+	return atomic_long_read(&memcg->vmevents[event]);
 }
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
@@ -722,7 +722,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 		nr_pages = -nr_pages; /* for event */
 	}
 
-	__this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
+	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
 }
 
 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
@@ -730,8 +730,8 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 {
 	unsigned long val, next;
 
-	val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
-	next = __this_cpu_read(memcg->stat_cpu->targets[target]);
+	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
+	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
 	/* from time_after() in jiffies.h */
 	if ((long)(next - val) < 0) {
 		switch (target) {
@@ -747,7 +747,7 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 		default:
 			break;
 		}
-		__this_cpu_write(memcg->stat_cpu->targets[target], next);
+		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
 		return true;
 	}
 	return false;
@@ -2088,9 +2088,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
 			int nid;
 			long x;
 
-			x = this_cpu_xchg(memcg->stat_cpu->count[i], 0);
+			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
 			if (x)
-				atomic_long_add(x, &memcg->stat[i]);
+				atomic_long_add(x, &memcg->vmstats[i]);
 
 			if (i >= NR_VM_NODE_STAT_ITEMS)
 				continue;
@@ -2108,9 +2108,9 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
 			long x;
 
-			x = this_cpu_xchg(memcg->stat_cpu->events[i], 0);
+			x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
 			if (x)
-				atomic_long_add(x, &memcg->events[i]);
+				atomic_long_add(x, &memcg->vmevents[i]);
 		}
 	}
 
@@ -2940,30 +2940,34 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
 	return retval;
 }
 
-struct accumulated_stats {
-	unsigned long stat[MEMCG_NR_STAT];
-	unsigned long events[NR_VM_EVENT_ITEMS];
+struct accumulated_vmstats {
+	unsigned long vmstats[MEMCG_NR_STAT];
+	unsigned long vmevents[NR_VM_EVENT_ITEMS];
 	unsigned long lru_pages[NR_LRU_LISTS];
-	const unsigned int *stats_array;
-	const unsigned int *events_array;
-	int stats_size;
-	int events_size;
+
+	/* overrides for v1 */
+	const unsigned int *vmstats_array;
+	const unsigned int *vmevents_array;
+
+	int vmstats_size;
+	int vmevents_size;
 };
 
-static void accumulate_memcg_tree(struct mem_cgroup *memcg,
-				  struct accumulated_stats *acc)
+static void accumulate_vmstats(struct mem_cgroup *memcg,
+			       struct accumulated_vmstats *acc)
 {
 	struct mem_cgroup *mi;
 	int i;
 
 	for_each_mem_cgroup_tree(mi, memcg) {
-		for (i = 0; i < acc->stats_size; i++)
-			acc->stat[i] += memcg_page_state(mi,
-				acc->stats_array ? acc->stats_array[i] : i);
+		for (i = 0; i < acc->vmstats_size; i++)
+			acc->vmstats[i] += memcg_page_state(mi,
+				acc->vmstats_array ? acc->vmstats_array[i] : i);
 
-		for (i = 0; i < acc->events_size; i++)
-			acc->events[i] += memcg_sum_events(mi,
-				acc->events_array ? acc->events_array[i] : i);
+		for (i = 0; i < acc->vmevents_size; i++)
+			acc->vmevents[i] += memcg_sum_events(mi,
+				acc->vmevents_array
+				? acc->vmevents_array[i] : i);
 
 		for (i = 0; i < NR_LRU_LISTS; i++)
 			acc->lru_pages[i] += memcg_page_state(mi,
@@ -3414,7 +3418,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 	unsigned long memory, memsw;
 	struct mem_cgroup *mi;
 	unsigned int i;
-	struct accumulated_stats acc;
+	struct accumulated_vmstats acc;
 
 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
@@ -3449,22 +3453,22 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 		   (u64)memsw * PAGE_SIZE);
 
 	memset(&acc, 0, sizeof(acc));
-	acc.stats_size = ARRAY_SIZE(memcg1_stats);
-	acc.stats_array = memcg1_stats;
-	acc.events_size = ARRAY_SIZE(memcg1_events);
-	acc.events_array = memcg1_events;
-	accumulate_memcg_tree(memcg, &acc);
+	acc.vmstats_size = ARRAY_SIZE(memcg1_stats);
+	acc.vmstats_array = memcg1_stats;
+	acc.vmevents_size = ARRAY_SIZE(memcg1_events);
+	acc.vmevents_array = memcg1_events;
+	accumulate_vmstats(memcg, &acc);
 
 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
 			continue;
 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
-			   (u64)acc.stat[i] * PAGE_SIZE);
+			   (u64)acc.vmstats[i] * PAGE_SIZE);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
 		seq_printf(m, "total_%s %llu\n", memcg1_event_names[i],
-			   (u64)acc.events[i]);
+			   (u64)acc.vmevents[i]);
 
 	for (i = 0; i < NR_LRU_LISTS; i++)
 		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i],
@@ -3901,11 +3905,11 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
  */
 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
 {
-	long x = atomic_long_read(&memcg->stat[idx]);
+	long x = atomic_long_read(&memcg->vmstats[idx]);
 	int cpu;
 
 	for_each_online_cpu(cpu)
-		x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx];
+		x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
 	if (x < 0)
 		x = 0;
 	return x;
@@ -4445,7 +4449,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 
 	for_each_node(node)
 		free_mem_cgroup_per_node_info(memcg, node);
-	free_percpu(memcg->stat_cpu);
+	free_percpu(memcg->vmstats_percpu);
 	kfree(memcg);
 }
 
@@ -4474,8 +4478,8 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 	if (memcg->id.id < 0)
 		goto fail;
 
-	memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu);
-	if (!memcg->stat_cpu)
+	memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
+	if (!memcg->vmstats_percpu)
 		goto fail;
 
 	for_each_node(node)
@@ -5561,7 +5565,7 @@ static int memory_events_show(struct seq_file *m, void *v)
 static int memory_stat_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
-	struct accumulated_stats acc;
+	struct accumulated_vmstats acc;
 	int i;
 
 	/*
@@ -5576,30 +5580,30 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	 */
 
 	memset(&acc, 0, sizeof(acc));
-	acc.stats_size = MEMCG_NR_STAT;
-	acc.events_size = NR_VM_EVENT_ITEMS;
-	accumulate_memcg_tree(memcg, &acc);
+	acc.vmstats_size = MEMCG_NR_STAT;
+	acc.vmevents_size = NR_VM_EVENT_ITEMS;
+	accumulate_vmstats(memcg, &acc);
 
 	seq_printf(m, "anon %llu\n",
-		   (u64)acc.stat[MEMCG_RSS] * PAGE_SIZE);
+		   (u64)acc.vmstats[MEMCG_RSS] * PAGE_SIZE);
 	seq_printf(m, "file %llu\n",
-		   (u64)acc.stat[MEMCG_CACHE] * PAGE_SIZE);
+		   (u64)acc.vmstats[MEMCG_CACHE] * PAGE_SIZE);
 	seq_printf(m, "kernel_stack %llu\n",
-		   (u64)acc.stat[MEMCG_KERNEL_STACK_KB] * 1024);
+		   (u64)acc.vmstats[MEMCG_KERNEL_STACK_KB] * 1024);
 	seq_printf(m, "slab %llu\n",
-		   (u64)(acc.stat[NR_SLAB_RECLAIMABLE] +
-			 acc.stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
+		   (u64)(acc.vmstats[NR_SLAB_RECLAIMABLE] +
+			 acc.vmstats[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
 	seq_printf(m, "sock %llu\n",
-		   (u64)acc.stat[MEMCG_SOCK] * PAGE_SIZE);
+		   (u64)acc.vmstats[MEMCG_SOCK] * PAGE_SIZE);
 
 	seq_printf(m, "shmem %llu\n",
-		   (u64)acc.stat[NR_SHMEM] * PAGE_SIZE);
+		   (u64)acc.vmstats[NR_SHMEM] * PAGE_SIZE);
 	seq_printf(m, "file_mapped %llu\n",
-		   (u64)acc.stat[NR_FILE_MAPPED] * PAGE_SIZE);
+		   (u64)acc.vmstats[NR_FILE_MAPPED] * PAGE_SIZE);
 	seq_printf(m, "file_dirty %llu\n",
-		   (u64)acc.stat[NR_FILE_DIRTY] * PAGE_SIZE);
+		   (u64)acc.vmstats[NR_FILE_DIRTY] * PAGE_SIZE);
 	seq_printf(m, "file_writeback %llu\n",
-		   (u64)acc.stat[NR_WRITEBACK] * PAGE_SIZE);
+		   (u64)acc.vmstats[NR_WRITEBACK] * PAGE_SIZE);
 
 	/*
 	 * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
@@ -5608,43 +5612,43 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	 * where the page->mem_cgroup is set up and stable.
 	 */
 	seq_printf(m, "anon_thp %llu\n",
-		   (u64)acc.stat[MEMCG_RSS_HUGE] * PAGE_SIZE);
+		   (u64)acc.vmstats[MEMCG_RSS_HUGE] * PAGE_SIZE);
 
 	for (i = 0; i < NR_LRU_LISTS; i++)
 		seq_printf(m, "%s %llu\n", mem_cgroup_lru_names[i],
 			   (u64)acc.lru_pages[i] * PAGE_SIZE);
 
 	seq_printf(m, "slab_reclaimable %llu\n",
-		   (u64)acc.stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE);
+		   (u64)acc.vmstats[NR_SLAB_RECLAIMABLE] * PAGE_SIZE);
 	seq_printf(m, "slab_unreclaimable %llu\n",
-		   (u64)acc.stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
+		   (u64)acc.vmstats[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
 
 	/* Accumulated memory events */
 
-	seq_printf(m, "pgfault %lu\n", acc.events[PGFAULT]);
-	seq_printf(m, "pgmajfault %lu\n", acc.events[PGMAJFAULT]);
+	seq_printf(m, "pgfault %lu\n", acc.vmevents[PGFAULT]);
+	seq_printf(m, "pgmajfault %lu\n", acc.vmevents[PGMAJFAULT]);
 
 	seq_printf(m, "workingset_refault %lu\n",
-		   acc.stat[WORKINGSET_REFAULT]);
+		   acc.vmstats[WORKINGSET_REFAULT]);
 	seq_printf(m, "workingset_activate %lu\n",
-		   acc.stat[WORKINGSET_ACTIVATE]);
+		   acc.vmstats[WORKINGSET_ACTIVATE]);
 	seq_printf(m, "workingset_nodereclaim %lu\n",
-		   acc.stat[WORKINGSET_NODERECLAIM]);
+		   acc.vmstats[WORKINGSET_NODERECLAIM]);
 
-	seq_printf(m, "pgrefill %lu\n", acc.events[PGREFILL]);
-	seq_printf(m, "pgscan %lu\n", acc.events[PGSCAN_KSWAPD] +
-		   acc.events[PGSCAN_DIRECT]);
-	seq_printf(m, "pgsteal %lu\n", acc.events[PGSTEAL_KSWAPD] +
-		   acc.events[PGSTEAL_DIRECT]);
-	seq_printf(m, "pgactivate %lu\n", acc.events[PGACTIVATE]);
-	seq_printf(m, "pgdeactivate %lu\n", acc.events[PGDEACTIVATE]);
-	seq_printf(m, "pglazyfree %lu\n", acc.events[PGLAZYFREE]);
-	seq_printf(m, "pglazyfreed %lu\n", acc.events[PGLAZYFREED]);
+	seq_printf(m, "pgrefill %lu\n", acc.vmevents[PGREFILL]);
+	seq_printf(m, "pgscan %lu\n", acc.vmevents[PGSCAN_KSWAPD] +
+		   acc.vmevents[PGSCAN_DIRECT]);
+	seq_printf(m, "pgsteal %lu\n", acc.vmevents[PGSTEAL_KSWAPD] +
+		   acc.vmevents[PGSTEAL_DIRECT]);
+	seq_printf(m, "pgactivate %lu\n", acc.vmevents[PGACTIVATE]);
+	seq_printf(m, "pgdeactivate %lu\n", acc.vmevents[PGDEACTIVATE]);
+	seq_printf(m, "pglazyfree %lu\n", acc.vmevents[PGLAZYFREE]);
+	seq_printf(m, "pglazyfreed %lu\n", acc.vmevents[PGLAZYFREED]);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	seq_printf(m, "thp_fault_alloc %lu\n", acc.events[THP_FAULT_ALLOC]);
+	seq_printf(m, "thp_fault_alloc %lu\n", acc.vmevents[THP_FAULT_ALLOC]);
 	seq_printf(m, "thp_collapse_alloc %lu\n",
-		   acc.events[THP_COLLAPSE_ALLOC]);
+		   acc.vmevents[THP_COLLAPSE_ALLOC]);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 	return 0;
@@ -6080,7 +6084,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
 	__mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
-	__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
+	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
 	local_irq_restore(flags);
 
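For reference, the renamed accumulate_vmstats() walks every memcg in the subtree and sums each counter into an accumulator, optionally remapping indices through the v1 override arrays. A rough user-space C sketch of that accumulation pattern follows; the toy tree layout, counter count and names (NR_COUNTERS, children[]) are invented for illustration and are not part of the kernel API.

/*
 * Minimal sketch (not kernel code) of the accumulate_vmstats() pattern:
 * walk a subtree and sum each counter, with optional index remapping
 * in the style of the v1 "overrides" arrays.
 */
#include <stdio.h>

#define NR_COUNTERS 4

struct cgroup {
	unsigned long vmstats[NR_COUNTERS];
	struct cgroup *children[2];	/* toy fixed-width tree */
};

struct accumulated_vmstats {
	unsigned long vmstats[NR_COUNTERS];
	const unsigned int *vmstats_array;	/* index overrides, may be NULL */
	int vmstats_size;
};

/* Recursively add every descendant's counters into 'acc'. */
static void accumulate_vmstats(const struct cgroup *cg,
			       struct accumulated_vmstats *acc)
{
	if (!cg)
		return;

	for (int i = 0; i < acc->vmstats_size; i++) {
		unsigned int idx = acc->vmstats_array ? acc->vmstats_array[i] : i;

		acc->vmstats[i] += cg->vmstats[idx];
	}
	for (int c = 0; c < 2; c++)
		accumulate_vmstats(cg->children[c], acc);
}

int main(void)
{
	struct cgroup child = { .vmstats = { 1, 2, 3, 4 } };
	struct cgroup root  = { .vmstats = { 10, 20, 30, 40 },
				.children = { &child, NULL } };
	struct accumulated_vmstats acc = { .vmstats_size = NR_COUNTERS };

	accumulate_vmstats(&root, &acc);
	for (int i = 0; i < NR_COUNTERS; i++)
		printf("counter %d: %lu\n", i, acc.vmstats[i]);	/* 11 22 33 44 */
	return 0;
}

As in the kernel code, leaving the override array NULL makes the accumulator index the counters directly (the cgroup v2 memory.stat case), while a non-NULL array lets the v1 interface pick out its own subset of counters.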