Commit 871789d4 authored by Chris Down, committed by Linus Torvalds

mm, memcg: rename ambiguously named memory.stat counters and functions

I spent literally an hour trying to work out why an earlier version of
my memory.events aggregation code didn't work properly, only to find
out that I was calling memcg->events instead of memcg->memory_events,
which is fairly confusing.

This naming seems in need of reworking, so make it harder to do the
wrong thing by using vmevents instead of events, which makes it
clearer that these are vm counters rather than memcg-specific
counters.

There are also a few other inconsistent names in both the percpu and
aggregated structs, so these are all cleaned up to be more coherent
and easier to understand.

This commit contains code cleanup only: there are no logic changes.
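
For reference, the renames visible in the diff below are:

    struct mem_cgroup_stat_cpu   ->  struct memcg_vmstats_percpu
    count[] in the percpu struct ->  stat[]
    memcg->stat_cpu              ->  memcg->vmstats_percpu
    memcg->stat                  ->  memcg->vmstats
    memcg->events                ->  memcg->vmevents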

[akpm@linux-foundation.org: fix it for preceding changes]
Link: http://lkml.kernel.org/r/20190208224319.GA23801@chrisdown.name
Signed-off-by: Chris Down <chris@chrisdown.name>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Dennis Zhou <dennis@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b09e8936
include/linux/memcontrol.h:

@@ -94,8 +94,8 @@ enum mem_cgroup_events_target {
 	MEM_CGROUP_NTARGETS,
 };
 
-struct mem_cgroup_stat_cpu {
-	long count[MEMCG_NR_STAT];
+struct memcg_vmstats_percpu {
+	long stat[MEMCG_NR_STAT];
 	unsigned long events[NR_VM_EVENT_ITEMS];
 	unsigned long nr_page_events;
 	unsigned long targets[MEM_CGROUP_NTARGETS];
@@ -274,12 +274,12 @@ struct mem_cgroup {
 	struct task_struct	*move_lock_task;
 
 	/* memory.stat */
-	struct mem_cgroup_stat_cpu __percpu *stat_cpu;
+	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
 
 	MEMCG_PADDING(_pad2_);
 
-	atomic_long_t		stat[MEMCG_NR_STAT];
-	atomic_long_t		events[NR_VM_EVENT_ITEMS];
+	atomic_long_t		vmstats[MEMCG_NR_STAT];
+	atomic_long_t		vmevents[NR_VM_EVENT_ITEMS];
 	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
 
 	unsigned long		socket_pressure;
@@ -557,7 +557,7 @@ void unlock_page_memcg(struct page *page);
 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
 					     int idx)
 {
-	long x = atomic_long_read(&memcg->stat[idx]);
+	long x = atomic_long_read(&memcg->vmstats[idx]);
 #ifdef CONFIG_SMP
 	if (x < 0)
 		x = 0;
@@ -574,12 +574,12 @@ static inline void __mod_memcg_state(struct mem_cgroup *memcg,
 	if (mem_cgroup_disabled())
 		return;
 
-	x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
+	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
-		atomic_long_add(x, &memcg->stat[idx]);
+		atomic_long_add(x, &memcg->vmstats[idx]);
 		x = 0;
 	}
-	__this_cpu_write(memcg->stat_cpu->count[idx], x);
+	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
 }
 
 /* idx can be of type enum memcg_stat_item or node_stat_item */
@@ -717,12 +717,12 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
 	if (mem_cgroup_disabled())
 		return;
 
-	x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
+	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
 	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
-		atomic_long_add(x, &memcg->events[idx]);
+		atomic_long_add(x, &memcg->vmevents[idx]);
 		x = 0;
 	}
-	__this_cpu_write(memcg->stat_cpu->events[idx], x);
+	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
 }
 
 static inline void count_memcg_events(struct mem_cgroup *memcg,
[The rest of the diff is collapsed in this view.]
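
The write paths above, __mod_memcg_state() and __count_memcg_events(),
share one batching scheme: each CPU accumulates deltas in its percpu
slot and only folds them into the shared atomic counter once they
exceed MEMCG_CHARGE_BATCH, while memcg_page_state() clamps transiently
negative sums on SMP. Below is a minimal userspace sketch of that
scheme, with threads standing in for CPUs; the names BATCH, counter,
local_delta, mod_state(), and read_state() are illustrative, not
kernel APIs.

/*
 * Userspace sketch of the percpu batching used by __mod_memcg_state():
 * a _Thread_local long plays the role of the percpu slot and BATCH
 * plays the role of MEMCG_CHARGE_BATCH. Illustrative only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH 32

static atomic_long counter;            /* role of memcg->vmstats[idx] */
static _Thread_local long local_delta; /* role of the percpu slot */

static void mod_state(long val)
{
	long x = val + local_delta;

	if (labs(x) > BATCH) {          /* batch full: fold into shared counter */
		atomic_fetch_add(&counter, x);
		x = 0;
	}
	local_delta = x;                /* keep the remainder thread-local */
}

static long read_state(void)
{
	long x = atomic_load(&counter);

	/* Unflushed local deltas can make the shared sum transiently
	 * negative; clamp to zero, as memcg_page_state() does on SMP. */
	return x < 0 ? 0 : x;
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		mod_state(1);
	atomic_fetch_add(&counter, local_delta); /* final flush on exit */
	local_delta = 0;
	return NULL;
}

int main(void)
{
	pthread_t threads[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);

	printf("counter = %ld\n", read_state()); /* prints 400000 */
	return 0;
}

The trade-off is the same as in the kernel code: a read can be stale
by up to (number of CPUs x MEMCG_CHARGE_BATCH), in exchange for
keeping the hot update path free of atomic contention. Note also that
the events path checks x > MEMCG_CHARGE_BATCH rather than abs(x),
since event counts only ever increase.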