Commit fd25a9e0 authored by Shakeel Butt, committed by Linus Torvalds

memcg: unify memcg stat flushing

The memcg stats can be flushed in multiple contexts and potentially in
parallel too.  For example, multiple parallel user space readers of
memcg stats will contend on the rstat locks with each other.  There is
no need for that.  We just need one flusher and everyone else can
benefit.

In addition, after aa48e47e ("memcg: infrastructure to flush memcg
stats") the kernel periodically flushes the memcg stats from the root, so
the other flushers will potentially have much less work to do.

Link: https://lkml.kernel.org/r/20211001190040.48086-2-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: "Michal Koutný" <mkoutny@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 11192d9c
...@@ -660,12 +660,14 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg) ...@@ -660,12 +660,14 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg)
static void __mem_cgroup_flush_stats(void) static void __mem_cgroup_flush_stats(void)
{ {
if (!spin_trylock(&stats_flush_lock)) unsigned long flag;
if (!spin_trylock_irqsave(&stats_flush_lock, flag))
return; return;
cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup); cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
atomic_set(&stats_flush_threshold, 0); atomic_set(&stats_flush_threshold, 0);
spin_unlock(&stats_flush_lock); spin_unlock_irqrestore(&stats_flush_lock, flag);
} }
void mem_cgroup_flush_stats(void) void mem_cgroup_flush_stats(void)
...@@ -1461,7 +1463,7 @@ static char *memory_stat_format(struct mem_cgroup *memcg) ...@@ -1461,7 +1463,7 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
* *
* Current memory state: * Current memory state:
*/ */
cgroup_rstat_flush(memcg->css.cgroup); mem_cgroup_flush_stats();
for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
u64 size; u64 size;
...@@ -3565,8 +3567,7 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) ...@@ -3565,8 +3567,7 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
unsigned long val; unsigned long val;
if (mem_cgroup_is_root(memcg)) { if (mem_cgroup_is_root(memcg)) {
/* mem_cgroup_threshold() calls here from irqsafe context */ mem_cgroup_flush_stats();
cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
val = memcg_page_state(memcg, NR_FILE_PAGES) + val = memcg_page_state(memcg, NR_FILE_PAGES) +
memcg_page_state(memcg, NR_ANON_MAPPED); memcg_page_state(memcg, NR_ANON_MAPPED);
if (swap) if (swap)
...@@ -3947,7 +3948,7 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v) ...@@ -3947,7 +3948,7 @@ static int memcg_numa_stat_show(struct seq_file *m, void *v)
int nid; int nid;
struct mem_cgroup *memcg = mem_cgroup_from_seq(m); struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
cgroup_rstat_flush(memcg->css.cgroup); mem_cgroup_flush_stats();
for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) { for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
seq_printf(m, "%s=%lu", stat->name, seq_printf(m, "%s=%lu", stat->name,
...@@ -4019,7 +4020,7 @@ static int memcg_stat_show(struct seq_file *m, void *v) ...@@ -4019,7 +4020,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats)); BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
cgroup_rstat_flush(memcg->css.cgroup); mem_cgroup_flush_stats();
for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
unsigned long nr; unsigned long nr;
...@@ -4522,7 +4523,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, ...@@ -4522,7 +4523,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
struct mem_cgroup *parent; struct mem_cgroup *parent;
cgroup_rstat_flush_irqsafe(memcg->css.cgroup); mem_cgroup_flush_stats();
*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
*pwriteback = memcg_page_state(memcg, NR_WRITEBACK); *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
...@@ -6405,7 +6406,7 @@ static int memory_numa_stat_show(struct seq_file *m, void *v) ...@@ -6405,7 +6406,7 @@ static int memory_numa_stat_show(struct seq_file *m, void *v)
int i; int i;
struct mem_cgroup *memcg = mem_cgroup_from_seq(m); struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
cgroup_rstat_flush(memcg->css.cgroup); mem_cgroup_flush_stats();
for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
int nid; int nid;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment