Commit 4df91062 authored by Feng Tang, committed by Linus Torvalds

mm: memcg: relayout structure mem_cgroup to avoid cache interference

0day reported a -22.7% regression for the will-it-scale page_fault2
case [1] on a 4-socket, 144-CPU platform, and bisected it to
Waiman's optimization (commit bd0b230f), which saves one
'struct page_counter' worth of space in 'struct mem_cgroup'.

Initially we thought it was due to the cache alignment change
introduced by the patch, but further debugging showed that it is
because some hot data members ('vmstats_local', 'vmstats_percpu',
'vmstats') now sit in two adjacent cache lines (lines 2N and 2N+1),
and when adjacent cache line prefetch is enabled, this triggers an
"extended" form of cache false sharing spanning the two adjacent
cache lines.
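
For illustration only (not part of this patch): a minimal userspace
sketch of the effect, using hypothetical names. Two counters sit in
different 64-byte cache lines but within the same 128-byte aligned
pair (lines 2N and 2N+1), so with adjacent cache line prefetch
enabled, writers on different sockets can still disturb each other:

#include <pthread.h>
#include <stdio.h>

#define ITERS	100000000UL

#ifndef PAD			/* distance between the two hot fields */
#define PAD	64		/* 64: same 128-byte pair; 128: separate pairs */
#endif

struct pair {
	volatile unsigned long hot0;	/* cache line 2N */
	char pad[PAD - sizeof(unsigned long)];
	volatile unsigned long hot1;	/* cache line 2N+1 when PAD == 64 */
} __attribute__((aligned(128)));

static struct pair p;

static void *writer0(void *arg)
{
	for (unsigned long i = 0; i < ITERS; i++)
		p.hot0++;
	return NULL;
}

static void *writer1(void *arg)
{
	for (unsigned long i = 0; i < ITERS; i++)
		p.hot1++;
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, writer0, NULL);
	pthread_create(&t1, NULL, writer1, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	printf("%lu %lu\n", p.hot0, p.hot1);
	return 0;
}

Built with 'gcc -O2 -pthread' and with the two threads pinned to
different sockets, the PAD=64 build should run measurably slower than
the PAD=128 build when the adjacent cache line prefetcher is enabled.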

So exchange the two member blocks, while mostly keeping the original
cache alignment. This restores (and even improves) the performance,
and saves 64 bytes in 'struct mem_cgroup' (from 2880 to 2816 bytes
with 0day's default RHEL-8.3 kernel config).
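
For reference: the MEMCG_PADDING() helper seen in the diff below is
defined in include/linux/memcontrol.h, at this point roughly as
follows (quoted from memory, so treat the exact spelling as
approximate):

#if defined(CONFIG_SMP)
/*
 * Zero-size member whose only purpose is to push the next member
 * onto a fresh cache line boundary.
 */
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

On common x86 configs this aligns to a single 64-byte cache line, so
it separates the padded blocks from one another but does not by
itself break up a 128-byte adjacent-line prefetch pair; hence the
reordering here rather than extra padding. The resulting layout and
size can be verified with e.g. 'pahole -C mem_cgroup vmlinux'.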

[1]. https://lore.kernel.org/lkml/20201102091543.GM31092@shao2-debian/

Fixes: bd0b230f ("mm/memcg: unify swap and memsw page counters")
Reported-by: kernel test robot <rong.a.chen@intel.com>
Signed-off-by: Feng Tang <feng.tang@intel.com>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fa02fcd9
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -282,20 +282,6 @@ struct mem_cgroup {
 
 	MEMCG_PADDING(_pad1_);
 
-	/*
-	 * set > 0 if pages under this cgroup are moving to other cgroup.
-	 */
-	atomic_t		moving_account;
-	struct task_struct	*move_lock_task;
-
-	/* Legacy local VM stats and events */
-	struct memcg_vmstats_percpu __percpu *vmstats_local;
-
-	/* Subtree VM stats and events (batched updates) */
-	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
-
-	MEMCG_PADDING(_pad2_);
-
 	atomic_long_t		vmstats[MEMCG_NR_STAT];
 	atomic_long_t		vmevents[NR_VM_EVENT_ITEMS];
 
@@ -317,6 +303,20 @@ struct mem_cgroup {
 	struct list_head objcg_list; /* list of inherited objcgs */
 #endif
 
+	MEMCG_PADDING(_pad2_);
+
+	/*
+	 * set > 0 if pages under this cgroup are moving to other cgroup.
+	 */
+	atomic_t		moving_account;
+	struct task_struct	*move_lock_task;
+
+	/* Legacy local VM stats and events */
+	struct memcg_vmstats_percpu __percpu *vmstats_local;
+
+	/* Subtree VM stats and events (batched updates) */
+	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 	struct list_head	cgwb_list;
 	struct wb_domain	cgwb_domain;