Commit 94b7e5bf authored by Roman Gushchin's avatar Roman Gushchin Committed by Andrew Morton

mm: memcg: put memcg1-specific struct mem_cgroup's members under CONFIG_MEMCG_V1

Put memcg1-specific members of struct mem_cgroup under the CONFIG_MEMCG_V1
config option.  Also group them close to the end of struct mem_cgroup just
before the dynamic per-node part.

Link: https://lkml.kernel.org/r/20240628210317.272856-7-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 05dfec12
...@@ -188,10 +188,6 @@ struct mem_cgroup { ...@@ -188,10 +188,6 @@ struct mem_cgroup {
struct page_counter memsw; /* v1 only */ struct page_counter memsw; /* v1 only */
}; };
/* Legacy consumer-oriented counters */
struct page_counter kmem; /* v1 only */
struct page_counter tcpmem; /* v1 only */
/* Range enforcement for interrupt charges */ /* Range enforcement for interrupt charges */
struct work_struct high_work; struct work_struct high_work;
...@@ -205,8 +201,6 @@ struct mem_cgroup { ...@@ -205,8 +201,6 @@ struct mem_cgroup {
bool zswap_writeback; bool zswap_writeback;
#endif #endif
unsigned long soft_limit;
/* vmpressure notifications */ /* vmpressure notifications */
struct vmpressure vmpressure; struct vmpressure vmpressure;
...@@ -215,13 +209,7 @@ struct mem_cgroup { ...@@ -215,13 +209,7 @@ struct mem_cgroup {
*/ */
bool oom_group; bool oom_group;
/* protected by memcg_oom_lock */ int swappiness;
bool oom_lock;
int under_oom;
int swappiness;
/* OOM-Killer disable */
int oom_kill_disable;
/* memory.events and memory.events.local */ /* memory.events and memory.events.local */
struct cgroup_file events_file; struct cgroup_file events_file;
...@@ -230,27 +218,6 @@ struct mem_cgroup { ...@@ -230,27 +218,6 @@ struct mem_cgroup {
/* handle for "memory.swap.events" */ /* handle for "memory.swap.events" */
struct cgroup_file swap_events_file; struct cgroup_file swap_events_file;
/* protect arrays of thresholds */
struct mutex thresholds_lock;
/* thresholds for memory usage. RCU-protected */
struct mem_cgroup_thresholds thresholds;
/* thresholds for mem+swap usage. RCU-protected */
struct mem_cgroup_thresholds memsw_thresholds;
/* For oom notifier event fd */
struct list_head oom_notify;
/*
* Should we move charges of a task when a task is moved into this
* mem_cgroup ? And what type of charges should we move ?
*/
unsigned long move_charge_at_immigrate;
/* taken only while moving_account > 0 */
spinlock_t move_lock;
unsigned long move_lock_flags;
CACHELINE_PADDING(_pad1_); CACHELINE_PADDING(_pad1_);
/* memory.stat */ /* memory.stat */
...@@ -267,10 +234,6 @@ struct mem_cgroup { ...@@ -267,10 +234,6 @@ struct mem_cgroup {
*/ */
unsigned long socket_pressure; unsigned long socket_pressure;
/* Legacy tcp memory accounting */
bool tcpmem_active;
int tcpmem_pressure;
#ifdef CONFIG_MEMCG_KMEM #ifdef CONFIG_MEMCG_KMEM
int kmemcg_id; int kmemcg_id;
/* /*
...@@ -284,14 +247,6 @@ struct mem_cgroup { ...@@ -284,14 +247,6 @@ struct mem_cgroup {
struct list_head objcg_list; struct list_head objcg_list;
#endif #endif
CACHELINE_PADDING(_pad2_);
/*
* set > 0 if pages under this cgroup are moving to other cgroup.
*/
atomic_t moving_account;
struct task_struct *move_lock_task;
struct memcg_vmstats_percpu __percpu *vmstats_percpu; struct memcg_vmstats_percpu __percpu *vmstats_percpu;
#ifdef CONFIG_CGROUP_WRITEBACK #ifdef CONFIG_CGROUP_WRITEBACK
...@@ -300,10 +255,6 @@ struct mem_cgroup { ...@@ -300,10 +255,6 @@ struct mem_cgroup {
struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT]; struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif #endif
/* List of events which userspace want to receive */
struct list_head event_list;
spinlock_t event_list_lock;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split deferred_split_queue; struct deferred_split deferred_split_queue;
#endif #endif
...@@ -313,6 +264,58 @@ struct mem_cgroup { ...@@ -313,6 +264,58 @@ struct mem_cgroup {
struct lru_gen_mm_list mm_list; struct lru_gen_mm_list mm_list;
#endif #endif
#ifdef CONFIG_MEMCG_V1
/* Legacy consumer-oriented counters */
struct page_counter kmem; /* v1 only */
struct page_counter tcpmem; /* v1 only */
unsigned long soft_limit;
/* protected by memcg_oom_lock */
bool oom_lock;
int under_oom;
/* OOM-Killer disable */
int oom_kill_disable;
/* protect arrays of thresholds */
struct mutex thresholds_lock;
/* thresholds for memory usage. RCU-protected */
struct mem_cgroup_thresholds thresholds;
/* thresholds for mem+swap usage. RCU-protected */
struct mem_cgroup_thresholds memsw_thresholds;
/* For oom notifier event fd */
struct list_head oom_notify;
/*
* Should we move charges of a task when a task is moved into this
* mem_cgroup ? And what type of charges should we move ?
*/
unsigned long move_charge_at_immigrate;
/* taken only while moving_account > 0 */
spinlock_t move_lock;
unsigned long move_lock_flags;
/* Legacy tcp memory accounting */
bool tcpmem_active;
int tcpmem_pressure;
CACHELINE_PADDING(_pad2_);
/*
* set > 0 if pages under this cgroup are moving to other cgroup.
*/
atomic_t moving_account;
struct task_struct *move_lock_task;
/* List of events which userspace want to receive */
struct list_head event_list;
spinlock_t event_list_lock;
#endif /* CONFIG_MEMCG_V1 */
struct mem_cgroup_per_node *nodeinfo[]; struct mem_cgroup_per_node *nodeinfo[];
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment