Commit a52c6330 authored by Alex Shi (Tencent), committed by Vlastimil Babka

mm/memcg: align memcg_data define condition

commit 21c690a3 ("mm: introduce slabobj_ext to support slab object
extensions") changed the folio/page->memcg_data define condition from
MEMCG to SLAB_OBJ_EXT, which leaves memcg_data exposed when !MEMCG.

As Vlastimil Babka suggested, add _unused_slab_obj_exts as the
SLAB_MATCH counterpart for slab.obj_exts when !MEMCG. That resolves the
field-offset match issue and cleans up the feature logic.
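
For reference, a minimal sketch of the offsetof-based field matching that
a placeholder field like _unused_slab_obj_exts exists to satisfy. The
struct, field, and macro names below (outer, view, FIELD_MATCH,
HAS_REAL_FIELD) are simplified stand-ins for illustration, not the
kernel's definitions:

    #include <assert.h>
    #include <stddef.h>

    /* Two structs that overlay the same memory; a placeholder keeps the
     * offsets in sync even when the "real" field is configured out. */
    struct outer {
    	unsigned long flags;
    #ifdef HAS_REAL_FIELD
    	unsigned long real_field;
    #else
    	unsigned long _unused_placeholder;
    #endif
    };

    struct view {
    	unsigned long __flags;
    	unsigned long aliased_field;
    };

    /* Hypothetical analogue of SLAB_MATCH: break the build at compile
     * time if the two fields ever drift to different offsets. */
    #define FIELD_MATCH(a, b) \
    	static_assert(offsetof(struct outer, a) == offsetof(struct view, b), \
    		      "field offset mismatch")

    FIELD_MATCH(flags, __flags);
    #ifdef HAS_REAL_FIELD
    FIELD_MATCH(real_field, aliased_field);
    #else
    FIELD_MATCH(_unused_placeholder, aliased_field);
    #endif

The placeholder costs nothing at runtime; it only keeps the static
assertion expressible in every configuration.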
Signed-off-by: Alex Shi (Tencent) <alexs@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yoann Congal <yoann.congal@smile.fr>
Cc: Masahiro Yamada <masahiroy@kernel.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 7b1fdf2b
@@ -169,8 +169,10 @@ struct page {
 			/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
 			atomic_t _refcount;
 
-#ifdef CONFIG_SLAB_OBJ_EXT
+#ifdef CONFIG_MEMCG
 	unsigned long memcg_data;
+#elif defined(CONFIG_SLAB_OBJ_EXT)
+	unsigned long _unused_slab_obj_exts;
 #endif
 
 	/*
@@ -298,6 +300,7 @@ typedef struct {
  * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
  * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
  * @_deferred_list: Folios to be split under memory pressure.
+ * @_unused_slab_obj_exts: Placeholder to match obj_exts in struct slab.
  *
  * A folio is a physically, virtually and logically contiguous set
  * of bytes. It is a power-of-two in size, and it is aligned to that
@@ -332,8 +335,10 @@ struct folio {
 		};
 		atomic_t _mapcount;
 	atomic_t _refcount;
-#ifdef CONFIG_SLAB_OBJ_EXT
+#ifdef CONFIG_MEMCG
 	unsigned long memcg_data;
+#elif defined(CONFIG_SLAB_OBJ_EXT)
+	unsigned long _unused_slab_obj_exts;
 #endif
 #if defined(WANT_PAGE_VIRTUAL)
 	void *virtual;
@@ -97,8 +97,10 @@ struct slab {
 SLAB_MATCH(flags, __page_flags);
 SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
 SLAB_MATCH(_refcount, __page_refcount);
-#ifdef CONFIG_SLAB_OBJ_EXT
+#ifdef CONFIG_MEMCG
 SLAB_MATCH(memcg_data, obj_exts);
+#elif defined(CONFIG_SLAB_OBJ_EXT)
+SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
 #endif
 #undef SLAB_MATCH
 static_assert(sizeof(struct slab) <= sizeof(struct page));