Commit becacb04 authored by Matthew Wilcox, committed by Andrew Morton

mm: memcg: add folio_memcg_check()

Patch series "mm: convert page_idle/damon to use folios", v4.


This patch (of 8):

Convert page_memcg_check() into folio_memcg_check() and add a
page_memcg_check() wrapper.  The behaviour of page_memcg_check() is
unchanged; tail pages always had a NULL ->memcg_data.
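
As an illustration (not part of the patch itself), a caller holding what may
be a kmem folio would use the new helper under the RCU read lock, as the
kernel-doc below requires; css_tryget() is the usual way to pin a memcg
found under RCU:

	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = folio_memcg_check(folio);
	/* may be NULL, or already on its way out; try to pin it */
	if (memcg && !css_tryget(&memcg->css))
		memcg = NULL;
	rcu_read_unlock();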

Link: https://lkml.kernel.org/r/20221230070849.63358-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20221230070849.63358-2-wangkefeng.wang@huawei.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 071acb30
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -466,34 +466,34 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
 }
 
 /*
- * page_memcg_check - get the memory cgroup associated with a page
- * @page: a pointer to the page struct
+ * folio_memcg_check - Get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
  *
- * Returns a pointer to the memory cgroup associated with the page,
- * or NULL. This function unlike page_memcg() can take any page
- * as an argument. It has to be used in cases when it's not known if a page
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function unlike folio_memcg() can take any folio
+ * as an argument. It has to be used in cases when it's not known if a folio
  * has an associated memory cgroup pointer or an object cgroups vector or
  * an object cgroup.
  *
- * For a non-kmem page any of the following ensures page and memcg binding
+ * For a non-kmem folio any of the following ensures folio and memcg binding
  * stability:
  *
- * - the page lock
+ * - the folio lock
  * - LRU isolation
- * - lock_page_memcg()
+ * - lock_folio_memcg()
  * - exclusive reference
  * - mem_cgroup_trylock_pages()
  *
- * For a kmem page a caller should hold an rcu read lock to protect memcg
- * associated with a kmem page from being released.
+ * For a kmem folio a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem folio from being released.
  */
-static inline struct mem_cgroup *page_memcg_check(struct page *page)
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
 {
 	/*
-	 * Because page->memcg_data might be changed asynchronously
-	 * for slab pages, READ_ONCE() should be used here.
+	 * Because folio->memcg_data might be changed asynchronously
+	 * for slabs, READ_ONCE() should be used here.
 	 */
-	unsigned long memcg_data = READ_ONCE(page->memcg_data);
+	unsigned long memcg_data = READ_ONCE(folio->memcg_data);
 
 	if (memcg_data & MEMCG_DATA_OBJCGS)
 		return NULL;
@@ -508,6 +508,13 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page)
 	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
 }
 
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
+{
+	if (PageTail(page))
+		return NULL;
+	return folio_memcg_check((struct folio *)page);
+}
+
 static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
 {
 	struct mem_cgroup *memcg;
@@ -1170,6 +1177,11 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
 	return NULL;
 }
 
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
+{
+	return NULL;
+}
+
 static inline struct mem_cgroup *page_memcg_check(struct page *page)
 {
 	return NULL;
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2952,13 +2952,13 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
 	}
 
 	/*
-	 * page_memcg_check() is used here, because in theory we can encounter
+	 * folio_memcg_check() is used here, because in theory we can encounter
 	 * a folio where the slab flag has been cleared already, but
 	 * slab->memcg_data has not been freed yet
-	 * page_memcg_check(page) will guarantee that a proper memory
+	 * folio_memcg_check() will guarantee that a proper memory
	 * cgroup pointer or NULL will be returned.
 	 */
-	return page_memcg_check(folio_page(folio, 0));
+	return folio_memcg_check(folio);
 }
 
 /*
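
A sanity sketch (illustrative only, not from the patch) of why the wrapper
preserves behaviour: tail pages never carried their own ->memcg_data, so they
still yield NULL, while head and order-0 pages simply take the folio path:

	if (PageTail(page))
		/* tail pages never had their own ->memcg_data */
		VM_WARN_ON(page_memcg_check(page) != NULL);
	else
		/* head/order-0 pages are equivalent to the folio lookup */
		VM_WARN_ON(page_memcg_check(page) !=
			   folio_memcg_check(page_folio(page)));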