Commit f074732d authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb_cgroup: convert hugetlb_cgroup_from_page() to folios

Introduce folios in __remove_hugetlb_page() by converting
hugetlb_cgroup_from_page() to use folios.

Also gets rid of the unused hugetlb_cgroup_from_page_resv() function.
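
For illustration, a minimal sketch of the call-site pattern this conversion
moves toward: resolve the folio once with page_folio(), then use the
folio-based accessor. Only the accessor names come from this patch; the
surrounding caller is hypothetical.

	static void example_check_uncharged(struct page *page)
	{
		/* Resolve the containing folio once up front. */
		struct folio *folio = page_folio(page);

		/* Old: hugetlb_cgroup_from_page(page). */
		if (hugetlb_cgroup_from_folio(folio))
			pr_warn("hugetlb page still charged to a cgroup\n");
	}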

Link: https://lkml.kernel.org/r/20221101223059.460937-3-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Bui Quang Minh <minhquangbui99@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a098c977
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -67,27 +67,34 @@ struct hugetlb_cgroup {
 };
 
 static inline struct hugetlb_cgroup *
-__hugetlb_cgroup_from_page(struct page *page, bool rsvd)
+__hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
 {
-	VM_BUG_ON_PAGE(!PageHuge(page), page);
+	struct page *tail;
 
-	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+	if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
 		return NULL;
-	if (rsvd)
-		return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD);
-	else
-		return (void *)page_private(page + SUBPAGE_INDEX_CGROUP);
+
+	if (rsvd) {
+		tail = folio_page(folio, SUBPAGE_INDEX_CGROUP_RSVD);
+		return (void *)page_private(tail);
+	}
+
+	else {
+		tail = folio_page(folio, SUBPAGE_INDEX_CGROUP);
+		return (void *)page_private(tail);
+	}
 }
 
-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
 {
-	return __hugetlb_cgroup_from_page(page, false);
+	return __hugetlb_cgroup_from_folio(folio, false);
 }
 
 static inline struct hugetlb_cgroup *
-hugetlb_cgroup_from_page_rsvd(struct page *page)
+hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
 {
-	return __hugetlb_cgroup_from_page(page, true);
+	return __hugetlb_cgroup_from_folio(folio, true);
 }
 
 static inline void __set_hugetlb_cgroup(struct folio *folio,
@@ -181,19 +188,13 @@ static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
 {
 }
 
-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
-{
-	return NULL;
-}
-
-static inline struct hugetlb_cgroup *
-hugetlb_cgroup_from_page_resv(struct page *page)
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
 {
 	return NULL;
 }
 
 static inline struct hugetlb_cgroup *
-hugetlb_cgroup_from_page_rsvd(struct page *page)
+hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
 {
 	return NULL;
 }
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1446,9 +1446,10 @@ static void __remove_hugetlb_page(struct hstate *h, struct page *page,
 							bool demote)
 {
 	int nid = page_to_nid(page);
+	struct folio *folio = page_folio(page);
 
-	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
-	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
+	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
+	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
 
 	lockdep_assert_held(&hugetlb_lock);
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -191,8 +191,9 @@ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
 	struct page_counter *counter;
 	struct hugetlb_cgroup *page_hcg;
 	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
+	struct folio *folio = page_folio(page);
 
-	page_hcg = hugetlb_cgroup_from_page(page);
+	page_hcg = hugetlb_cgroup_from_folio(folio);
 	/*
 	 * We can have pages in active list without any cgroup
 	 * ie, hugepage with less than 3 pages. We can safely
@@ -349,14 +350,15 @@ static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
 					   struct page *page, bool rsvd)
 {
 	struct hugetlb_cgroup *h_cg;
+	struct folio *folio = page_folio(page);
 
 	if (hugetlb_cgroup_disabled())
 		return;
 	lockdep_assert_held(&hugetlb_lock);
-	h_cg = __hugetlb_cgroup_from_page(page, rsvd);
+	h_cg = __hugetlb_cgroup_from_folio(folio, rsvd);
 	if (unlikely(!h_cg))
 		return;
-	__set_hugetlb_cgroup(page_folio(page), NULL, rsvd);
+	__set_hugetlb_cgroup(folio, NULL, rsvd);
 
 	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
 								   rsvd),
@@ -888,13 +890,14 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 	struct hugetlb_cgroup *h_cg;
 	struct hugetlb_cgroup *h_cg_rsvd;
 	struct hstate *h = page_hstate(oldhpage);
+	struct folio *old_folio = page_folio(oldhpage);
 
 	if (hugetlb_cgroup_disabled())
 		return;
 
 	spin_lock_irq(&hugetlb_lock);
-	h_cg = hugetlb_cgroup_from_page(oldhpage);
-	h_cg_rsvd = hugetlb_cgroup_from_page_rsvd(oldhpage);
+	h_cg = hugetlb_cgroup_from_folio(old_folio);
+	h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
 	set_hugetlb_cgroup(oldhpage, NULL);
 	set_hugetlb_cgroup_rsvd(oldhpage, NULL);