Commit de656ed3 authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb_cgroup: convert set_hugetlb_cgroup*() to folios

Allows __prep_new_huge_page() to operate on a folio by converting
set_hugetlb_cgroup*() to take in a folio.
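
In effect, call sites now convert a page to its folio once with page_folio() and pass the folio down, instead of each setter hiding a page_folio() call internally. A minimal sketch of the call-site pattern, for context: the example_prep() wrapper below is hypothetical, while set_hugetlb_cgroup(), set_hugetlb_cgroup_rsvd(), and page_folio() are the interfaces this patch touches.

	/* Hypothetical caller, not part of this patch: the page-to-folio
	 * conversion happens once at the boundary rather than inside each
	 * setter, as it did before this change.
	 */
	static void example_prep(struct page *page)
	{
		struct folio *folio = page_folio(page);

		/* Before this patch: set_hugetlb_cgroup(page, NULL); */
		set_hugetlb_cgroup(folio, NULL);
		set_hugetlb_cgroup_rsvd(folio, NULL);
	}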

Link: https://lkml.kernel.org/r/20221101223059.460937-4-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Bui Quang Minh <minhquangbui99@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f074732d
include/linux/hugetlb_cgroup.h
@@ -112,16 +112,16 @@ static inline void __set_hugetlb_cgroup(struct folio *folio,
 					(unsigned long)h_cg);
 }
 
-static inline void set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct folio *folio,
 					struct hugetlb_cgroup *h_cg)
 {
-	__set_hugetlb_cgroup(page_folio(page), h_cg, false);
+	__set_hugetlb_cgroup(folio, h_cg, false);
 }
 
-static inline void set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
 					struct hugetlb_cgroup *h_cg)
 {
-	__set_hugetlb_cgroup(page_folio(page), h_cg, true);
+	__set_hugetlb_cgroup(folio, h_cg, true);
 }
 
 static inline bool hugetlb_cgroup_disabled(void)
@@ -199,12 +199,12 @@ hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
 	return NULL;
 }
 
-static inline void set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct folio *folio,
 					struct hugetlb_cgroup *h_cg)
 {
 }
 
-static inline void set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
 					struct hugetlb_cgroup *h_cg)
 {
 }
mm/hugetlb.c
@@ -1774,19 +1774,21 @@ static void __prep_account_new_huge_page(struct hstate *h, int nid)
 	h->nr_huge_pages_node[nid]++;
 }
 
-static void __prep_new_huge_page(struct hstate *h, struct page *page)
+static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
 {
-	hugetlb_vmemmap_optimize(h, page);
-	INIT_LIST_HEAD(&page->lru);
-	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
-	hugetlb_set_page_subpool(page, NULL);
-	set_hugetlb_cgroup(page, NULL);
-	set_hugetlb_cgroup_rsvd(page, NULL);
+	hugetlb_vmemmap_optimize(h, &folio->page);
+	INIT_LIST_HEAD(&folio->lru);
+	folio->_folio_dtor = HUGETLB_PAGE_DTOR;
+	hugetlb_set_folio_subpool(folio, NULL);
+	set_hugetlb_cgroup(folio, NULL);
+	set_hugetlb_cgroup_rsvd(folio, NULL);
 }
 
 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
-	__prep_new_huge_page(h, page);
+	struct folio *folio = page_folio(page);
+
+	__prep_new_hugetlb_folio(h, folio);
 	spin_lock_irq(&hugetlb_lock);
 	__prep_account_new_huge_page(h, nid);
 	spin_unlock_irq(&hugetlb_lock);
@@ -2748,8 +2750,10 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 					struct list_head *list)
 {
 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
-	int nid = page_to_nid(old_page);
+	struct folio *old_folio = page_folio(old_page);
+	int nid = folio_nid(old_folio);
 	struct page *new_page;
+	struct folio *new_folio;
 	int ret = 0;
 
 	/*
@@ -2762,16 +2766,17 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 	new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
 	if (!new_page)
 		return -ENOMEM;
-	__prep_new_huge_page(h, new_page);
+	new_folio = page_folio(new_page);
+	__prep_new_hugetlb_folio(h, new_folio);
 
 retry:
 	spin_lock_irq(&hugetlb_lock);
-	if (!PageHuge(old_page)) {
+	if (!folio_test_hugetlb(old_folio)) {
 		/*
 		 * Freed from under us. Drop new_page too.
 		 */
 		goto free_new;
-	} else if (page_count(old_page)) {
+	} else if (folio_ref_count(old_folio)) {
 		/*
 		 * Someone has grabbed the page, try to isolate it here.
 		 * Fail with -EBUSY if not possible.
@@ -2780,7 +2785,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 		ret = isolate_hugetlb(old_page, list);
 		spin_lock_irq(&hugetlb_lock);
 		goto free_new;
-	} else if (!HPageFreed(old_page)) {
+	} else if (!folio_test_hugetlb_freed(old_folio)) {
 		/*
 		 * Page's refcount is 0 but it has not been enqueued in the
 		 * freelist yet. Race window is small, so we can succeed here if
@@ -2818,7 +2823,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 free_new:
 	spin_unlock_irq(&hugetlb_lock);
 	/* Page has a zero ref count, but needs a ref to be freed */
-	set_page_refcounted(new_page);
+	folio_ref_unfreeze(new_folio, 1);
 	update_and_free_page(h, new_page, false);
 
 	return ret;
mm/hugetlb_cgroup.c
@@ -212,7 +212,7 @@ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
 	/* Take the pages off the local counter */
 	page_counter_cancel(counter, nr_pages);
 
-	set_hugetlb_cgroup(page, parent);
+	set_hugetlb_cgroup(folio, parent);
 out:
 	return;
 }
@@ -891,6 +891,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 	struct hugetlb_cgroup *h_cg_rsvd;
 	struct hstate *h = page_hstate(oldhpage);
 	struct folio *old_folio = page_folio(oldhpage);
+	struct folio *new_folio = page_folio(newhpage);
 
 	if (hugetlb_cgroup_disabled())
 		return;
@@ -898,12 +899,12 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 	spin_lock_irq(&hugetlb_lock);
 	h_cg = hugetlb_cgroup_from_folio(old_folio);
 	h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
-	set_hugetlb_cgroup(oldhpage, NULL);
-	set_hugetlb_cgroup_rsvd(oldhpage, NULL);
+	set_hugetlb_cgroup(old_folio, NULL);
+	set_hugetlb_cgroup_rsvd(old_folio, NULL);
 
 	/* move the h_cg details to new cgroup */
-	set_hugetlb_cgroup(newhpage, h_cg);
-	set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd);
+	set_hugetlb_cgroup(new_folio, h_cg);
+	set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd);
 	list_move(&newhpage->lru, &h->hugepage_activelist);
 	spin_unlock_irq(&hugetlb_lock);
 	return;