Commit 0356c4b9 authored by Sidhartha Kumar's avatar Sidhartha Kumar Committed by Andrew Morton

mm/hugetlb: convert free_huge_page to folios

Use folios inside free_huge_page(), this is in preparation for converting
hugetlb_cgroup_uncharge_page() to take in a folio.

Link: https://lkml.kernel.org/r/20221101223059.460937-7-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Bui Quang Minh <minhquangbui99@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent d5e33bd8
@@ -1704,21 +1704,22 @@ void free_huge_page(struct page *page)
 	 * Can't pass hstate in here because it is called from the
 	 * compound page destructor.
 	 */
-	struct hstate *h = page_hstate(page);
-	int nid = page_to_nid(page);
-	struct hugepage_subpool *spool = hugetlb_page_subpool(page);
+	struct folio *folio = page_folio(page);
+	struct hstate *h = folio_hstate(folio);
+	int nid = folio_nid(folio);
+	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
 	bool restore_reserve;
 	unsigned long flags;
 
-	VM_BUG_ON_PAGE(page_count(page), page);
-	VM_BUG_ON_PAGE(page_mapcount(page), page);
+	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
+	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
 
-	hugetlb_set_page_subpool(page, NULL);
-	if (PageAnon(page))
-		__ClearPageAnonExclusive(page);
-	page->mapping = NULL;
-	restore_reserve = HPageRestoreReserve(page);
-	ClearHPageRestoreReserve(page);
+	hugetlb_set_folio_subpool(folio, NULL);
+	if (folio_test_anon(folio))
+		__ClearPageAnonExclusive(&folio->page);
+	folio->mapping = NULL;
+	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
+	folio_clear_hugetlb_restore_reserve(folio);
 
 	/*
 	 * If HPageRestoreReserve was set on page, page allocation consumed a
@@ -1740,7 +1741,7 @@ void free_huge_page(struct page *page)
 	}
 
 	spin_lock_irqsave(&hugetlb_lock, flags);
-	ClearHPageMigratable(page);
+	folio_clear_hugetlb_migratable(folio);
 	hugetlb_cgroup_uncharge_page(hstate_index(h),
 				     pages_per_huge_page(h), page);
 	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
@@ -1748,7 +1749,7 @@ void free_huge_page(struct page *page)
 	if (restore_reserve)
 		h->resv_huge_pages++;
 
-	if (HPageTemporary(page)) {
+	if (folio_test_hugetlb_temporary(folio)) {
 		remove_hugetlb_page(h, page, false);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
 		update_and_free_page(h, page, true);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment