Commit 1a7cdab5 authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert dissolve_free_huge_page() to folios

Removes the compound_head() call by using a folio rather than a head page.
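The conversion follows the usual folio pattern: resolve the folio once with page_folio(), then use the folio_*() accessors, which act on the head directly, instead of the page_*() helpers that each re-derive the head via a hidden compound_head() call. A minimal sketch of the idea, using an illustrative helper that is not part of this commit:

	/*
	 * Illustrative only: PageHuge() and page_count() each hide a
	 * compound_head() walk; the folio form pays that cost once,
	 * in page_folio().
	 */
	static bool page_is_unused_hugetlb(struct page *page)
	{
		struct folio *folio = page_folio(page);

		return folio_test_hugetlb(folio) && !folio_ref_count(folio);
	}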

Link: https://lkml.kernel.org/r/20221129225039.82257-4-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Tarun Sahu <tsahu@linux.ibm.com>
Cc: Wei Chen <harperchen1110@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 911565b8
@@ -2128,21 +2128,21 @@ static struct page *remove_pool_huge_page(struct hstate *h,
 int dissolve_free_huge_page(struct page *page)
 {
 	int rc = -EBUSY;
+	struct folio *folio = page_folio(page);
 
 retry:
 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
-	if (!PageHuge(page))
+	if (!folio_test_hugetlb(folio))
 		return 0;
 
 	spin_lock_irq(&hugetlb_lock);
-	if (!PageHuge(page)) {
+	if (!folio_test_hugetlb(folio)) {
 		rc = 0;
 		goto out;
 	}
 
-	if (!page_count(page)) {
-		struct page *head = compound_head(page);
-		struct hstate *h = page_hstate(head);
+	if (!folio_ref_count(folio)) {
+		struct hstate *h = folio_hstate(folio);
 		if (!available_huge_pages(h))
 			goto out;
@@ -2150,7 +2150,7 @@ int dissolve_free_huge_page(struct page *page)
 		 * We should make sure that the page is already on the free list
 		 * when it is dissolved.
 		 */
-		if (unlikely(!HPageFreed(head))) {
+		if (unlikely(!folio_test_hugetlb_freed(folio))) {
 			spin_unlock_irq(&hugetlb_lock);
 			cond_resched();
 
@@ -2165,7 +2165,7 @@ int dissolve_free_huge_page(struct page *page)
 			goto retry;
 		}
 
-		remove_hugetlb_page(h, head, false);
+		remove_hugetlb_page(h, &folio->page, false);
 		h->max_huge_pages--;
 		spin_unlock_irq(&hugetlb_lock);
 
@@ -2177,12 +2177,12 @@ int dissolve_free_huge_page(struct page *page)
 		 * Attempt to allocate vmemmap here so that we can take
 		 * appropriate action on failure.
 		 */
-		rc = hugetlb_vmemmap_restore(h, head);
+		rc = hugetlb_vmemmap_restore(h, &folio->page);
 		if (!rc) {
-			update_and_free_page(h, head, false);
+			update_and_free_page(h, &folio->page, false);
 		} else {
 			spin_lock_irq(&hugetlb_lock);
-			add_hugetlb_page(h, head, false);
+			add_hugetlb_page(h, &folio->page, false);
 			h->max_huge_pages++;
 			spin_unlock_irq(&hugetlb_lock);
 		}
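Note that remove_hugetlb_page(), hugetlb_vmemmap_restore(), update_and_free_page() and add_hugetlb_page() still take a struct page, which is why the call sites above pass &folio->page. That expression recovers the head page without another compound_head() walk, since a folio is laid over its head page; a sketch of the equivalence (the helper name is hypothetical, not from this commit):

	/* Hypothetical helper, for illustration only. */
	static inline struct page *folio_head_page(struct folio *folio)
	{
		return &folio->page;	/* same as folio_page(folio, 0) */
	}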