Commit a36f1e90 authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert dequeue_hugetlb_page functions to folios

dequeue_huge_page_node_exact() is changed to dequeue_hugetlb_folio_node_exact()
and dequeue_huge_page_nodemask() is changed to dequeue_hugetlb_folio_nodemask().
Update their callers to pass in a folio.

Link: https://lkml.kernel.org/r/20230113223057.173292-4-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6f6956cf
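The pattern throughout the diff below is that the internal dequeue helpers now
traffic in struct folio * while the remaining page-based entry points convert
only at their boundary (the "return &folio->page" lines). As a rough
illustration only, here is a minimal, self-contained userspace sketch of that
boundary idiom; the struct page / struct folio stand-ins and the helper names
(dequeue_folio, alloc_page_like) are invented for the example and are not the
kernel's definitions:

/*
 * Userspace sketch only: stand-in types, not the kernel's struct page
 * or struct folio.  The point is the shape of the conversion -- the
 * internal helper returns a folio handle, and the page-based wrapper
 * converts to a page pointer only at its boundary.
 */
#include <stdio.h>

struct page { int nid; };               /* stand-in for struct page */
struct folio { struct page page; };     /* a folio embeds its head page */

/* internal helper: returns a folio, or NULL if nothing suitable is queued */
static struct folio *dequeue_folio(struct folio *freelist, int want_nid)
{
	if (freelist && freelist->page.nid == want_nid)
		return freelist;
	return NULL;
}

/* page-based wrapper: converts folio -> page only when returning */
static struct page *alloc_page_like(struct folio *freelist, int nid)
{
	struct folio *folio = dequeue_folio(freelist, nid);

	return folio ? &folio->page : NULL;
}

int main(void)
{
	struct folio f = { .page = { .nid = 0 } };

	printf("%s\n", alloc_page_like(&f, 0) ? "dequeued" : "empty");
	return 0;
}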
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1282,32 +1282,33 @@ static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
 	folio_set_hugetlb_freed(folio);
 }
 
-static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
+static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
+								int nid)
 {
-	struct page *page;
+	struct folio *folio;
 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
 
 	lockdep_assert_held(&hugetlb_lock);
-	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
-		if (pin && !is_longterm_pinnable_page(page))
+	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
+		if (pin && !folio_is_longterm_pinnable(folio))
 			continue;
-		if (PageHWPoison(page))
+		if (folio_test_hwpoison(folio))
 			continue;
-		list_move(&page->lru, &h->hugepage_activelist);
-		set_page_refcounted(page);
-		ClearHPageFreed(page);
+		list_move(&folio->lru, &h->hugepage_activelist);
+		folio_ref_unfreeze(folio, 1);
+		folio_clear_hugetlb_freed(folio);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
-		return page;
+		return folio;
 	}
 
 	return NULL;
 }
 
-static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
-						nodemask_t *nmask)
+static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
+						int nid, nodemask_t *nmask)
 {
 	unsigned int cpuset_mems_cookie;
 	struct zonelist *zonelist;
@@ -1320,7 +1321,7 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
-		struct page *page;
+		struct folio *folio;
 
 		if (!cpuset_zone_allowed(zone, gfp_mask))
 			continue;
@@ -1332,9 +1333,9 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
 			continue;
 		node = zone_to_nid(zone);
 
-		page = dequeue_huge_page_node_exact(h, node);
-		if (page)
-			return page;
+		folio = dequeue_hugetlb_folio_node_exact(h, node);
+		if (folio)
+			return folio;
 	}
 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
 		goto retry_cpuset;
@@ -1352,7 +1353,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 				unsigned long address, int avoid_reserve,
 				long chg)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	struct mempolicy *mpol;
 	gfp_t gfp_mask;
 	nodemask_t *nodemask;
@@ -1374,22 +1375,24 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
 
 	if (mpol_is_preferred_many(mpol)) {
-		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+							nid, nodemask);
 
 		/* Fallback to all nodes if page==NULL */
 		nodemask = NULL;
 	}
 
-	if (!page)
-		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+	if (!folio)
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+							nid, nodemask);
 
-	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
-		SetHPageRestoreReserve(page);
+	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
+		folio_set_hugetlb_restore_reserve(folio);
 		h->resv_huge_pages--;
 	}
 
 	mpol_cond_put(mpol);
-	return page;
+	return &folio->page;
 
 err:
 	return NULL;
@@ -2475,12 +2478,13 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 {
 	spin_lock_irq(&hugetlb_lock);
 	if (available_huge_pages(h)) {
-		struct page *page;
+		struct folio *folio;
 
-		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
-		if (page) {
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+						preferred_nid, nmask);
+		if (folio) {
 			spin_unlock_irq(&hugetlb_lock);
-			return page;
+			return &folio->page;
 		}
 	}
 	spin_unlock_irq(&hugetlb_lock);