Commit e37d3e83 authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert alloc_migrate_huge_page to folios

Change alloc_huge_page_nodemask() to alloc_hugetlb_folio_nodemask() and
alloc_migrate_huge_page() to alloc_migrate_hugetlb_folio().  Both
functions now return a folio rather than a page.

Link: https://lkml.kernel.org/r/20230113223057.173292-7-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ff7d853b
...@@ -719,7 +719,7 @@ struct huge_bootmem_page { ...@@ -719,7 +719,7 @@ struct huge_bootmem_page {
int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma, struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve); unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask); nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
unsigned long address); unsigned long address);
...@@ -1040,8 +1040,8 @@ static inline struct page *alloc_huge_page(struct vm_area_struct *vma, ...@@ -1040,8 +1040,8 @@ static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
return NULL; return NULL;
} }
static inline struct page * static inline struct folio *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask) nodemask_t *nmask, gfp_t gfp_mask)
{ {
return NULL; return NULL;
......
...@@ -2419,7 +2419,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h, ...@@ -2419,7 +2419,7 @@ static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
return folio; return folio;
} }
static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nmask) int nid, nodemask_t *nmask)
{ {
struct folio *folio; struct folio *folio;
...@@ -2439,7 +2439,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, ...@@ -2439,7 +2439,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
*/ */
folio_set_hugetlb_temporary(folio); folio_set_hugetlb_temporary(folio);
return &folio->page; return folio;
} }
/* /*
...@@ -2472,8 +2472,8 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, ...@@ -2472,8 +2472,8 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
return folio; return folio;
} }
/* page migration callback function */ /* folio migration callback function */
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask) nodemask_t *nmask, gfp_t gfp_mask)
{ {
spin_lock_irq(&hugetlb_lock); spin_lock_irq(&hugetlb_lock);
...@@ -2484,12 +2484,12 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, ...@@ -2484,12 +2484,12 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
preferred_nid, nmask); preferred_nid, nmask);
if (folio) { if (folio) {
spin_unlock_irq(&hugetlb_lock); spin_unlock_irq(&hugetlb_lock);
return &folio->page; return folio;
} }
} }
spin_unlock_irq(&hugetlb_lock); spin_unlock_irq(&hugetlb_lock);
return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask); return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
} }
/* mempolicy aware migration callback */ /* mempolicy aware migration callback */
...@@ -2498,16 +2498,16 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, ...@@ -2498,16 +2498,16 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
{ {
struct mempolicy *mpol; struct mempolicy *mpol;
nodemask_t *nodemask; nodemask_t *nodemask;
struct page *page; struct folio *folio;
gfp_t gfp_mask; gfp_t gfp_mask;
int node; int node;
gfp_mask = htlb_alloc_mask(h); gfp_mask = htlb_alloc_mask(h);
node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask); folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
mpol_cond_put(mpol); mpol_cond_put(mpol);
return page; return &folio->page;
} }
/* /*
......
...@@ -1663,6 +1663,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private) ...@@ -1663,6 +1663,7 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
struct migration_target_control *mtc; struct migration_target_control *mtc;
gfp_t gfp_mask; gfp_t gfp_mask;
unsigned int order = 0; unsigned int order = 0;
struct folio *hugetlb_folio = NULL;
struct folio *new_folio = NULL; struct folio *new_folio = NULL;
int nid; int nid;
int zidx; int zidx;
...@@ -1677,7 +1678,9 @@ struct page *alloc_migration_target(struct page *page, unsigned long private) ...@@ -1677,7 +1678,9 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
struct hstate *h = folio_hstate(folio); struct hstate *h = folio_hstate(folio);
gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask); hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
mtc->nmask, gfp_mask);
return &hugetlb_folio->page;
} }
if (folio_test_large(folio)) { if (folio_test_large(folio)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment