Commit ebc20dca authored by Muchun Song, committed by Andrew Morton

mm: hugetlb_vmemmap: convert page to folio

There are still some places that have not been converted to folios, so this
patch converts all of them to use folios.  This patch also does some trivial
cleanup to fix code style problems.

Link: https://lkml.kernel.org/r/20231127084645.27017-5-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent be035a2a
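
The conversion pattern this patch applies can be illustrated with a minimal,
self-contained C sketch (userspace code with simplified stand-in struct
definitions, not the kernel's real ones): a folio embeds its head page as its
first member, so (unsigned long)&folio->page yields the same address that the
old "struct page *head = &folio->page" local held, and that local can simply
be dropped.

/*
 * Minimal userspace sketch of the page -> folio conversion pattern.
 * The struct layouts below are simplified assumptions, not the real
 * kernel definitions.
 */
#include <stdio.h>

struct page { unsigned long flags; };
struct folio { struct page page; };	/* head page is the first member */

static unsigned long vmemmap_start_of(struct folio *folio)
{
	/* Mirrors the patch: take the address straight from the folio. */
	return (unsigned long)&folio->page;
}

int main(void)
{
	struct folio f = { { 0 } };

	/*
	 * &f and &f.page are the same address, so the conversion changes
	 * only the type the code reasons about, not the generated code.
	 */
	printf("folio %p, head page %p, vmemmap_start %#lx\n",
	       (void *)&f, (void *)&f.page, vmemmap_start_of(&f));
	return 0;
}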
mm/hugetlb_vmemmap.c

@@ -447,14 +447,14 @@ EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
 static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
 core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);
 
-static int __hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio, unsigned long flags)
+static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
+					   struct folio *folio, unsigned long flags)
 {
 	int ret;
-	struct page *head = &folio->page;
-	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+	unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
 	unsigned long vmemmap_reuse;
 
-	VM_WARN_ON_ONCE(!PageHuge(head));
+	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
 	if (!folio_test_hugetlb_vmemmap_optimized(folio))
 		return 0;

@@ -535,9 +535,9 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
 }
 
 /* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
-static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
+static bool vmemmap_should_optimize_folio(const struct hstate *h, struct folio *folio)
 {
-	if (HPageVmemmapOptimized((struct page *)head))
+	if (folio_test_hugetlb_vmemmap_optimized(folio))
 		return false;
 
 	if (!READ_ONCE(vmemmap_optimize_enabled))

@@ -555,12 +555,11 @@ static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
 					     unsigned long flags)
 {
 	int ret = 0;
-	struct page *head = &folio->page;
-	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+	unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
 	unsigned long vmemmap_reuse;
 
-	VM_WARN_ON_ONCE(!PageHuge(head));
-	if (!vmemmap_should_optimize(h, head))
+	VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
+	if (!vmemmap_should_optimize_folio(h, folio))
 		return ret;
 
 	static_branch_inc(&hugetlb_optimize_vmemmap_key);

@@ -615,12 +614,12 @@ void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
 	free_vmemmap_page_list(&vmemmap_pages);
 }
 
-static int hugetlb_vmemmap_split(const struct hstate *h, struct page *head)
+static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *folio)
 {
-	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
+	unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
 	unsigned long vmemmap_reuse;
 
-	if (!vmemmap_should_optimize(h, head))
+	if (!vmemmap_should_optimize_folio(h, folio))
 		return 0;
 
 	vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);

@@ -640,7 +639,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
 	LIST_HEAD(vmemmap_pages);
 
 	list_for_each_entry(folio, folio_list, lru) {
-		int ret = hugetlb_vmemmap_split(h, &folio->page);
+		int ret = hugetlb_vmemmap_split_folio(h, folio);
 
 		/*
 		 * Spliting the PMD requires allocating a page, thus lets fail

@@ -655,8 +654,9 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
 	flush_tlb_all();
 
 	list_for_each_entry(folio, folio_list, lru) {
-		int ret = __hugetlb_vmemmap_optimize_folio(h, folio,
-							   &vmemmap_pages,
+		int ret;
+
+		ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
 							   VMEMMAP_REMAP_NO_TLB_FLUSH);
 
 		/*

@@ -671,8 +671,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
 			flush_tlb_all();
 			free_vmemmap_page_list(&vmemmap_pages);
 			INIT_LIST_HEAD(&vmemmap_pages);
-			__hugetlb_vmemmap_optimize_folio(h, folio,
-							 &vmemmap_pages,
+			__hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages,
 							 VMEMMAP_REMAP_NO_TLB_FLUSH);
 		}
 	}