Commit e28833bc authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert do_huge_pmd_anonymous_page to huge_zero_folio

Use folios more widely: convert set_huge_zero_page() to set_huge_zero_folio() and have do_huge_pmd_anonymous_page() take its reference on the huge zero folio via mm_get_huge_zero_folio() instead of working with the huge zero page.

Link: https://lkml.kernel.org/r/20240326202833.523759-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5691753d
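The diff below is against mm/huge_memory.c and follows the usual page-to-folio conversion pattern: the helper and its caller now pass the huge zero folio around, and where an interface still expects a struct page (mk_pmd() here), the folio's embedded head page is handed over as &zero_folio->page. For orientation, here is the helper as it reads after the patch, reassembled from the new side of the first hunk; the trailing mm_inc_nr_ptes() call and closing brace fall outside the hunk and are filled in from the surrounding function, and the comments are editorial additions, not part of the patch:

/* Caller must hold page table lock. */
static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct folio *zero_folio)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return;
	/* Build a huge PMD entry pointing at the shared huge zero folio. */
	entry = mk_pmd(&zero_folio->page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	/* Deposit the preallocated page table for use if the PMD is split later. */
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);	/* outside the hunk below; from the surrounding code */
}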
@@ -971,14 +971,14 @@ gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
 }
 
 /* Caller must hold page table lock. */
-static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
+static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
-		struct page *zero_page)
+		struct folio *zero_folio)
 {
 	pmd_t entry;
 	if (!pmd_none(*pmd))
 		return;
-	entry = mk_pmd(zero_page, vma->vm_page_prot);
+	entry = mk_pmd(&zero_folio->page, vma->vm_page_prot);
 	entry = pmd_mkhuge(entry);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, haddr, pmd, entry);
@@ -1002,13 +1002,14 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 			!mm_forbids_zeropage(vma->vm_mm) &&
 			transparent_hugepage_use_zero_page()) {
 		pgtable_t pgtable;
-		struct page *zero_page;
+		struct folio *zero_folio;
 		vm_fault_t ret;
+
 		pgtable = pte_alloc_one(vma->vm_mm);
 		if (unlikely(!pgtable))
 			return VM_FAULT_OOM;
-		zero_page = mm_get_huge_zero_page(vma->vm_mm);
-		if (unlikely(!zero_page)) {
+		zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
+		if (unlikely(!zero_folio)) {
 			pte_free(vma->vm_mm, pgtable);
 			count_vm_event(THP_FAULT_FALLBACK);
 			return VM_FAULT_FALLBACK;
@@ -1026,8 +1027,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 			ret = handle_userfault(vmf, VM_UFFD_MISSING);
 			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
 		} else {
-			set_huge_zero_page(pgtable, vma->vm_mm, vma,
-					   haddr, vmf->pmd, zero_page);
+			set_huge_zero_folio(pgtable, vma->vm_mm, vma,
+					    haddr, vmf->pmd, zero_folio);
 			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 			spin_unlock(vmf->ptl);
 		}
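Read together, the two do_huge_pmd_anonymous_page() hunks above give the read-fault zero-folio path the following shape after the patch. This is a sketch reassembled from the new side of those hunks, with the page-table-lock handling and the userfaultfd branch elided as marked; the comments are editorial:

	pgtable_t pgtable;
	struct folio *zero_folio;

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable))
		return VM_FAULT_OOM;

	/* Take a reference on the mm's huge zero folio. */
	zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
	if (unlikely(!zero_folio)) {
		pte_free(vma->vm_mm, pgtable);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;	/* fall back to small pages */
	}

	/* ... take the PMD lock and handle userfaultfd (elided) ... */

	/* Install the huge zero folio mapping and publish it. */
	set_huge_zero_folio(pgtable, vma->vm_mm, vma, haddr, vmf->pmd, zero_folio);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
	spin_unlock(vmf->ptl);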
@@ -1336,9 +1337,9 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 */
 	if (is_huge_zero_pmd(pmd)) {
 		/*
-		 * get_huge_zero_page() will never allocate a new page here,
-		 * since we already have a zero page to copy. It just takes a
-		 * reference.
+		 * mm_get_huge_zero_folio() will never allocate a new
+		 * folio here, since we already have a zero page to
+		 * copy. It just takes a reference.
 		 */
 		mm_get_huge_zero_folio(dst_mm);
 		goto out_zero_page;