Commit 2fad3d14 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

huge_memory: convert do_huge_pmd_wp_page() to use a folio

Removes many calls to compound_head().  Does not remove the assumption
that a folio may not be larger than a PMD.
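As background, a simplified sketch of why the conversion drops compound_head() calls: page-flag helpers that accept any struct page must first resolve a possible tail page to its head, while the folio equivalents test the folio's own flags word directly. The helper names below are hypothetical and the bodies are illustrative only; the real kernel helpers are generated by the PAGEFLAG() macros and carry additional checks.

	/* Illustrative only: the page variant pays for a compound_head()
	 * lookup on every call, the folio variant does not. */
	static inline bool my_page_test_swapcache(struct page *page)
	{
		/* a struct page may be a tail page, so find the head first */
		return test_bit(PG_swapcache, &compound_head(page)->flags);
	}

	static inline bool my_folio_test_swapcache(struct folio *folio)
	{
		/* a folio is never a tail page; test its flags directly */
		return test_bit(PG_swapcache, &folio->flags);
	}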

Link: https://lkml.kernel.org/r/20220902194653.1739778-43-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e4a2ed94
mm/huge_memory.c
@@ -1305,6 +1305,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
 {
 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
 	struct vm_area_struct *vma = vmf->vma;
+	struct folio *folio;
 	struct page *page;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	pmd_t orig_pmd = vmf->orig_pmd;
@@ -1326,46 +1327,48 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
 	}
 
 	page = pmd_page(orig_pmd);
+	folio = page_folio(page);
 	VM_BUG_ON_PAGE(!PageHead(page), page);
 
 	/* Early check when only holding the PT lock. */
 	if (PageAnonExclusive(page))
 		goto reuse;
 
-	if (!trylock_page(page)) {
-		get_page(page);
+	if (!folio_trylock(folio)) {
+		folio_get(folio);
 		spin_unlock(vmf->ptl);
-		lock_page(page);
+		folio_lock(folio);
 		spin_lock(vmf->ptl);
 		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
 			spin_unlock(vmf->ptl);
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			return 0;
 		}
-		put_page(page);
+		folio_put(folio);
 	}
 
 	/* Recheck after temporarily dropping the PT lock. */
 	if (PageAnonExclusive(page)) {
-		unlock_page(page);
+		folio_unlock(folio);
 		goto reuse;
 	}
 
 	/*
-	 * See do_wp_page(): we can only reuse the page exclusively if there are
-	 * no additional references. Note that we always drain the LRU
-	 * pagevecs immediately after adding a THP.
+	 * See do_wp_page(): we can only reuse the folio exclusively if
+	 * there are no additional references. Note that we always drain
+	 * the LRU pagevecs immediately after adding a THP.
 	 */
-	if (page_count(page) > 1 + PageSwapCache(page) * thp_nr_pages(page))
+	if (folio_ref_count(folio) >
+			1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
 		goto unlock_fallback;
-	if (PageSwapCache(page))
-		try_to_free_swap(page);
-	if (page_count(page) == 1) {
+	if (folio_test_swapcache(folio))
+		folio_free_swap(folio);
+	if (folio_ref_count(folio) == 1) {
 		pmd_t entry;
 
 		page_move_anon_rmap(page, vma);
-		unlock_page(page);
+		folio_unlock(folio);
 reuse:
 		if (unlikely(unshare)) {
 			spin_unlock(vmf->ptl);
@@ -1380,7 +1383,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
 	}
 
 unlock_fallback:
-	unlock_page(page);
+	folio_unlock(folio);
 	spin_unlock(vmf->ptl);
 fallback:
 	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
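As a rough aside on the reference-count check in the second hunk above, here is a standalone sketch using a hypothetical helper and plain integers in place of the real folio accessors. It encodes the same rule the check implies: the faulting mapping owns one reference, and the swap cache owns one more per subpage when the folio is in it; anything beyond that means another user exists and the fault must fall back to splitting the PMD.

	#include <stdbool.h>

	/* can_reuse_exclusively - hypothetical stand-in for the check in
	 * do_huge_pmd_wp_page(): reuse is allowed only when no references
	 * exist beyond the faulting mapping and (optionally) the swap cache. */
	static bool can_reuse_exclusively(unsigned long refs, bool in_swapcache,
					  unsigned long nr_pages)
	{
		/* one ref for the mapping, plus one per subpage held by the
		 * swap cache when the folio is present in it */
		unsigned long expected = 1 + (in_swapcache ? nr_pages : 0);

		return refs <= expected;
	}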