Commit e4a2ed94 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert do_wp_page() to use a folio

Saves many calls to compound_head().
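
For illustration (an editorial sketch, not part of the original commit message): each Page*() flag test and each page_count() call takes a struct page pointer and must internally resolve compound_head(page) to reach the head page. page_folio() performs that resolution once, and the folio_* accessors then operate on the folio directly. The reuse_page() continuation below is hypothetical, standing in for the rest of the fast path:

	/* Before: PageAnon(), PageKsm() and page_count() each resolve
	 * compound_head(page) internally on their own.
	 */
	if (PageAnon(page) && !PageKsm(page) && page_count(page) == 1)
		reuse_page(page);	/* hypothetical continuation */

	/* After: resolve the head page once, then test the folio. */
	struct folio *folio = page_folio(page);
	if (folio_test_anon(folio) && !folio_test_ksm(folio) &&
	    folio_ref_count(folio) == 1)
		reuse_page(page);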

Link: https://lkml.kernel.org/r/20220902194653.1739778-42-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 71fa1a53
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3368,6 +3368,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 {
 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
 	struct vm_area_struct *vma = vmf->vma;
+	struct folio *folio;
 
 	VM_BUG_ON(unshare && (vmf->flags & FAULT_FLAG_WRITE));
 	VM_BUG_ON(!unshare && !(vmf->flags & FAULT_FLAG_WRITE));
@@ -3414,48 +3415,47 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 	 * Take out anonymous pages first, anonymous shared vmas are
 	 * not dirty accountable.
 	 */
-	if (PageAnon(vmf->page)) {
-		struct page *page = vmf->page;
-
+	folio = page_folio(vmf->page);
+	if (folio_test_anon(folio)) {
 		/*
 		 * If the page is exclusive to this process we must reuse the
 		 * page without further checks.
 		 */
-		if (PageAnonExclusive(page))
+		if (PageAnonExclusive(vmf->page))
 			goto reuse;
 
 		/*
-		 * We have to verify under page lock: these early checks are
-		 * just an optimization to avoid locking the page and freeing
+		 * We have to verify under folio lock: these early checks are
+		 * just an optimization to avoid locking the folio and freeing
 		 * the swapcache if there is little hope that we can reuse.
 		 *
-		 * PageKsm() doesn't necessarily raise the page refcount.
+		 * KSM doesn't necessarily raise the folio refcount.
 		 */
-		if (PageKsm(page) || page_count(page) > 3)
+		if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
 			goto copy;
-		if (!PageLRU(page))
+		if (!folio_test_lru(folio))
 			/*
 			 * Note: We cannot easily detect+handle references from
-			 * remote LRU pagevecs or references to PageLRU() pages.
+			 * remote LRU pagevecs or references to LRU folios.
 			 */
 			lru_add_drain();
-		if (page_count(page) > 1 + PageSwapCache(page))
+		if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
 			goto copy;
-		if (!trylock_page(page))
+		if (!folio_trylock(folio))
 			goto copy;
-		if (PageSwapCache(page))
-			try_to_free_swap(page);
-		if (PageKsm(page) || page_count(page) != 1) {
-			unlock_page(page);
+		if (folio_test_swapcache(folio))
+			folio_free_swap(folio);
+		if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
+			folio_unlock(folio);
 			goto copy;
 		}
 
 		/*
-		 * Ok, we've got the only page reference from our mapping
-		 * and the page is locked, it's dark out, and we're wearing
+		 * Ok, we've got the only folio reference from our mapping
+		 * and the folio is locked, it's dark out, and we're wearing
 		 * sunglasses. Hit it.
 		 */
-		page_move_anon_rmap(page, vma);
-		unlock_page(page);
+		page_move_anon_rmap(vmf->page, vma);
+		folio_unlock(folio);
 reuse:
 		if (unlikely(unshare)) {
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
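
A subtlety in the early refcount test above, noted editorially (not part of the commit): a folio in the swapcache is pinned by one extra reference from the swapcache itself, so the reuse fast path tolerates a count of 2 in that case and 1 otherwise. Since folio_test_swapcache() evaluates to 0 or 1, the check is plain arithmetic; restated as a hypothetical helper:

	/* Hypothetical helper restating the early reuse test: allow one
	 * reference for our page table mapping, plus one more when the
	 * folio is in the swapcache (which holds its own reference).
	 */
	static inline bool wp_folio_maybe_reusable(struct folio *folio)
	{
		return folio_ref_count(folio) <= 1 + folio_test_swapcache(folio);
	}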