Commit 63ad4add authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert do_swap_page() to use a folio

Removes quite a lot of calls to compound_head().

Link: https://lkml.kernel.org/r/20220902194653.1739778-15-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4081f744
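For readers skimming the diff below: a struct folio can never be a tail page of a compound page, so the folio_test_*() helpers read the flags word directly, whereas the page-based helpers (PageSwapCache(), PageUptodate(), PageKsm(), ...) each hide a compound_head() lookup. The sketch below is a minimal userspace model of that difference, not the kernel's real struct page / struct folio definitions; the head pointer field, the PG_swapcache bit value and the helper bodies here are simplified assumptions for illustration only.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kernel structures (simplified, illustrative only). */
struct page {
	unsigned long flags;
	struct page *head;	/* non-NULL if this models a tail page */
};

struct folio {
	struct page page;	/* a folio is always a head (or order-0) page */
};

#define PG_swapcache (1UL << 0)	/* assumed bit position, demo only */

/* Models the indirection every PageXxx() helper has to pay. */
static struct page *compound_head(struct page *page)
{
	return page->head ? page->head : page;
}

static bool PageSwapCache(struct page *page)
{
	return compound_head(page)->flags & PG_swapcache;	/* extra lookup */
}

static bool folio_test_swapcache(struct folio *folio)
{
	return folio->page.flags & PG_swapcache;		/* direct read */
}

int main(void)
{
	struct folio folio = { .page = { .flags = PG_swapcache, .head = NULL } };
	struct page tail = { .flags = 0, .head = &folio.page };

	/* Both report the swapcache state, but only the page path chases a pointer. */
	printf("PageSwapCache(tail page)    = %d\n", PageSwapCache(&tail));
	printf("folio_test_swapcache(folio) = %d\n", folio_test_swapcache(&folio));
	return 0;
}

In the kernel itself the head lookup is hidden inside the page-flag macros; because a folio is by definition never a tail page, the folio accessors skip it, which is what removes "quite a lot of calls to compound_head()" across do_swap_page().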
@@ -3724,6 +3724,7 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
 vm_fault_t do_swap_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
+	struct folio *folio;
 	struct page *page = NULL, *swapcache;
 	struct swap_info_struct *si = NULL;
 	rmap_t rmap_flags = RMAP_NONE;
@@ -3768,19 +3769,23 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	page = lookup_swap_cache(entry, vma, vmf->address);
 	swapcache = page;
+	if (page)
+		folio = page_folio(page);
 
 	if (!page) {
 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
 		    __swap_count(entry) == 1) {
 			/* skip swapcache */
-			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
-							vmf->address);
-			if (page) {
-				__SetPageLocked(page);
-				__SetPageSwapBacked(page);
+			folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
+						vma, vmf->address, false);
+			page = &folio->page;
+			if (folio) {
+				__folio_set_locked(folio);
+				__folio_set_swapbacked(folio);
 
 				if (mem_cgroup_swapin_charge_page(page,
-					vma->vm_mm, GFP_KERNEL, entry)) {
+							vma->vm_mm, GFP_KERNEL,
+							entry)) {
 					ret = VM_FAULT_OOM;
 					goto out_page;
 				}
@@ -3788,20 +3793,21 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 				shadow = get_shadow_from_swap_cache(entry);
 				if (shadow)
-					workingset_refault(page_folio(page),
-								shadow);
+					workingset_refault(folio, shadow);
 
-				lru_cache_add(page);
+				folio_add_lru(folio);
 
 				/* To provide entry to swap_readpage() */
-				set_page_private(page, entry.val);
+				folio_set_swap_entry(folio, entry);
 				swap_readpage(page, true, NULL);
-				set_page_private(page, 0);
+				folio->private = NULL;
 			}
 		} else {
 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 						vmf);
 			swapcache = page;
+			if (page)
+				folio = page_folio(page);
 		}
 
 		if (!page) {
@@ -3844,7 +3850,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		 * swapcache, we need to check that the page's swap has not
 		 * changed.
 		 */
-		if (unlikely(!PageSwapCache(page) ||
+		if (unlikely(!folio_test_swapcache(folio) ||
 			     page_private(page) != entry.val))
 			goto out_page;
@@ -3859,6 +3865,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 			page = swapcache;
 			goto out_page;
 		}
+		folio = page_folio(page);
 
 		/*
 		 * If we want to map a page that's in the swapcache writable, we
@@ -3867,7 +3874,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		 * pagevecs if required.
 		 */
 		if ((vmf->flags & FAULT_FLAG_WRITE) && page == swapcache &&
-		    !PageKsm(page) && !PageLRU(page))
+		    !folio_test_ksm(folio) && !folio_test_lru(folio))
 			lru_add_drain();
 	}
@@ -3881,7 +3888,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
 		goto out_nomap;
 
-	if (unlikely(!PageUptodate(page))) {
+	if (unlikely(!folio_test_uptodate(folio))) {
 		ret = VM_FAULT_SIGBUS;
 		goto out_nomap;
 	}
@@ -3894,14 +3901,14 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	 * check after taking the PT lock and making sure that nobody
 	 * concurrently faulted in this page and set PG_anon_exclusive.
 	 */
-	BUG_ON(!PageAnon(page) && PageMappedToDisk(page));
-	BUG_ON(PageAnon(page) && PageAnonExclusive(page));
+	BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
+	BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
 
 	/*
 	 * Check under PT lock (to protect against concurrent fork() sharing
 	 * the swap entry concurrently) for certainly exclusive pages.
 	 */
-	if (!PageKsm(page)) {
+	if (!folio_test_ksm(folio)) {
 		/*
 		 * Note that pte_swp_exclusive() == false for architectures
 		 * without __HAVE_ARCH_PTE_SWP_EXCLUSIVE.
@@ -3913,7 +3920,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 			 * swapcache -> certainly exclusive.
 			 */
 			exclusive = true;
-		} else if (exclusive && PageWriteback(page) &&
+		} else if (exclusive && folio_test_writeback(folio) &&
 			  data_race(si->flags & SWP_STABLE_WRITES)) {
 			/*
 			 * This is tricky: not all swap backends support
@@ -3956,7 +3963,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	 * exposing them to the swapcache or because the swap entry indicates
 	 * exclusivity.
 	 */
-	if (!PageKsm(page) && (exclusive || page_count(page) == 1)) {
+	if (!folio_test_ksm(folio) &&
+	    (exclusive || folio_ref_count(folio) == 1)) {
 		if (vmf->flags & FAULT_FLAG_WRITE) {
 			pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 			vmf->flags &= ~FAULT_FLAG_WRITE;
@@ -3976,16 +3984,17 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	/* ksm created a completely new copy */
 	if (unlikely(page != swapcache && swapcache)) {
 		page_add_new_anon_rmap(page, vma, vmf->address);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		folio_add_lru_vma(folio, vma);
 	} else {
 		page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
 	}
 
-	VM_BUG_ON(!PageAnon(page) || (pte_write(pte) && !PageAnonExclusive(page)));
+	VM_BUG_ON(!folio_test_anon(folio) ||
+			(pte_write(pte) && !PageAnonExclusive(page)));
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
 	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
 
-	unlock_page(page);
+	folio_unlock(folio);
 	if (page != swapcache && swapcache) {
 		/*
 		 * Hold the lock to avoid the swap entry to be reused
@@ -4017,9 +4026,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 out_nomap:
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out_page:
-	unlock_page(page);
+	folio_unlock(folio);
 out_release:
-	put_page(page);
+	folio_put(folio);
 	if (page != swapcache && swapcache) {
 		unlock_page(swapcache);
 		put_page(swapcache);