Commit 94dc8bff authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: return the folio from swapin_readahead

The unuse_pte_range() caller only wants the folio while do_swap_page()
wants both the page and the folio.  Since do_swap_page() already has logic
for handling both the folio and the page, move the folio-to-page logic
there.  This also lets us allocate larger folios in the SWP_SYNCHRONOUS_IO
path in future.

Link: https://lkml.kernel.org/r/20240807193734.1865400-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 09022bc1
...@@ -4091,7 +4091,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) ...@@ -4091,7 +4091,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
/* skip swapcache */ /* skip swapcache */
folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
vma, vmf->address, false); vma, vmf->address, false);
page = &folio->page;
if (folio) { if (folio) {
__folio_set_locked(folio); __folio_set_locked(folio);
__folio_set_swapbacked(folio); __folio_set_swapbacked(folio);
...@@ -4116,10 +4115,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) ...@@ -4116,10 +4115,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
folio->private = NULL; folio->private = NULL;
} }
} else { } else {
page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
vmf); vmf);
if (page)
folio = page_folio(page);
swapcache = folio; swapcache = folio;
} }
...@@ -4140,6 +4137,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) ...@@ -4140,6 +4137,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
ret = VM_FAULT_MAJOR; ret = VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT); count_vm_event(PGMAJFAULT);
count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
page = folio_file_page(folio, swp_offset(entry));
} else if (PageHWPoison(page)) { } else if (PageHWPoison(page)) {
/* /*
* hwpoisoned dirty swapcache pages are kept for killing * hwpoisoned dirty swapcache pages are kept for killing
......
...@@ -73,7 +73,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags, ...@@ -73,7 +73,7 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
bool skip_if_exists); bool skip_if_exists);
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag, struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
struct mempolicy *mpol, pgoff_t ilx); struct mempolicy *mpol, pgoff_t ilx);
struct page *swapin_readahead(swp_entry_t entry, gfp_t flag, struct folio *swapin_readahead(swp_entry_t entry, gfp_t flag,
struct vm_fault *vmf); struct vm_fault *vmf);
static inline unsigned int folio_swap_flags(struct folio *folio) static inline unsigned int folio_swap_flags(struct folio *folio)
...@@ -109,7 +109,7 @@ static inline struct folio *swap_cluster_readahead(swp_entry_t entry, ...@@ -109,7 +109,7 @@ static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
return NULL; return NULL;
} }
static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
struct vm_fault *vmf) struct vm_fault *vmf)
{ {
return NULL; return NULL;
......
...@@ -863,13 +863,13 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask, ...@@ -863,13 +863,13 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
* @gfp_mask: memory allocation flags * @gfp_mask: memory allocation flags
* @vmf: fault information * @vmf: fault information
* *
* Returns the struct page for entry and addr, after queueing swapin. * Returns the struct folio for entry and addr, after queueing swapin.
* *
* It's a main entry function for swap readahead. By the configuration, * It's a main entry function for swap readahead. By the configuration,
* it will read ahead blocks by cluster-based(ie, physical disk based) * it will read ahead blocks by cluster-based(ie, physical disk based)
* or vma-based(ie, virtual address based on faulty address) readahead. * or vma-based(ie, virtual address based on faulty address) readahead.
*/ */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf) struct vm_fault *vmf)
{ {
struct mempolicy *mpol; struct mempolicy *mpol;
...@@ -882,9 +882,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, ...@@ -882,9 +882,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
swap_cluster_readahead(entry, gfp_mask, mpol, ilx); swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
mpol_cond_put(mpol); mpol_cond_put(mpol);
if (!folio) return folio;
return NULL;
return folio_file_page(folio, swp_offset(entry));
} }
#ifdef CONFIG_SYSFS #ifdef CONFIG_SYSFS
......
...@@ -1980,7 +1980,6 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, ...@@ -1980,7 +1980,6 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
folio = swap_cache_get_folio(entry, vma, addr); folio = swap_cache_get_folio(entry, vma, addr);
if (!folio) { if (!folio) {
struct page *page;
struct vm_fault vmf = { struct vm_fault vmf = {
.vma = vma, .vma = vma,
.address = addr, .address = addr,
...@@ -1988,10 +1987,8 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, ...@@ -1988,10 +1987,8 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
.pmd = pmd, .pmd = pmd,
}; };
page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
&vmf); &vmf);
if (page)
folio = page_folio(page);
} }
if (!folio) { if (!folio) {
swp_count = READ_ONCE(si->swap_map[offset]); swp_count = READ_ONCE(si->swap_map[offset]);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment