Commit f102cd8b authored by Matthew Wilcox (Oracle), committed by Andrew Morton

swapfile: convert unuse_pte_range() to use a folio

Delay fetching the precise page from the folio until we're in unuse_pte().
Saves many calls to compound_head().

Link: https://lkml.kernel.org/r/20220902194653.1739778-37-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 2c3f6194
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1758,8 +1758,9 @@ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
  * force COW, vm_page_prot omits write permission from any private vma.
  */
 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long addr, swp_entry_t entry, struct page *page)
+		unsigned long addr, swp_entry_t entry, struct folio *folio)
 {
+	struct page *page = folio_file_page(folio, swp_offset(entry));
 	struct page *swapcache;
 	spinlock_t *ptl;
 	pte_t *pte, new_pte;
@@ -1831,17 +1832,18 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end,
 		unsigned int type)
 {
-	struct page *page;
 	swp_entry_t entry;
 	pte_t *pte;
 	struct swap_info_struct *si;
-	unsigned long offset;
 	int ret = 0;
 	volatile unsigned char *swap_map;
 
 	si = swap_info[type];
 	pte = pte_offset_map(pmd, addr);
 	do {
+		struct folio *folio;
+		unsigned long offset;
+
 		if (!is_swap_pte(*pte))
 			continue;
 
@@ -1852,8 +1854,9 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		offset = swp_offset(entry);
 		pte_unmap(pte);
 		swap_map = &si->swap_map[offset];
-		page = lookup_swap_cache(entry, vma, addr);
-		if (!page) {
+		folio = swap_cache_get_folio(entry, vma, addr);
+		if (!folio) {
+			struct page *page;
 			struct vm_fault vmf = {
 				.vma = vma,
 				.address = addr,
@@ -1863,25 +1866,27 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 						&vmf);
+			if (page)
+				folio = page_folio(page);
 		}
-		if (!page) {
+		if (!folio) {
 			if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
 				goto try_next;
 			return -ENOMEM;
 		}
 
-		lock_page(page);
-		wait_on_page_writeback(page);
-		ret = unuse_pte(vma, pmd, addr, entry, page);
+		folio_lock(folio);
+		folio_wait_writeback(folio);
+		ret = unuse_pte(vma, pmd, addr, entry, folio);
 		if (ret < 0) {
-			unlock_page(page);
-			put_page(page);
+			folio_unlock(folio);
+			folio_put(folio);
 			goto out;
 		}
 
-		try_to_free_swap(page);
-		unlock_page(page);
-		put_page(page);
+		folio_free_swap(folio);
+		folio_unlock(folio);
+		folio_put(folio);
 try_next:
 		pte = pte_offset_map(pmd, addr);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
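
For orientation, here is a minimal userspace sketch (not the kernel code) of why passing the folio down saves compound_head() calls: page-based helpers such as lock_page() must re-derive the head page on every call, while folio-based helpers already operate on the head, and the precise subpage is computed just once inside unuse_pte() via folio_file_page(). The types and helpers below are simplified stand-ins assumed purely for illustration.

/*
 * Userspace sketch of the idea behind this commit, NOT the kernel code:
 * "struct page", "struct folio" and the helpers below are simplified
 * stand-ins used only for illustration.
 *
 * Old flow:  unuse_pte_range() carried a struct page, and every page-based
 *            helper (lock_page(), put_page(), ...) re-derived the head page
 *            via compound_head().
 * New flow:  unuse_pte_range() carries the folio; only unuse_pte() derives
 *            the precise subpage, once, via folio_file_page().
 */
#include <stdio.h>

struct page  { int placeholder; };            /* stand-in for struct page  */
struct folio { struct page pages[4]; };       /* stand-in, 4 subpages      */

static int head_lookups;                      /* counts head-page lookups  */

/* page-based helper: has to find the head page on every call (old style) */
static void lock_page(struct page *page)
{
	head_lookups++;                       /* models compound_head(page) */
	(void)page;
}

/* folio-based helper: already holds the head, nothing to re-derive */
static void folio_lock(struct folio *folio)
{
	(void)folio;
}

/* stand-in for folio_file_page(): pick the subpage for a swap offset */
static struct page *folio_file_page(struct folio *folio, unsigned long offset)
{
	return &folio->pages[offset % 4];
}

/* the precise page is fetched once, only where it is actually needed */
static void unuse_pte_sketch(struct folio *folio, unsigned long swp_offset)
{
	struct page *page = folio_file_page(folio, swp_offset);
	(void)page;
}

int main(void)
{
	struct folio folio = { 0 };

	lock_page(&folio.pages[1]);           /* old style: one head lookup */
	folio_lock(&folio);                   /* new style: zero            */
	unuse_pte_sketch(&folio, 1);

	printf("page-based helper did %d head lookup(s); folio-based did 0\n",
	       head_lookups);
	return 0;
}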