Commit a0d3374b authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm/swap: convert __read_swap_cache_async() to use a folio

Remove a few hidden (and one visible) calls to compound_head().
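
The hidden calls sit inside page-based helpers used by the old code, such as unlock_page() and put_page(), each of which must resolve the head page on entry; the one visible call is the page_folio(page) argument passed to workingset_refault() in the last hunk below. A minimal sketch of the hidden-call pattern, modelled on the kernel's unlock_page() wrapper (not the verbatim source for every release):

	/*
	 * Sketch: a page API that hides a compound_head() call.
	 * Modelled on mm/filemap.c; a caller that already holds a
	 * folio can call folio_unlock() directly and skip the lookup.
	 */
	void unlock_page(struct page *page)
	{
		/* page_folio() resolves the head page via compound_head() */
		return folio_unlock(page_folio(page));
	}

Working with the folio from allocation onwards means none of those lookups happen inside this function any more.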

Link: https://lkml.kernel.org/r/20220902194653.1739778-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent bdb0ed54
@@ -411,7 +411,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			bool *new_page_allocated)
 {
 	struct swap_info_struct *si;
-	struct page *page;
+	struct folio *folio;
 	void *shadow = NULL;
 
 	*new_page_allocated = false;
@@ -426,11 +426,11 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		si = get_swap_device(entry);
 		if (!si)
 			return NULL;
-		page = find_get_page(swap_address_space(entry),
-				     swp_offset(entry));
+		folio = filemap_get_folio(swap_address_space(entry),
+						swp_offset(entry));
 		put_swap_device(si);
-		if (page)
-			return page;
+		if (folio)
+			return folio_file_page(folio, swp_offset(entry));
 
 		/*
 		 * Just skip read ahead for unused swap slot.
@@ -448,8 +448,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
 		 * cause any racers to loop around until we add it to cache.
 		 */
-		page = alloc_page_vma(gfp_mask, vma, addr);
-		if (!page)
+		folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
+		if (!folio)
 			return NULL;
 
 		/*
@@ -459,7 +459,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		if (!err)
 			break;
 
-		put_page(page);
+		folio_put(folio);
 		if (err != -EEXIST)
 			return NULL;
 
@@ -477,30 +477,30 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	 * The swap entry is ours to swap in. Prepare the new page.
 	 */
-	__SetPageLocked(page);
-	__SetPageSwapBacked(page);
+	__folio_set_locked(folio);
+	__folio_set_swapbacked(folio);
 
-	if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry))
+	if (mem_cgroup_swapin_charge_page(&folio->page, NULL, gfp_mask, entry))
 		goto fail_unlock;
 
 	/* May fail (-ENOMEM) if XArray node allocation failed. */
-	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
+	if (add_to_swap_cache(&folio->page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
 		goto fail_unlock;
 
 	mem_cgroup_swapin_uncharge_swap(entry);
 
 	if (shadow)
-		workingset_refault(page_folio(page), shadow);
+		workingset_refault(folio, shadow);
 
-	/* Caller will initiate read into locked page */
-	lru_cache_add(page);
+	/* Caller will initiate read into locked folio */
+	folio_add_lru(folio);
 	*new_page_allocated = true;
-	return page;
+	return &folio->page;
 
 fail_unlock:
-	put_swap_page(page, entry);
-	unlock_page(page);
-	put_page(page);
+	put_swap_page(&folio->page, entry);
+	folio_unlock(folio);
+	folio_put(folio);
 	return NULL;
 }
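
On a swap-cache hit, the function now returns folio_file_page(folio, swp_offset(entry)) rather than a bare page pointer, so a hit inside a large folio still hands the caller the exact subpage for that swap offset. A sketch of what that helper computes, modelled on include/linux/pagemap.h (the exact definition varies slightly between releases):

	static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
	{
		/*
		 * Folio sizes are a power of two, so masking the index
		 * with nr_pages - 1 selects the subpage within the folio.
		 */
		return folio_page(folio, index & (folio_nr_pages(folio) - 1));
	}

The allocation path is unaffected by this: vma_alloc_folio(gfp_mask, 0, vma, addr, false) requests order 0, so a folio allocated in this function is always a single page.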