Commit 8d93b41c authored by Matthew Wilcox

mm: Convert add_to_swap_cache to XArray

Combine __add_to_swap_cache and add_to_swap_cache into one function
since there is no more need to preload.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 69b6c131
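A note on the idiom behind this conversion: the old code had to call radix_tree_maybe_preload_order() before taking the tree lock and radix_tree_preload_end() afterwards, because radix-tree nodes could not reliably be allocated under the spinlock. The XArray instead records -ENOMEM in the xa_state; the caller drops the lock, lets xas_nomem() allocate a node with the supplied gfp flags, and retries the store. That is why the preload step and the separate __add_to_swap_cache() helper disappear in the diff below. A minimal sketch of the pattern, not taken from this commit (store_entry() is an illustrative helper; the xas_* calls are the real <linux/xarray.h> API):

#include <linux/xarray.h>

/* Store one entry at @index, allocating XArray nodes as needed. */
static int store_entry(struct xarray *xa, unsigned long index,
                       void *item, gfp_t gfp)
{
        XA_STATE(xas, xa, index);

        do {
                xas_lock_irq(&xas);
                xas_store(&xas, item);  /* may record -ENOMEM in xas */
                xas_unlock_irq(&xas);
                /*
                 * xas_nomem() allocates a node outside the lock using @gfp
                 * and returns true if the store should be retried.
                 */
        } while (xas_nomem(&xas, gfp));

        return xas_error(&xas);
}

The gfp argument plays the same role as the new gfp parameter of add_to_swap_cache(): it only governs node allocation on the retry path.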
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -107,14 +107,15 @@ void show_swap_cache_info(void)
 }
 
 /*
- * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-int __add_to_swap_cache(struct page *page, swp_entry_t entry)
+int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
 {
-        int error, i, nr = hpage_nr_pages(page);
-        struct address_space *address_space;
+        struct address_space *address_space = swap_address_space(entry);
         pgoff_t idx = swp_offset(entry);
+        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
+        unsigned long i, nr = 1UL << compound_order(page);
 
         VM_BUG_ON_PAGE(!PageLocked(page), page);
         VM_BUG_ON_PAGE(PageSwapCache(page), page);
@@ -123,50 +124,30 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
         page_ref_add(page, nr);
         SetPageSwapCache(page);
 
-        address_space = swap_address_space(entry);
-        xa_lock_irq(&address_space->i_pages);
-        for (i = 0; i < nr; i++) {
-                set_page_private(page + i, entry.val + i);
-                error = radix_tree_insert(&address_space->i_pages,
-                                          idx + i, page + i);
-                if (unlikely(error))
-                        break;
-        }
-        if (likely(!error)) {
+        do {
+                xas_lock_irq(&xas);
+                xas_create_range(&xas);
+                if (xas_error(&xas))
+                        goto unlock;
+                for (i = 0; i < nr; i++) {
+                        VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
+                        set_page_private(page + i, entry.val + i);
+                        xas_store(&xas, page + i);
+                        xas_next(&xas);
+                }
                 address_space->nrpages += nr;
                 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                 ADD_CACHE_INFO(add_total, nr);
-        } else {
-                /*
-                 * Only the context which have set SWAP_HAS_CACHE flag
-                 * would call add_to_swap_cache().
-                 * So add_to_swap_cache() doesn't returns -EEXIST.
-                 */
-                VM_BUG_ON(error == -EEXIST);
-                set_page_private(page + i, 0UL);
-                while (i--) {
-                        radix_tree_delete(&address_space->i_pages, idx + i);
-                        set_page_private(page + i, 0UL);
-                }
-                ClearPageSwapCache(page);
-                page_ref_sub(page, nr);
-        }
-        xa_unlock_irq(&address_space->i_pages);
+unlock:
+                xas_unlock_irq(&xas);
+        } while (xas_nomem(&xas, gfp));
 
-        return error;
-}
-
-int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
-{
-        int error;
+        if (!xas_error(&xas))
+                return 0;
 
-        error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
-        if (!error) {
-                error = __add_to_swap_cache(page, entry);
-                radix_tree_preload_end();
-        }
-        return error;
+        ClearPageSwapCache(page);
+        page_ref_sub(page, nr);
+        return xas_error(&xas);
 }
 
 /*
@@ -217,7 +198,7 @@ int add_to_swap(struct page *page)
                 return 0;
 
         /*
-         * Radix-tree node allocations from PF_MEMALLOC contexts could
+         * XArray node allocations from PF_MEMALLOC contexts could
          * completely exhaust the page allocator.  __GFP_NOMEMALLOC
          * stops emergency reserves from being allocated.
          *
@@ -229,7 +210,6 @@ int add_to_swap(struct page *page)
          */
         err = add_to_swap_cache(page, entry,
                         __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
-        /* -ENOMEM radix-tree allocation failure */
         if (err)
                 /*
                  * add_to_swap_cache() doesn't return -EEXIST, so we can safely
@@ -413,19 +393,11 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                 break;          /* Out of memory */
                 }
 
-                /*
-                 * call radix_tree_preload() while we can wait.
-                 */
-                err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
-                if (err)
-                        break;
-
                 /*
                  * Swap entry may have been freed since our caller observed it.
                  */
                 err = swapcache_prepare(entry);
                 if (err == -EEXIST) {
-                        radix_tree_preload_end();
                         /*
                          * We might race against get_swap_page() and stumble
                          * across a SWAP_HAS_CACHE swap_map entry whose page
@@ -433,26 +405,19 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                          */
                         cond_resched();
                         continue;
-                }
-                if (err) {              /* swp entry is obsolete ? */
-                        radix_tree_preload_end();
+                } else if (err)         /* swp entry is obsolete ? */
                         break;
-                }
 
-                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
+                /* May fail (-ENOMEM) if XArray node allocation failed. */
                 __SetPageLocked(new_page);
                 __SetPageSwapBacked(new_page);
-                err = __add_to_swap_cache(new_page, entry);
+                err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
                 if (likely(!err)) {
-                        radix_tree_preload_end();
-                        /*
-                         * Initiate read into locked page and return.
-                         */
+                        /* Initiate read into locked page */
                         lru_cache_add_anon(new_page);
                         *new_page_allocated = true;
                         return new_page;
                 }
-                radix_tree_preload_end();
                 __ClearPageLocked(new_page);
                 /*
                  * add_to_swap_cache() doesn't return -EEXIST, so we can safely