Commit 5b999aad authored by Dmitry Safonov, committed by Linus Torvalds

mm: swap: zswap: maybe_preload & refactoring

zswap_get_swap_cache_page and read_swap_cache_async have pretty much the
same code; the only significant differences are the return value and the
use of swap_readpage.

I added a helper, __read_swap_cache_async(), containing the common code.
Behavior change: zswap_get_swap_cache_page now uses
radix_tree_maybe_preload instead of radix_tree_preload.  It looks like
this was left unchanged only because of the code duplication.
Signed-off-by: Dmitry Safonov <0x7f454c46@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Herrmann <dh.herrmann@gmail.com>
Cc: Seth Jennings <sjennings@variantweb.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 70864969
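
For context on the behavior change noted in the commit message:
radix_tree_maybe_preload() only differs from radix_tree_preload() when the
gfp mask does not allow the allocator to sleep. A minimal sketch of its
logic as it stood around this commit (simplified from lib/radix-tree.c of
that era; not authoritative):

    /*
     * Sketch of radix_tree_maybe_preload(), simplified. With a blocking
     * mask such as GFP_KERNEL it behaves exactly like radix_tree_preload();
     * with an atomic mask it skips preloading instead of sleeping.
     */
    int radix_tree_maybe_preload(gfp_t gfp_mask)
    {
            if (gfp_mask & __GFP_WAIT)      /* allocation may sleep? */
                    return __radix_tree_preload(gfp_mask);
            /* Preloading can't help with this mask; just disable preemption. */
            preempt_disable();
            return 0;
    }

Since zswap_get_swap_cache_page passes GFP_KERNEL, which allows sleeping,
the switch to the maybe_ variant should be a no-op for zswap in practice.
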
include/linux/swap.h
@@ -406,6 +406,9 @@ extern void free_pages_and_swap_cache(struct page **, int);
 extern struct page *lookup_swap_cache(swp_entry_t);
 extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr);
+extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
+			struct vm_area_struct *vma, unsigned long addr,
+			bool *new_page_allocated);
 extern struct page *swapin_readahead(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr);

mm/swap_state.c
@@ -288,17 +288,14 @@ struct page * lookup_swap_cache(swp_entry_t entry)
 	return page;
 }
 
-/*
- * Locate a page of swap in physical memory, reserving swap cache space
- * and reading the disk if it is not already cached.
- * A failure return means that either the page allocation failed or that
- * the swap entry is no longer in use.
- */
-struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
-			struct vm_area_struct *vma, unsigned long addr)
+struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+			struct vm_area_struct *vma, unsigned long addr,
+			bool *new_page_allocated)
 {
 	struct page *found_page, *new_page = NULL;
+	struct address_space *swapper_space = swap_address_space(entry);
 	int err;
+	*new_page_allocated = false;
 
 	do {
 		/*
@@ -306,8 +303,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * called after lookup_swap_cache() failed, re-calling
 		 * that would confuse statistics.
 		 */
-		found_page = find_get_page(swap_address_space(entry),
-					entry.val);
+		found_page = find_get_page(swapper_space, entry.val);
 		if (found_page)
 			break;
 
@@ -366,7 +362,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * Initiate read into locked page and return.
 		 */
 		lru_cache_add_anon(new_page);
-		swap_readpage(new_page);
+		*new_page_allocated = true;
 		return new_page;
 	}
 	radix_tree_preload_end();
@@ -384,6 +380,25 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	return found_page;
 }
 
+/*
+ * Locate a page of swap in physical memory, reserving swap cache space
+ * and reading the disk if it is not already cached.
+ * A failure return means that either the page allocation failed or that
+ * the swap entry is no longer in use.
+ */
+struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+			struct vm_area_struct *vma, unsigned long addr)
+{
+	bool page_was_allocated;
+	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
+			vma, addr, &page_was_allocated);
+
+	if (page_was_allocated)
+		swap_readpage(retpage);
+
+	return retpage;
+}
+
 static unsigned long swapin_nr_pages(unsigned long offset)
 {
 	static unsigned long prev_offset;

mm/zswap.c
@@ -446,75 +446,14 @@ enum zswap_get_swap_ret {
 static int zswap_get_swap_cache_page(swp_entry_t entry,
 				struct page **retpage)
 {
-	struct page *found_page, *new_page = NULL;
-	struct address_space *swapper_space = swap_address_space(entry);
-	int err;
+	bool page_was_allocated;
 
-	*retpage = NULL;
-	do {
-		/*
-		 * First check the swap cache.  Since this is normally
-		 * called after lookup_swap_cache() failed, re-calling
-		 * that would confuse statistics.
-		 */
-		found_page = find_get_page(swapper_space, entry.val);
-		if (found_page)
-			break;
-
-		/*
-		 * Get a new page to read into from swap.
-		 */
-		if (!new_page) {
-			new_page = alloc_page(GFP_KERNEL);
-			if (!new_page)
-				break; /* Out of memory */
-		}
-
-		/*
-		 * call radix_tree_preload() while we can wait.
-		 */
-		err = radix_tree_preload(GFP_KERNEL);
-		if (err)
-			break;
-
-		/*
-		 * Swap entry may have been freed since our caller observed it.
-		 */
-		err = swapcache_prepare(entry);
-		if (err == -EEXIST) { /* seems racy */
-			radix_tree_preload_end();
-			continue;
-		}
-		if (err) { /* swp entry is obsolete ? */
-			radix_tree_preload_end();
-			break;
-		}
-
-		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
-		__set_page_locked(new_page);
-		SetPageSwapBacked(new_page);
-		err = __add_to_swap_cache(new_page, entry);
-		if (likely(!err)) {
-			radix_tree_preload_end();
-			lru_cache_add_anon(new_page);
-			*retpage = new_page;
-			return ZSWAP_SWAPCACHE_NEW;
-		}
-		radix_tree_preload_end();
-		ClearPageSwapBacked(new_page);
-		__clear_page_locked(new_page);
-		/*
-		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
-		 * clear SWAP_HAS_CACHE flag.
-		 */
-		swapcache_free(entry);
-	} while (err != -ENOMEM);
-
-	if (new_page)
-		page_cache_release(new_page);
-	if (!found_page)
+	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
+			NULL, 0, &page_was_allocated);
+	if (page_was_allocated)
+		return ZSWAP_SWAPCACHE_NEW;
+	if (!*retpage)
 		return ZSWAP_SWAPCACHE_FAIL;
 	return ZSWAP_SWAPCACHE_EXIST;
 }