Commit 98804a94 authored by Johannes Weiner, committed by Andrew Morton

mm: zswap: kill zswap_get_swap_cache_page()

The __read_swap_cache_async() interface isn't more difficult to understand
than what the helper abstracts.  Save the indirection and a level of
indentation for the primary work of the writeback func.
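
As a rough sketch of the open-coded pattern the writeback path now uses (not verbatim from the patch; the real code sets ret and jumps to the fail label instead of returning directly):

	page = __read_swap_cache_async(swpentry, GFP_KERNEL, NULL, 0,
				       &page_was_allocated);
	if (!page)			/* allocation failed or entry invalidated */
		return -ENOMEM;
	if (!page_was_allocated) {	/* raced with a concurrent load/swapin */
		put_page(page);
		return -EEXIST;
	}
	/* page is newly allocated, locked, and in the swap cache */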

Link: https://lkml.kernel.org/r/20230727162343.1415598-4-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: Barry Song <song.bao.hua@hisilicon.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 73108957
@@ -1039,43 +1039,6 @@ static int zswap_enabled_param_set(const char *val,
 /*********************************
 * writeback code
 **********************************/
-/* return enum for zswap_get_swap_cache_page */
-enum zswap_get_swap_ret {
-	ZSWAP_SWAPCACHE_NEW,
-	ZSWAP_SWAPCACHE_EXIST,
-	ZSWAP_SWAPCACHE_FAIL,
-};
-
-/*
- * zswap_get_swap_cache_page
- *
- * This is an adaption of read_swap_cache_async()
- *
- * This function tries to find a page with the given swap entry
- * in the swapper_space address space (the swap cache). If the page
- * is found, it is returned in retpage. Otherwise, a page is allocated,
- * added to the swap cache, and returned in retpage.
- *
- * If success, the swap cache page is returned in retpage
- * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
- * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
- *	the new page is added to swapcache and locked
- * Returns ZSWAP_SWAPCACHE_FAIL on error
- */
-static int zswap_get_swap_cache_page(swp_entry_t entry,
-				struct page **retpage)
-{
-	bool page_was_allocated;
-
-	*retpage = __read_swap_cache_async(entry, GFP_KERNEL,
-			NULL, 0, &page_was_allocated);
-	if (page_was_allocated)
-		return ZSWAP_SWAPCACHE_NEW;
-	if (!*retpage)
-		return ZSWAP_SWAPCACHE_FAIL;
-	return ZSWAP_SWAPCACHE_EXIST;
-}
-
 /*
  * Attempts to free an entry by adding a page to the swap cache,
  * decompressing the entry data into the page, and issuing a
@@ -1096,7 +1059,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
 	struct zpool *pool = zswap_find_zpool(entry);
-
+	bool page_was_allocated;
 	u8 *src, *tmp = NULL;
 	unsigned int dlen;
 	int ret;
@@ -1111,24 +1074,26 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	}
 
 	/* try to allocate swap cache page */
-	switch (zswap_get_swap_cache_page(swpentry, &page)) {
-	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
+	page = __read_swap_cache_async(swpentry, GFP_KERNEL, NULL, 0,
+				       &page_was_allocated);
+	if (!page) {
 		ret = -ENOMEM;
 		goto fail;
+	}
 
-	case ZSWAP_SWAPCACHE_EXIST:
-		/* page is already in the swap cache, ignore for now */
+	/* Found an existing page, we raced with load/swapin */
+	if (!page_was_allocated) {
 		put_page(page);
 		ret = -EEXIST;
 		goto fail;
+	}
 
-	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
 	/*
-	 * Having a local reference to the zswap entry doesn't exclude
-	 * swapping from invalidating and recycling the swap slot. Once
-	 * the swapcache is secured against concurrent swapping to and
-	 * from the slot, recheck that the entry is still current before
-	 * writing.
+	 * Page is locked, and the swapcache is now secured against
+	 * concurrent swapping to and from the slot. Verify that the
+	 * swap entry hasn't been invalidated and recycled behind our
+	 * backs (our zswap_entry reference doesn't prevent that), to
+	 * avoid overwriting a new swap page with old compressed data.
 	 */
 	spin_lock(&tree->lock);
 	if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
@@ -1169,7 +1134,6 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	/* page is up to date */
 	SetPageUptodate(page);
-	}
 
 	/* move it to the tail of the inactive list after end_writeback */
 	SetPageReclaim(page);
@@ -1180,15 +1144,15 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 	zswap_written_back_pages++;
 
 	return ret;
 
 fail:
 	if (!zpool_can_sleep_mapped(pool))
 		kfree(tmp);
 
 	/*
-	 * if we get here due to ZSWAP_SWAPCACHE_EXIST
-	 * a load may be happening concurrently.
-	 * it is safe and okay to not free the entry.
-	 * it is also okay to return !0
+	 * If we get here because the page is already in swapcache, a
+	 * load may be happening concurrently. It is safe and okay to
+	 * not free the entry. It is also okay to return !0.
 	 */
 	return ret;
 }