Commit 98804a94 authored by Johannes Weiner, committed by Andrew Morton

mm: zswap: kill zswap_get_swap_cache_page()

The __read_swap_cache_async() interface isn't more difficult to understand
than what the helper abstracts.  Save the indirection and a level of
indentation for the primary work of the writeback func.
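
For context, the open-coded pattern the writeback path switches to amounts to roughly the following sketch, condensed from the diff below (the real error paths in zswap_writeback_entry() use "goto fail" rather than direct returns):

        bool page_was_allocated;
        struct page *page;

        /* Look up or allocate the swapcache page for this swap entry. */
        page = __read_swap_cache_async(swpentry, GFP_KERNEL, NULL, 0,
                                       &page_was_allocated);
        if (!page)                      /* no memory, or the entry was invalidated */
                return -ENOMEM;
        if (!page_was_allocated) {      /* already in the swapcache: raced with load/swapin */
                put_page(page);
                return -EEXIST;
        }
        /* Newly allocated page: locked and already added to the swapcache. */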

Link: https://lkml.kernel.org/r/20230727162343.1415598-4-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: Barry Song <song.bao.hua@hisilicon.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 73108957
@@ -1039,43 +1039,6 @@ static int zswap_enabled_param_set(const char *val,
 /*********************************
 * writeback code
 **********************************/
-/* return enum for zswap_get_swap_cache_page */
-enum zswap_get_swap_ret {
-        ZSWAP_SWAPCACHE_NEW,
-        ZSWAP_SWAPCACHE_EXIST,
-        ZSWAP_SWAPCACHE_FAIL,
-};
-
-/*
- * zswap_get_swap_cache_page
- *
- * This is an adaption of read_swap_cache_async()
- *
- * This function tries to find a page with the given swap entry
- * in the swapper_space address space (the swap cache). If the page
- * is found, it is returned in retpage. Otherwise, a page is allocated,
- * added to the swap cache, and returned in retpage.
- *
- * If success, the swap cache page is returned in retpage
- * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
- * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
- *        the new page is added to swapcache and locked
- * Returns ZSWAP_SWAPCACHE_FAIL on error
- */
-static int zswap_get_swap_cache_page(swp_entry_t entry,
-                                struct page **retpage)
-{
-        bool page_was_allocated;
-
-        *retpage = __read_swap_cache_async(entry, GFP_KERNEL,
-                        NULL, 0, &page_was_allocated);
-        if (page_was_allocated)
-                return ZSWAP_SWAPCACHE_NEW;
-        if (!*retpage)
-                return ZSWAP_SWAPCACHE_FAIL;
-        return ZSWAP_SWAPCACHE_EXIST;
-}
-
 /*
  * Attempts to free an entry by adding a page to the swap cache,
  * decompressing the entry data into the page, and issuing a
@@ -1096,7 +1059,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
         struct scatterlist input, output;
         struct crypto_acomp_ctx *acomp_ctx;
         struct zpool *pool = zswap_find_zpool(entry);
+        bool page_was_allocated;
         u8 *src, *tmp = NULL;
         unsigned int dlen;
         int ret;
@@ -1111,65 +1074,66 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
         }
 
         /* try to allocate swap cache page */
-        switch (zswap_get_swap_cache_page(swpentry, &page)) {
-        case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
-                ret = -ENOMEM;
-                goto fail;
-
-        case ZSWAP_SWAPCACHE_EXIST:
-                /* page is already in the swap cache, ignore for now */
-                put_page(page);
-                ret = -EEXIST;
-                goto fail;
-
-        case ZSWAP_SWAPCACHE_NEW: /* page is locked */
-                /*
-                 * Having a local reference to the zswap entry doesn't exclude
-                 * swapping from invalidating and recycling the swap slot. Once
-                 * the swapcache is secured against concurrent swapping to and
-                 * from the slot, recheck that the entry is still current before
-                 * writing.
-                 */
-                spin_lock(&tree->lock);
-                if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
-                        spin_unlock(&tree->lock);
-                        delete_from_swap_cache(page_folio(page));
-                        ret = -ENOMEM;
-                        goto fail;
-                }
-                spin_unlock(&tree->lock);
-
-                /* decompress */
-                acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
-                dlen = PAGE_SIZE;
-
-                src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
-                if (!zpool_can_sleep_mapped(pool)) {
-                        memcpy(tmp, src, entry->length);
-                        src = tmp;
-                        zpool_unmap_handle(pool, entry->handle);
-                }
-
-                mutex_lock(acomp_ctx->mutex);
-                sg_init_one(&input, src, entry->length);
-                sg_init_table(&output, 1);
-                sg_set_page(&output, page, PAGE_SIZE, 0);
-                acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
-                ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
-                dlen = acomp_ctx->req->dlen;
-                mutex_unlock(acomp_ctx->mutex);
-
-                if (!zpool_can_sleep_mapped(pool))
-                        kfree(tmp);
-                else
-                        zpool_unmap_handle(pool, entry->handle);
-
-                BUG_ON(ret);
-                BUG_ON(dlen != PAGE_SIZE);
-
-                /* page is up to date */
-                SetPageUptodate(page);
-        }
+        page = __read_swap_cache_async(swpentry, GFP_KERNEL, NULL, 0,
+                                       &page_was_allocated);
+        if (!page) {
+                ret = -ENOMEM;
+                goto fail;
+        }
+
+        /* Found an existing page, we raced with load/swapin */
+        if (!page_was_allocated) {
+                put_page(page);
+                ret = -EEXIST;
+                goto fail;
+        }
+
+        /*
+         * Page is locked, and the swapcache is now secured against
+         * concurrent swapping to and from the slot. Verify that the
+         * swap entry hasn't been invalidated and recycled behind our
+         * backs (our zswap_entry reference doesn't prevent that), to
+         * avoid overwriting a new swap page with old compressed data.
+         */
+        spin_lock(&tree->lock);
+        if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) {
+                spin_unlock(&tree->lock);
+                delete_from_swap_cache(page_folio(page));
+                ret = -ENOMEM;
+                goto fail;
+        }
+        spin_unlock(&tree->lock);
+
+        /* decompress */
+        acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+        dlen = PAGE_SIZE;
+
+        src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
+        if (!zpool_can_sleep_mapped(pool)) {
+                memcpy(tmp, src, entry->length);
+                src = tmp;
+                zpool_unmap_handle(pool, entry->handle);
+        }
+
+        mutex_lock(acomp_ctx->mutex);
+        sg_init_one(&input, src, entry->length);
+        sg_init_table(&output, 1);
+        sg_set_page(&output, page, PAGE_SIZE, 0);
+        acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
+        ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
+        dlen = acomp_ctx->req->dlen;
+        mutex_unlock(acomp_ctx->mutex);
+
+        if (!zpool_can_sleep_mapped(pool))
+                kfree(tmp);
+        else
+                zpool_unmap_handle(pool, entry->handle);
+
+        BUG_ON(ret);
+        BUG_ON(dlen != PAGE_SIZE);
+
+        /* page is up to date */
+        SetPageUptodate(page);
 
         /* move it to the tail of the inactive list after end_writeback */
         SetPageReclaim(page);
@@ -1180,16 +1144,16 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
         zswap_written_back_pages++;
 
         return ret;
 
 fail:
         if (!zpool_can_sleep_mapped(pool))
                 kfree(tmp);
 
         /*
-         * if we get here due to ZSWAP_SWAPCACHE_EXIST
-         * a load may be happening concurrently.
-         * it is safe and okay to not free the entry.
-         * it is also okay to return !0
+         * If we get here because the page is already in swapcache, a
+         * load may be happening concurrently. It is safe and okay to
+         * not free the entry. It is also okay to return !0.
          */
         return ret;
 }