Commit 5561347a authored by Sergey Senozhatsky, committed by Andrew Morton

zram: factor out WB and non-WB zram read functions

We will use non-WB variant in ZRAM page recompression path.

Link: https://lkml.kernel.org/r/20221109115047.2921851-4-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Alexey Romanov <avromanov@sberdevices.ru>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Suleiman Souhlal <suleiman@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 001d9273
drivers/block/zram/zram_drv.c

@@ -1336,8 +1336,29 @@ static void zram_free_page(struct zram *zram, size_t index)
 		      ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
 }
 
-static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
-			    struct bio *bio, bool partial_io)
+/*
+ * Reads a page from the writeback devices. Corresponding ZRAM slot
+ * should be unlocked.
+ */
+static int zram_bvec_read_from_bdev(struct zram *zram, struct page *page,
+				    u32 index, struct bio *bio, bool partial_io)
+{
+	struct bio_vec bvec = {
+		.bv_page = page,
+		.bv_len = PAGE_SIZE,
+		.bv_offset = 0,
+	};
+
+	return read_from_bdev(zram, &bvec, zram_get_element(zram, index), bio,
+			      partial_io);
+}
+
+/*
+ * Reads (decompresses if needed) a page from zspool (zsmalloc).
+ * Corresponding ZRAM slot should be locked.
+ */
+static int zram_read_from_zspool(struct zram *zram, struct page *page,
+				 u32 index)
 {
 	struct zcomp_strm *zstrm;
 	unsigned long handle;
@@ -1345,23 +1366,6 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 	void *src, *dst;
 	int ret;
 
-	zram_slot_lock(zram, index);
-	if (zram_test_flag(zram, index, ZRAM_WB)) {
-		struct bio_vec bvec;
-
-		zram_slot_unlock(zram, index);
-		/* A null bio means rw_page was used, we must fallback to bio */
-		if (!bio)
-			return -EOPNOTSUPP;
-
-		bvec.bv_page = page;
-		bvec.bv_len = PAGE_SIZE;
-		bvec.bv_offset = 0;
-		return read_from_bdev(zram, &bvec,
-				      zram_get_element(zram, index),
-				      bio, partial_io);
-	}
-
 	handle = zram_get_handle(zram, index);
 	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
 		unsigned long value;
@@ -1371,7 +1375,6 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 		mem = kmap_atomic(page);
 		zram_fill_page(mem, PAGE_SIZE, value);
 		kunmap_atomic(mem);
-		zram_slot_unlock(zram, index);
 		return 0;
 	}
 
@@ -1393,10 +1396,33 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
 	}
 	zs_unmap_object(zram->mem_pool, handle);
+	return ret;
+}
+
+static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
+			    struct bio *bio, bool partial_io)
+{
+	int ret;
+
+	zram_slot_lock(zram, index);
+	if (!zram_test_flag(zram, index, ZRAM_WB)) {
+		/* Slot should be locked through out the function call */
+		ret = zram_read_from_zspool(zram, page, index);
+		zram_slot_unlock(zram, index);
+	} else {
+		/* Slot should be unlocked before the function call */
+		zram_slot_unlock(zram, index);
+
+		/* A null bio means rw_page was used, we must fallback to bio */
+		if (!bio)
+			return -EOPNOTSUPP;
+
+		ret = zram_bvec_read_from_bdev(zram, page, index, bio,
+					       partial_io);
+	}
 
 	/* Should NEVER happen. Return bio error if it does. */
-	if (WARN_ON(ret))
+	if (WARN_ON(ret < 0))
 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
 
 	return ret;
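The commit message says the non-WB variant will later be reused by the ZRAM page recompression path, which is not part of this commit. As a rough, hypothetical sketch only (the caller name below is invented for illustration), a recompression path could lean on the new locking contract: take the slot lock, skip ZRAM_WB slots entirely, and call zram_read_from_zspool() with the slot still held, never touching the bdev branch.

/*
 * Hypothetical sketch, not from this commit: a recompression-style caller
 * that only reads slots backed by zsmalloc.
 */
static int zram_recompress_read_sketch(struct zram *zram, struct page *page,
				       u32 index)
{
	int ret;

	zram_slot_lock(zram, index);
	/* Written-back slots live on the backing device; nothing to do here. */
	if (zram_test_flag(zram, index, ZRAM_WB)) {
		zram_slot_unlock(zram, index);
		return 0;
	}

	/* Per the new helper's contract, the slot stays locked for this call. */
	ret = zram_read_from_zspool(zram, page, index);

	/* ... recompressing 'page' with a secondary algorithm would go here ... */

	zram_slot_unlock(zram, index);
	return ret;
}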