Commit 37b51fdd authored by Sergey Senozhatsky, committed by Greg Kroah-Hartman

staging: zram: factor-out zram_decompress_page() function

zram_bvec_read() and zram_read_before_write() shared decompression functionality.
Factor it out into a common zram_decompress_page() function; this also simplifies
error handling in zram_bvec_read().
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Reviewed-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e446f5a8
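
For orientation before the diff: below is a minimal userspace sketch of the pattern this commit applies. All identifiers in it (struct slot, slot_decompress_page(), slot_read(), slot_write_partial(), table) are made up for illustration, and plain memcpy stands in for LZO decompression and zsmalloc mapping; it shows how one shared decompress helper can serve both the read path and the read-before-write path, and is not the kernel code itself.

/*
 * Minimal userspace analogue of the refactoring (illustration only).
 * slot_decompress_page() plays the role the commit gives to
 * zram_decompress_page(): both the plain read path and the
 * read-before-write path call it instead of duplicating the logic.
 * All identifiers are hypothetical; memcpy stands in for lzo1x/zsmalloc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct slot {
	unsigned char *data;   /* NULL means the slot was never written */
	size_t size;           /* stored length, <= PAGE_SIZE */
};

static struct slot table[16];

/* Common helper: fill 'mem' with the full decompressed page for 'index'. */
static int slot_decompress_page(unsigned char *mem, unsigned int index)
{
	const struct slot *s = &table[index];

	if (!s->data) {                 /* read before write: hand back zeroes */
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}
	memcpy(mem, s->data, s->size);  /* stand-in for lzo1x_decompress_safe() */
	memset(mem + s->size, 0, PAGE_SIZE - s->size);
	return 0;
}

/* Read path: analogous to zram_bvec_read() after the refactor. */
static int slot_read(unsigned char *out, unsigned int index)
{
	return slot_decompress_page(out, index);
}

/* Partial-write path: decompress old contents, patch, store back. */
static int slot_write_partial(unsigned int index, size_t offset,
			      const unsigned char *buf, size_t len)
{
	unsigned char *page = malloc(PAGE_SIZE);

	if (!page)
		return -1;
	if (slot_decompress_page(page, index)) {  /* reuse the same helper */
		free(page);
		return -1;
	}
	memcpy(page + offset, buf, len);          /* apply the partial update */
	free(table[index].data);
	table[index].data = page;                 /* "recompress" = store as-is */
	table[index].size = PAGE_SIZE;
	return 0;
}

int main(void)
{
	unsigned char out[PAGE_SIZE];

	slot_write_partial(3, 100, (const unsigned char *)"hello", 5);
	slot_read(out, 3);
	printf("%.5s\n", out + 100);              /* prints "hello" */
	return 0;
}

The point mirrors the commit message: the zero-fill-on-unwritten-slot and decompress logic lives in one place, so both callers shrink and the read path's error handling collapses to a single check.
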
@@ -183,62 +183,25 @@ static inline int is_partial_io(struct bio_vec *bvec)
 	return bvec->bv_len != PAGE_SIZE;
 }
 
-static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
-			  u32 index, int offset, struct bio *bio)
+static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
-	int ret;
-	size_t clen;
-	struct page *page;
-	unsigned char *user_mem, *cmem, *uncmem = NULL;
-
-	page = bvec->bv_page;
-
-	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
-		handle_zero_page(bvec);
-		return 0;
-	}
+	int ret = LZO_E_OK;
+	size_t clen = PAGE_SIZE;
+	unsigned char *cmem;
+	unsigned long handle = zram->table[index].handle;
 
-	/* Requested page is not present in compressed area */
-	if (unlikely(!zram->table[index].handle)) {
-		pr_debug("Read before write: sector=%lu, size=%u",
-			 (ulong)(bio->bi_sector), bio->bi_size);
-		handle_zero_page(bvec);
+	if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
+		memset(mem, 0, PAGE_SIZE);
 		return 0;
 	}
 
-	if (is_partial_io(bvec)) {
-		/* Use a temporary buffer to decompress the page */
-		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!uncmem) {
-			pr_info("Error allocating temp memory!\n");
-			return -ENOMEM;
-		}
-	}
-
-	user_mem = kmap_atomic(page);
-	if (!is_partial_io(bvec))
-		uncmem = user_mem;
-	clen = PAGE_SIZE;
-
-	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
-			     ZS_MM_RO);
-
-	if (zram->table[index].size == PAGE_SIZE) {
-		memcpy(uncmem, cmem, PAGE_SIZE);
-		ret = LZO_E_OK;
-	} else {
-		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-					    uncmem, &clen);
-	}
-
-	if (is_partial_io(bvec)) {
-		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
-		       bvec->bv_len);
-		kfree(uncmem);
-	}
-
-	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-	kunmap_atomic(user_mem);
+	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
+	if (zram->table[index].size == PAGE_SIZE)
+		memcpy(mem, cmem, PAGE_SIZE);
+	else
+		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
+					    mem, &clen);
+	zs_unmap_object(zram->mem_pool, handle);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -247,36 +210,56 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		return ret;
 	}
 
-	flush_dcache_page(page);
-
 	return 0;
 }
 
-static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+			  u32 index, int offset, struct bio *bio)
 {
 	int ret;
-	size_t clen = PAGE_SIZE;
-	unsigned char *cmem;
-	unsigned long handle = zram->table[index].handle;
+	struct page *page;
+	unsigned char *user_mem, *uncmem = NULL;
 
-	if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
-		memset(mem, 0, PAGE_SIZE);
+	page = bvec->bv_page;
+
+	if (unlikely(!zram->table[index].handle) ||
+	    zram_test_flag(zram, index, ZRAM_ZERO)) {
+		handle_zero_page(bvec);
 		return 0;
 	}
 
-	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
-	ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
-				    mem, &clen);
-	zs_unmap_object(zram->mem_pool, handle);
+	user_mem = kmap_atomic(page);
+	if (is_partial_io(bvec))
+		/* Use a temporary buffer to decompress the page */
+		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	else
+		uncmem = user_mem;
+
+	if (!uncmem) {
+		pr_info("Unable to allocate temp memory\n");
+		ret = -ENOMEM;
+		goto out_cleanup;
+	}
 
+	ret = zram_decompress_page(zram, uncmem, index);
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
 		zram_stat64_inc(zram, &zram->stats.failed_reads);
-		return ret;
+		goto out_cleanup;
 	}
 
-	return 0;
+	if (is_partial_io(bvec))
+		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
+		       bvec->bv_len);
+
+	flush_dcache_page(page);
+	ret = 0;
+out_cleanup:
+	kunmap_atomic(user_mem);
+	if (is_partial_io(bvec))
+		kfree(uncmem);
+	return ret;
 }
 
 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
@@ -302,7 +285,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 			ret = -ENOMEM;
 			goto out;
 		}
-		ret = zram_read_before_write(zram, uncmem, index);
+		ret = zram_decompress_page(zram, uncmem, index);
 		if (ret) {
 			kfree(uncmem);
 			goto out;