Commit 8c921b2b authored by Jerome Marchand, committed by Greg Kroah-Hartman

Staging: zram: Refactor zram_read/write() functions

This patch refactors the code of the zram_read/write() functions. It does
not remove much duplicated code on its own, but mostly serves as a helper
for the third patch of this series (Staging: zram: allow partial page
operations).
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 6a587e83
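
For orientation, the shape of the refactor: the per-segment loop that was duplicated in zram_read() and zram_write() moves into a single dispatcher, and the read/write bodies become per-bio_vec helpers that return a status instead of driving the loop themselves. Below is a minimal sketch of the resulting control flow, simplified from the hunks that follow (stats accounting, locking and bio completion elided):

/*
 * Sketch only, not the literal patch; see the diff below for the real
 * error handling, stats accounting and bio completion.
 */
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i;
	u32 index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	struct bio_vec *bvec;

	bio_for_each_segment(bvec, bio, i) {
		/* dispatch one page worth of I/O per bio_vec */
		if (zram_bvec_rw(zram, bvec, index, bio, rw) < 0)
			return;	/* the full version reports a bio error */
		index++;
	}
	/* the full version completes the bio here */
}

Returning an int per bio_vec is what later lets the partial-page patch handle sub-page segments without touching this loop.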
@@ -203,196 +203,199 @@ static void handle_uncompressed_page(struct zram *zram,
 	flush_dcache_page(page);
 }
 
-static void zram_read(struct zram *zram, struct bio *bio)
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+			  u32 index, struct bio *bio)
 {
-	int i;
-	u32 index;
-	struct bio_vec *bvec;
-
-	zram_stat64_inc(zram, &zram->stats.num_reads);
-	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-
-	bio_for_each_segment(bvec, bio, i) {
-		int ret;
-		size_t clen;
-		struct page *page;
-		struct zobj_header *zheader;
-		unsigned char *user_mem, *cmem;
-
-		page = bvec->bv_page;
-
-		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
-			handle_zero_page(page);
-			index++;
-			continue;
-		}
-
-		/* Requested page is not present in compressed area */
-		if (unlikely(!zram->table[index].page)) {
-			pr_debug("Read before write: sector=%lu, size=%u",
-				(ulong)(bio->bi_sector), bio->bi_size);
-			handle_zero_page(page);
-			index++;
-			continue;
-		}
-
-		/* Page is stored uncompressed since it's incompressible */
-		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-			handle_uncompressed_page(zram, page, index);
-			index++;
-			continue;
-		}
-
-		user_mem = kmap_atomic(page, KM_USER0);
-		clen = PAGE_SIZE;
-
-		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
-				zram->table[index].offset;
-
-		ret = lzo1x_decompress_safe(
-			cmem + sizeof(*zheader),
-			xv_get_object_size(cmem) - sizeof(*zheader),
-			user_mem, &clen);
-
-		kunmap_atomic(user_mem, KM_USER0);
-		kunmap_atomic(cmem, KM_USER1);
-
-		/* Should NEVER happen. Return bio error if it does. */
-		if (unlikely(ret != LZO_E_OK)) {
-			pr_err("Decompression failed! err=%d, page=%u\n",
-				ret, index);
-			zram_stat64_inc(zram, &zram->stats.failed_reads);
-			goto out;
-		}
-
-		flush_dcache_page(page);
-		index++;
-	}
-
-	set_bit(BIO_UPTODATE, &bio->bi_flags);
-	bio_endio(bio, 0);
-	return;
-
-out:
-	bio_io_error(bio);
+	int ret;
+	size_t clen;
+	struct page *page;
+	struct zobj_header *zheader;
+	unsigned char *user_mem, *cmem;
+
+	page = bvec->bv_page;
+
+	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
+		handle_zero_page(page);
+		return 0;
+	}
+
+	/* Requested page is not present in compressed area */
+	if (unlikely(!zram->table[index].page)) {
+		pr_debug("Read before write: sector=%lu, size=%u",
+			 (ulong)(bio->bi_sector), bio->bi_size);
+		handle_zero_page(page);
+		return 0;
+	}
+
+	/* Page is stored uncompressed since it's incompressible */
+	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+		handle_uncompressed_page(zram, page, index);
+		return 0;
+	}
+
+	user_mem = kmap_atomic(page, KM_USER0);
+	clen = PAGE_SIZE;
+
+	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+		zram->table[index].offset;
+
+	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
+				    xv_get_object_size(cmem) - sizeof(*zheader),
+				    user_mem, &clen);
+
+	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(cmem, KM_USER1);
+
+	/* Should NEVER happen. Return bio error if it does. */
+	if (unlikely(ret != LZO_E_OK)) {
+		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
+		zram_stat64_inc(zram, &zram->stats.failed_reads);
+		return ret;
+	}
+
+	flush_dcache_page(page);
+
+	return 0;
 }
 
-static void zram_write(struct zram *zram, struct bio *bio)
+static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
 {
-	int i;
-	u32 index;
-	struct bio_vec *bvec;
-
-	zram_stat64_inc(zram, &zram->stats.num_writes);
-	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-
-	bio_for_each_segment(bvec, bio, i) {
-		int ret;
-		u32 offset;
-		size_t clen;
-		struct zobj_header *zheader;
-		struct page *page, *page_store;
-		unsigned char *user_mem, *cmem, *src;
-
-		page = bvec->bv_page;
-		src = zram->compress_buffer;
-
-		/*
-		 * System overwrites unused sectors. Free memory associated
-		 * with this sector now.
-		 */
-		if (zram->table[index].page ||
-				zram_test_flag(zram, index, ZRAM_ZERO))
-			zram_free_page(zram, index);
-
-		mutex_lock(&zram->lock);
-
-		user_mem = kmap_atomic(page, KM_USER0);
-		if (page_zero_filled(user_mem)) {
-			kunmap_atomic(user_mem, KM_USER0);
-			mutex_unlock(&zram->lock);
-			zram_stat_inc(&zram->stats.pages_zero);
-			zram_set_flag(zram, index, ZRAM_ZERO);
-			index++;
-			continue;
-		}
-
-		ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
-					zram->compress_workmem);
-
-		kunmap_atomic(user_mem, KM_USER0);
-
-		if (unlikely(ret != LZO_E_OK)) {
-			mutex_unlock(&zram->lock);
-			pr_err("Compression failed! err=%d\n", ret);
-			zram_stat64_inc(zram, &zram->stats.failed_writes);
-			goto out;
-		}
-
-		/*
-		 * Page is incompressible. Store it as-is (uncompressed)
-		 * since we do not want to return too many disk write
-		 * errors which has side effect of hanging the system.
-		 */
-		if (unlikely(clen > max_zpage_size)) {
-			clen = PAGE_SIZE;
-			page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
-			if (unlikely(!page_store)) {
-				mutex_unlock(&zram->lock);
-				pr_info("Error allocating memory for "
-					"incompressible page: %u\n", index);
-				zram_stat64_inc(zram,
-					&zram->stats.failed_writes);
-				goto out;
-			}
-
-			offset = 0;
-			zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
-			zram_stat_inc(&zram->stats.pages_expand);
-			zram->table[index].page = page_store;
-			src = kmap_atomic(page, KM_USER0);
-			goto memstore;
-		}
-
-		if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
-				&zram->table[index].page, &offset,
-				GFP_NOIO | __GFP_HIGHMEM)) {
-			mutex_unlock(&zram->lock);
-			pr_info("Error allocating memory for compressed "
-				"page: %u, size=%zu\n", index, clen);
-			zram_stat64_inc(zram, &zram->stats.failed_writes);
-			goto out;
-		}
+	int ret;
+	u32 offset;
+	size_t clen;
+	struct zobj_header *zheader;
+	struct page *page, *page_store;
+	unsigned char *user_mem, *cmem, *src;
+
+	page = bvec->bv_page;
+	src = zram->compress_buffer;
+
+	/*
+	 * System overwrites unused sectors. Free memory associated
+	 * with this sector now.
+	 */
+	if (zram->table[index].page ||
+	    zram_test_flag(zram, index, ZRAM_ZERO))
+		zram_free_page(zram, index);
+
+	mutex_lock(&zram->lock);
+
+	user_mem = kmap_atomic(page, KM_USER0);
+	if (page_zero_filled(user_mem)) {
+		kunmap_atomic(user_mem, KM_USER0);
+		mutex_unlock(&zram->lock);
+		zram_stat_inc(&zram->stats.pages_zero);
+		zram_set_flag(zram, index, ZRAM_ZERO);
+		return 0;
+	}
+
+	ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
+			       zram->compress_workmem);
+
+	kunmap_atomic(user_mem, KM_USER0);
+
+	if (unlikely(ret != LZO_E_OK)) {
+		mutex_unlock(&zram->lock);
+		pr_err("Compression failed! err=%d\n", ret);
+		zram_stat64_inc(zram, &zram->stats.failed_writes);
+		return ret;
+	}
+
+	/*
+	 * Page is incompressible. Store it as-is (uncompressed)
+	 * since we do not want to return too many disk write
+	 * errors which has side effect of hanging the system.
+	 */
+	if (unlikely(clen > max_zpage_size)) {
+		clen = PAGE_SIZE;
+		page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
+		if (unlikely(!page_store)) {
+			mutex_unlock(&zram->lock);
+			pr_info("Error allocating memory for "
+				"incompressible page: %u\n", index);
+			zram_stat64_inc(zram, &zram->stats.failed_writes);
+			return -ENOMEM;
+		}
+
+		offset = 0;
+		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
+		zram_stat_inc(&zram->stats.pages_expand);
+		zram->table[index].page = page_store;
+		src = kmap_atomic(page, KM_USER0);
+		goto memstore;
+	}
+
+	if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
+		      &zram->table[index].page, &offset,
+		      GFP_NOIO | __GFP_HIGHMEM)) {
+		mutex_unlock(&zram->lock);
+		pr_info("Error allocating memory for compressed "
+			"page: %u, size=%zu\n", index, clen);
+		zram_stat64_inc(zram, &zram->stats.failed_writes);
+		return -ENOMEM;
+	}
 
 memstore:
-		zram->table[index].offset = offset;
+	zram->table[index].offset = offset;
 
-		cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
-				zram->table[index].offset;
+	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+		zram->table[index].offset;
 
 #if 0
-		/* Back-reference needed for memory defragmentation */
-		if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
-			zheader = (struct zobj_header *)cmem;
-			zheader->table_idx = index;
-			cmem += sizeof(*zheader);
-		}
+	/* Back-reference needed for memory defragmentation */
+	if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
+		zheader = (struct zobj_header *)cmem;
+		zheader->table_idx = index;
+		cmem += sizeof(*zheader);
+	}
 #endif
 
-		memcpy(cmem, src, clen);
+	memcpy(cmem, src, clen);
 
-		kunmap_atomic(cmem, KM_USER1);
-		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
-			kunmap_atomic(src, KM_USER0);
+	kunmap_atomic(cmem, KM_USER1);
+	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
+		kunmap_atomic(src, KM_USER0);
 
-		/* Update stats */
-		zram_stat64_add(zram, &zram->stats.compr_size, clen);
-		zram_stat_inc(&zram->stats.pages_stored);
-		if (clen <= PAGE_SIZE / 2)
-			zram_stat_inc(&zram->stats.good_compress);
+	/* Update stats */
+	zram_stat64_add(zram, &zram->stats.compr_size, clen);
+	zram_stat_inc(&zram->stats.pages_stored);
+	if (clen <= PAGE_SIZE / 2)
+		zram_stat_inc(&zram->stats.good_compress);
 
-		mutex_unlock(&zram->lock);
+	mutex_unlock(&zram->lock);
+
+	return 0;
+}
+
+static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
+			struct bio *bio, int rw)
+{
+	if (rw == READ)
+		return zram_bvec_read(zram, bvec, index, bio);
+
+	return zram_bvec_write(zram, bvec, index);
+}
+
+static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
+{
+	int i;
+	u32 index;
+	struct bio_vec *bvec;
+
+	switch (rw) {
+	case READ:
+		zram_stat64_inc(zram, &zram->stats.num_reads);
+		break;
+	case WRITE:
+		zram_stat64_inc(zram, &zram->stats.num_writes);
+		break;
+	}
+
+	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
+
+	bio_for_each_segment(bvec, bio, i) {
+		if (zram_bvec_rw(zram, bvec, index, bio, rw) < 0)
+			goto out;
 		index++;
 	}
@@ -439,15 +442,7 @@ static int zram_make_request(struct request_queue *queue, struct bio *bio)
 		return 0;
 	}
 
-	switch (bio_data_dir(bio)) {
-	case READ:
-		zram_read(zram, bio);
-		break;
-
-	case WRITE:
-		zram_write(zram, bio);
-		break;
-	}
+	__zram_make_request(zram, bio, bio_data_dir(bio));
 
 	return 0;
 }