Commit fd1a30de authored by Nitin Gupta, committed by Greg Kroah-Hartman

staging: zram: replace xvmalloc with zsmalloc

Replaces xvmalloc with zsmalloc as the compressed page allocator
for zram
Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Acked-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 94add674
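The heart of the conversion is the allocator interface. xvmalloc hands the caller a struct page plus an offset, which must be kmap'ed and adjusted by hand on every access; zsmalloc returns an opaque handle that is mapped and unmapped around each access, and the GFP flags move from each allocation call to pool creation. A minimal sketch of the two store paths, distilled from the hunks below (pool, len and src are placeholder names, not code from this commit):

	/* Before: xvmalloc yields a page + offset pair. */
	struct page *page;
	u32 offset;
	unsigned char *cmem;

	if (xv_malloc(pool, len, &page, &offset, GFP_NOIO | __GFP_HIGHMEM))
		return -ENOMEM;
	cmem = kmap_atomic(page, KM_USER1) + offset;	/* map page, add offset */
	memcpy(cmem, src, len);
	kunmap_atomic(cmem, KM_USER1);
	/* ... and on free: */
	xv_free(pool, page, offset);

	/* After: zsmalloc yields an opaque handle; the GFP flags were
	 * already given at zs_create_pool() time. */
	void *handle = zs_malloc(pool, len);

	if (!handle)
		return -ENOMEM;
	cmem = zs_map_object(pool, handle);	/* pool tracks the location */
	memcpy(cmem, src, len);
	zs_unmap_object(pool, handle);
	/* ... and on free: */
	zs_free(pool, handle);

Note that zsmalloc has no counterpart to xv_get_object_size(), so the driver now has to remember each object's compressed length itself; that is why struct table below trades its page/offset pair for a handle/size pair.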
--- a/drivers/staging/zram/Kconfig
+++ b/drivers/staging/zram/Kconfig
-config XVMALLOC
-	bool
-	default n
-
 config ZRAM
 	tristate "Compressed RAM block device support"
 	depends on BLOCK && SYSFS
-	select XVMALLOC
+	select ZSMALLOC
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
 	default n
--- a/drivers/staging/zram/Makefile
+++ b/drivers/staging/zram/Makefile
 zram-y	:=	zram_drv.o zram_sysfs.o
 obj-$(CONFIG_ZRAM)	+=	zram.o
-obj-$(CONFIG_XVMALLOC)	+=	xvmalloc.o
\ No newline at end of file
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -135,13 +135,9 @@ static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
 
 static void zram_free_page(struct zram *zram, size_t index)
 {
-	u32 clen;
-	void *obj;
-
-	struct page *page = zram->table[index].page;
-	u32 offset = zram->table[index].offset;
-
-	if (unlikely(!page)) {
+	void *handle = zram->table[index].handle;
+
+	if (unlikely(!handle)) {
 		/*
 		 * No memory is allocated for zero filled pages.
 		 * Simply clear zero page flag.
@@ -154,27 +150,24 @@ static void zram_free_page(struct zram *zram, size_t index)
 	}
 
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-		clen = PAGE_SIZE;
-		__free_page(page);
+		__free_page(handle);
 		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
 		zram_stat_dec(&zram->stats.pages_expand);
 		goto out;
 	}
 
-	obj = kmap_atomic(page, KM_USER0) + offset;
-	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
-	kunmap_atomic(obj, KM_USER0);
+	zs_free(zram->mem_pool, handle);
 
-	xv_free(zram->mem_pool, page, offset);
-	if (clen <= PAGE_SIZE / 2)
+	if (zram->table[index].size <= PAGE_SIZE / 2)
 		zram_stat_dec(&zram->stats.good_compress);
 
 out:
-	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
+	zram_stat64_sub(zram, &zram->stats.compr_size,
+			zram->table[index].size);
 	zram_stat_dec(&zram->stats.pages_stored);
 
-	zram->table[index].page = NULL;
-	zram->table[index].offset = 0;
+	zram->table[index].handle = NULL;
+	zram->table[index].size = 0;
 }
 
 static void handle_zero_page(struct bio_vec *bvec)
@@ -196,7 +189,7 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
 	unsigned char *user_mem, *cmem;
 
 	user_mem = kmap_atomic(page, KM_USER0);
-	cmem = kmap_atomic(zram->table[index].page, KM_USER1);
+	cmem = kmap_atomic(zram->table[index].handle, KM_USER1);
 
 	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
 	kunmap_atomic(cmem, KM_USER1);
@@ -227,7 +220,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 	}
 
 	/* Requested page is not present in compressed area */
-	if (unlikely(!zram->table[index].page)) {
+	if (unlikely(!zram->table[index].handle)) {
 		pr_debug("Read before write: sector=%lu, size=%u",
 			(ulong)(bio->bi_sector), bio->bi_size);
 		handle_zero_page(bvec);
@@ -254,11 +247,10 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		uncmem = user_mem;
 	clen = PAGE_SIZE;
 
-	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
-		zram->table[index].offset;
+	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
 
 	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
-				    xv_get_object_size(cmem) - sizeof(*zheader),
+				    zram->table[index].size,
 				    uncmem, &clen);
 
 	if (is_partial_io(bvec)) {
@@ -267,7 +259,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		kfree(uncmem);
 	}
 
-	kunmap_atomic(cmem, KM_USER1);
+	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
 	kunmap_atomic(user_mem, KM_USER0);
 
 	/* Should NEVER happen. Return bio error if it does. */
@@ -290,13 +282,12 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
 	unsigned char *cmem;
 
 	if (zram_test_flag(zram, index, ZRAM_ZERO) ||
-	    !zram->table[index].page) {
+	    !zram->table[index].handle) {
 		memset(mem, 0, PAGE_SIZE);
 		return 0;
 	}
 
-	cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
-		zram->table[index].offset;
+	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);
 
 	/* Page is stored uncompressed since it's incompressible */
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
@@ -306,9 +297,9 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
 	}
 
 	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
-				    xv_get_object_size(cmem) - sizeof(*zheader),
+				    zram->table[index].size,
 				    mem, &clen);
-	kunmap_atomic(cmem, KM_USER0);
+	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -326,6 +317,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	int ret;
 	u32 store_offset;
 	size_t clen;
+	void *handle;
 	struct zobj_header *zheader;
 	struct page *page, *page_store;
 	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
@@ -355,7 +347,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	 * System overwrites unused sectors. Free memory associated
 	 * with this sector now.
 	 */
-	if (zram->table[index].page ||
+	if (zram->table[index].handle ||
 	    zram_test_flag(zram, index, ZRAM_ZERO))
 		zram_free_page(zram, index);
@@ -407,26 +399,22 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		store_offset = 0;
 		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
 		zram_stat_inc(&zram->stats.pages_expand);
-		zram->table[index].page = page_store;
+		handle = page_store;
 		src = kmap_atomic(page, KM_USER0);
+		cmem = kmap_atomic(page_store, KM_USER1);
 		goto memstore;
 	}
 
-	if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
-			&zram->table[index].page, &store_offset,
-			GFP_NOIO | __GFP_HIGHMEM)) {
+	handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
+	if (!handle) {
 		pr_info("Error allocating memory for compressed "
 			"page: %u, size=%zu\n", index, clen);
 		ret = -ENOMEM;
 		goto out;
 	}
+	cmem = zs_map_object(zram->mem_pool, handle);
 
 memstore:
-	zram->table[index].offset = store_offset;
-
-	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
-		zram->table[index].offset;
-
 #if 0
 	/* Back-reference needed for memory defragmentation */
 	if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
@@ -438,9 +426,15 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 
 	memcpy(cmem, src, clen);
 
-	kunmap_atomic(cmem, KM_USER1);
-	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
+	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+		kunmap_atomic(cmem, KM_USER1);
 		kunmap_atomic(src, KM_USER0);
+	} else {
+		zs_unmap_object(zram->mem_pool, handle);
+	}
+
+	zram->table[index].handle = handle;
+	zram->table[index].size = clen;
 
 	/* Update stats */
 	zram_stat64_add(zram, &zram->stats.compr_size, clen);
@@ -598,25 +592,20 @@ void __zram_reset_device(struct zram *zram)
 
 	/* Free all pages that are still in this zram device */
 	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
-		struct page *page;
-		u16 offset;
-
-		page = zram->table[index].page;
-		offset = zram->table[index].offset;
-
-		if (!page)
+		void *handle = zram->table[index].handle;
+		if (!handle)
 			continue;
 
 		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
-			__free_page(page);
+			__free_page(handle);
 		else
-			xv_free(zram->mem_pool, page, offset);
+			zs_free(zram->mem_pool, handle);
 	}
 
 	vfree(zram->table);
 	zram->table = NULL;
 
-	xv_destroy_pool(zram->mem_pool);
+	zs_destroy_pool(zram->mem_pool);
 	zram->mem_pool = NULL;
 
 	/* Reset stats */
@@ -674,7 +663,7 @@ int zram_init_device(struct zram *zram)
 	/* zram devices sort of resembles non-rotational disks */
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
 
-	zram->mem_pool = xv_create_pool();
+	zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
 	if (!zram->mem_pool) {
 		pr_err("Error creating memory pool\n");
 		ret = -ENOMEM;
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -18,7 +18,7 @@
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
 
-#include "xvmalloc.h"
+#include "../zsmalloc/zsmalloc.h"
 
 /*
  * Some arbitrary value. This is just to catch
@@ -51,7 +51,7 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
 
 /*
  * NOTE: max_zpage_size must be less than or equal to:
- *   XV_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
+ *   ZS_MAX_ALLOC_SIZE - sizeof(struct zobj_header)
  * otherwise, xv_malloc() would always return failure.
  */
 
@@ -81,8 +81,8 @@ enum zram_pageflags {
 
 /* Allocated for each disk page */
 struct table {
-	struct page *page;
-	u16 offset;
+	void *handle;
+	u16 size;	/* object size (excluding header) */
 	u8 count;	/* object ref count (not yet used) */
 	u8 flags;
 } __attribute__((aligned(4)));
@@ -102,7 +102,7 @@ struct zram_stats {
 };
 
 struct zram {
-	struct xv_pool *mem_pool;
+	struct zs_pool *mem_pool;
 	void *compress_workmem;
 	void *compress_buffer;
 	struct table *table;
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -187,7 +187,7 @@ static ssize_t mem_used_total_show(struct device *dev,
 	struct zram *zram = dev_to_zram(dev);
 
 	if (zram->init_done) {
-		val = xv_get_total_size_bytes(zram->mem_pool) +
+		val = zs_get_total_size_bytes(zram->mem_pool) +
 			((u64)(zram->stats.pages_expand) << PAGE_SHIFT);
 	}