Commit a49aeb1d authored by Seth Jennings, committed by Greg Kroah-Hartman

staging: zcache: replace xvmalloc with zsmalloc

Replaces xvmalloc with zsmalloc as the persistent memory allocator for zcache.
Signed-off-by: Seth Jennings <sjenning@linux.vnet.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 72a9826b
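For orientation, the diff below uses the handle-based zsmalloc API throughout: objects are referenced by opaque handles and have no stable kernel virtual address, so every access goes through a map/unmap pair. A minimal sketch of that lifecycle (not part of the patch; the pool name and GFP mask are placeholders, and zs_destroy_pool is assumed for cleanup):

/* Minimal sketch, not from the patch: allocate, map, copy, unmap, free. */
static int zs_lifecycle_sketch(void *src, size_t len)
{
	struct zs_pool *pool;
	void *handle;
	char *buf;

	pool = zs_create_pool("example", GFP_NOWAIT);	/* name is a placeholder */
	if (!pool)
		return -ENOMEM;
	handle = zs_malloc(pool, len);		/* opaque handle, not a pointer */
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}
	buf = zs_map_object(pool, handle);	/* object addressable only while mapped */
	memcpy(buf, src, len);
	zs_unmap_object(pool, handle);		/* unmap before any other pool op */
	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}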
drivers/staging/zcache/Kconfig

 config ZCACHE
 	tristate "Dynamic compression of swap pages and clean pagecache pages"
 	depends on (CLEANCACHE || FRONTSWAP) && CRYPTO
-	select XVMALLOC
+	select ZSMALLOC
 	select CRYPTO_LZO
 	default n
 	help
...
drivers/staging/zcache/zcache-main.c

@@ -9,7 +9,7 @@
  * page-accessible memory [1] interfaces, both utilizing the crypto compression
  * API:
  * 1) "compression buddies" ("zbud") is used for ephemeral pages
- * 2) xvmalloc is used for persistent pages.
+ * 2) zsmalloc is used for persistent pages.
  * Xvmalloc (based on the TLSF allocator) has very low fragmentation
  * so maximizes space efficiency, while zbud allows pairs (and potentially,
  * in the future, more than a pair of) compressed pages to be closely linked
@@ -33,7 +33,7 @@
 #include <linux/string.h>
 #include "tmem.h"
-#include "../zram/xvmalloc.h" /* if built in drivers/staging */
+#include "../zsmalloc/zsmalloc.h"
 
 #if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
 #error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
@@ -62,7 +62,7 @@ MODULE_LICENSE("GPL");
 struct zcache_client {
 	struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
-	struct xv_pool *xvpool;
+	struct zs_pool *zspool;
 	bool allocated;
 	atomic_t refcount;
 };
@@ -658,7 +658,7 @@ static int zbud_show_cumul_chunk_counts(char *buf)
 #endif
 
 /**********
- * This "zv" PAM implementation combines the TLSF-based xvMalloc
+ * This "zv" PAM implementation combines the slab-based zsmalloc
  * with the crypto compression API to maximize the amount of data that can
  * be packed into a physical page.
  *
@@ -672,6 +672,7 @@ struct zv_hdr {
 	uint32_t pool_id;
 	struct tmem_oid oid;
 	uint32_t index;
+	size_t size;
 	DECL_SENTINEL
 };
@@ -693,71 +694,73 @@ static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
 static atomic_t zv_curr_dist_counts[NCHUNKS];
 static atomic_t zv_cumul_dist_counts[NCHUNKS];
 
-static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
+static struct zv_hdr *zv_create(struct zs_pool *pool, uint32_t pool_id,
 				struct tmem_oid *oid, uint32_t index,
 				void *cdata, unsigned clen)
 {
-	struct page *page;
-	struct zv_hdr *zv = NULL;
-	uint32_t offset;
-	int alloc_size = clen + sizeof(struct zv_hdr);
-	int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
-	int ret;
+	struct zv_hdr *zv;
+	u32 size = clen + sizeof(struct zv_hdr);
+	int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+	void *handle = NULL;
+	char *buf;
 
 	BUG_ON(!irqs_disabled());
 	BUG_ON(chunks >= NCHUNKS);
-	ret = xv_malloc(xvpool, alloc_size,
-			&page, &offset, ZCACHE_GFP_MASK);
-	if (unlikely(ret))
+	handle = zs_malloc(pool, size);
+	if (!handle)
 		goto out;
 	atomic_inc(&zv_curr_dist_counts[chunks]);
 	atomic_inc(&zv_cumul_dist_counts[chunks]);
-	zv = kmap_atomic(page, KM_USER0) + offset;
+	zv = (struct zv_hdr *)((char *)cdata - sizeof(*zv));
 	zv->index = index;
 	zv->oid = *oid;
 	zv->pool_id = pool_id;
+	zv->size = clen;
 	SET_SENTINEL(zv, ZVH);
-	memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
-	kunmap_atomic(zv, KM_USER0);
+	buf = zs_map_object(pool, handle);
+	memcpy(buf, zv, clen + sizeof(*zv));
+	zs_unmap_object(pool, handle);
out:
-	return zv;
+	return handle;
 }
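Note how the new zv_create builds the header in place: it assumes the caller compressed into a buffer with sizeof(struct zv_hdr) bytes of headroom, assembles the header immediately before the payload, and copies both into the mapped object with a single memcpy. An illustrative restatement (not from the patch; the scratch-buffer name and calling convention are hypothetical):

/* "scratch" is a hypothetical compression buffer; the compressor must
 * have written its output at scratch + sizeof(struct zv_hdr), so that
 * scratch: [ struct zv_hdr | payload, clen bytes ]
 *          ^ zv            ^ cdata                                   */
static void *ex_pack(struct zs_pool *pool, char *scratch, unsigned clen)
{
	void *cdata = scratch + sizeof(struct zv_hdr);
	struct zv_hdr *zv = (struct zv_hdr *)((char *)cdata - sizeof(*zv));
	void *handle = zs_malloc(pool, clen + sizeof(*zv));
	char *buf;

	if (!handle)
		return NULL;
	buf = zs_map_object(pool, handle);
	memcpy(buf, zv, sizeof(*zv) + clen);	/* header + payload in one copy */
	zs_unmap_object(pool, handle);
	return handle;
}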
-static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
+static void zv_free(struct zs_pool *pool, void *handle)
 {
 	unsigned long flags;
-	struct page *page;
-	uint32_t offset;
-	uint16_t size = xv_get_object_size(zv);
-	int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
+	struct zv_hdr *zv;
+	uint16_t size;
+	int chunks;
 
+	zv = zs_map_object(pool, handle);
 	ASSERT_SENTINEL(zv, ZVH);
+	size = zv->size + sizeof(struct zv_hdr);
+	INVERT_SENTINEL(zv, ZVH);
+	zs_unmap_object(pool, handle);
+
+	chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
 	BUG_ON(chunks >= NCHUNKS);
 	atomic_dec(&zv_curr_dist_counts[chunks]);
-	size -= sizeof(*zv);
-	BUG_ON(size == 0);
-	INVERT_SENTINEL(zv, ZVH);
-	page = virt_to_page(zv);
-	offset = (unsigned long)zv & ~PAGE_MASK;
+
 	local_irq_save(flags);
-	xv_free(xvpool, page, offset);
+	zs_free(pool, handle);
 	local_irq_restore(flags);
 }
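zv_free now recovers the object size from the new zv->size field (plus the header) instead of asking the allocator, and both zv_create and zv_free still bucket sizes into chunks with the same round-up shift for the distribution counters. A worked example (CHUNK_SHIFT = 6, i.e. 64-byte chunks, and the 16-byte header are assumptions for illustration; the real definitions are outside this excerpt):

#define EX_CHUNK_SHIFT	6			/* assumed; defined elsewhere */
#define EX_CHUNK_SIZE	(1 << EX_CHUNK_SHIFT)

/* Same rounding as zv_create/zv_free: bytes -> whole chunks. */
static inline int ex_size_to_chunks(unsigned int size)
{
	return (size + (EX_CHUNK_SIZE - 1)) >> EX_CHUNK_SHIFT;
}
/* e.g. a 300-byte payload plus a 16-byte header:
 * ex_size_to_chunks(316) == (316 + 63) >> 6 == 5 chunks */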
-static void zv_decompress(struct page *page, struct zv_hdr *zv)
+static void zv_decompress(struct page *page, void *handle)
 {
 	unsigned int clen = PAGE_SIZE;
 	char *to_va;
-	unsigned size;
 	int ret;
+	struct zv_hdr *zv;
 
+	zv = zs_map_object(zcache_host.zspool, handle);
+	BUG_ON(zv->size == 0);
 	ASSERT_SENTINEL(zv, ZVH);
-	size = xv_get_object_size(zv) - sizeof(*zv);
-	BUG_ON(size == 0);
 	to_va = kmap_atomic(page, KM_USER0);
 	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
-				size, to_va, &clen);
+				zv->size, to_va, &clen);
 	kunmap_atomic(to_va, KM_USER0);
+	zs_unmap_object(zcache_host.zspool, handle);
 	BUG_ON(ret);
 	BUG_ON(clen != PAGE_SIZE);
 }
@@ -984,8 +987,8 @@ int zcache_new_client(uint16_t cli_id)
 		goto out;
 	cli->allocated = 1;
 #ifdef CONFIG_FRONTSWAP
-	cli->xvpool = xv_create_pool();
-	if (cli->xvpool == NULL)
+	cli->zspool = zs_create_pool("zcache", ZCACHE_GFP_MASK);
+	if (cli->zspool == NULL)
 		goto out;
 #endif
 	ret = 0;
@@ -1216,7 +1219,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
 	}
 	/* reject if mean compression is too poor */
 	if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) {
-		total_zsize = xv_get_total_size_bytes(cli->xvpool);
+		total_zsize = zs_get_total_size_bytes(cli->zspool);
 		zv_mean_zsize = div_u64(total_zsize,
 					curr_pers_pampd_count);
 		if (zv_mean_zsize > zv_max_mean_zsize) {
@@ -1224,7 +1227,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
 			goto out;
 		}
 	}
-	pampd = (void *)zv_create(cli->xvpool, pool->pool_id,
+	pampd = (void *)zv_create(cli->zspool, pool->pool_id,
 				oid, index, cdata, clen);
 	if (pampd == NULL)
 		goto out;
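The admission policy itself is unchanged; only its byte-count source moved to zs_get_total_size_bytes. With 4 KiB pages, zv_max_mean_zsize is (PAGE_SIZE / 8) * 5 = 2560 bytes, so a page that compresses to more than 2560 bytes is rejected whenever the pool-wide mean compressed size already exceeds 2560. A self-contained restatement (PAGE_SIZE assumed 4096; function name is illustrative):

#include <linux/math64.h>

/* Returns true if a new persistent page should be rejected because it
 * compresses poorly AND the pool-wide mean is already above threshold. */
static bool ex_reject_poor_compression(unsigned int clen, u64 total_zsize,
				       u32 curr_pers_pampd_count)
{
	unsigned int zv_max_mean_zsize = (4096 / 8) * 5;	/* 2560 bytes */
	u64 mean;

	if (clen > zv_max_mean_zsize && curr_pers_pampd_count > 0) {
		mean = div_u64(total_zsize, curr_pers_pampd_count);
		if (mean > zv_max_mean_zsize)
			return true;	/* e.g. mean 2764 > 2560 -> reject */
	}
	return false;
}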
@@ -1282,7 +1285,7 @@ static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
 		atomic_dec(&zcache_curr_eph_pampd_count);
 		BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
 	} else {
-		zv_free(cli->xvpool, (struct zv_hdr *)pampd);
+		zv_free(cli->zspool, pampd);
 		atomic_dec(&zcache_curr_pers_pampd_count);
 		BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
 	}
@@ -2072,7 +2075,7 @@ static int __init zcache_init(void)
 		old_ops = zcache_frontswap_register_ops();
 		pr_info("zcache: frontswap enabled using kernel "
-			"transcendent memory and xvmalloc\n");
+			"transcendent memory and zsmalloc\n");
 		if (old_ops.init != NULL)
 			pr_warning("zcache: frontswap_ops overridden");
 	}
...