Commit 835f2f51 authored by Dan Magenheimer, committed by Linus Torvalds

staging: zcache: enable zcache to be built/loaded as a module

Allow zcache to be built/loaded as a module.  Note that a runtime
dependency disallows loading if the cleancache/frontswap lazy
initialization patches are not present.  Zsmalloc support has not yet
been merged into zcache but, once merged, could easily be selected via
a module_param.

If built-in (not built as a module), the original mechanism of enabling
via a kernel boot parameter is retained, but this should be considered
deprecated.

Note that module unload is explicitly not yet supported.
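
As a rough usage sketch (the module name and the zcache_comp_name
parameter are taken from the module_init()/module_param() declarations
added below, and the zcache= boot parameter from the existing __setup()
handler; treat the exact invocation as illustrative, not definitive):

  # modular build: load zcache and pick the compressor at load time
  modprobe zcache zcache_comp_name=lzo

  # built-in build: legacy (now deprecated) enabling via the kernel command line
  zcache=lzo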
Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
[v1: Rebased with different order of patches]
[v2: Removed [CLEANCACHE|FRONTSWAP]_HAS_LAZY_INIT ifdef]
[v3: Rebased on top of ramster->zcache move]
[v4: Redid the Makefile]
[v5: s/ZCACHE2/ZCACHE/]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Andor Daam <andor.daam@googlemail.com>
Cc: Florian Schmaus <fschmaus@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Stefan Hengelein <ilendir@googlemail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1ac37bee
 config ZCACHE
-	bool "Dynamic compression of swap pages and clean pagecache pages"
+	tristate "Dynamic compression of swap pages and clean pagecache pages"
 	depends on CRYPTO=y && SWAP=y && CLEANCACHE && FRONTSWAP
 	select CRYPTO_LZO
 	default n
@@ -19,8 +19,8 @@ config ZCACHE_DEBUG
 	  how zcache is doing. You probably want to set this to 'N'.
 
 config RAMSTER
-	bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
-	depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE=y
+	tristate "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
+	depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE
 	depends on NET
 	# must ensure struct page is 8-byte aligned
 	select HAVE_ALIGNED_STRUCT_PAGE if !64BIT
......
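With both ZCACHE and RAMSTER promoted to tristate, a configuration
fragment along these lines (a sketch; it assumes the remaining
dependencies such as CRYPTO, SWAP, CONFIGFS_FS, SYSFS and NET are
already satisfied) selects the modular build that this patch enables:

  CONFIG_CLEANCACHE=y
  CONFIG_FRONTSWAP=y
  CONFIG_ZCACHE=m
  CONFIG_RAMSTER=m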
@@ -35,7 +35,8 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
-#ifdef CONFIG_RAMSTER
+#include <linux/export.h>
+#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
 #include <linux/delay.h>
 #endif
@@ -641,6 +642,7 @@ void *tmem_localify_get_pampd(struct tmem_pool *pool, struct tmem_oid *oidp,
 	/* note, hashbucket remains locked */
 	return pampd;
 }
+EXPORT_SYMBOL_GPL(tmem_localify_get_pampd);
 
 void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
 			  void *pampd, void *saved_hb, bool delete)
@@ -658,6 +660,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
 	}
 	spin_unlock(&hb->lock);
 }
+EXPORT_SYMBOL_GPL(tmem_localify_finish);
 
 /*
  * For ramster only.  Helper function to support asynchronous tmem_get.
@@ -719,6 +722,7 @@ int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
 	spin_unlock(&hb->lock);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(tmem_replace);
 
 #endif
 
 /*
......
@@ -126,7 +126,7 @@ static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
 				  TMEM_HASH_BUCKET_BITS);
 }
 
-#ifdef CONFIG_RAMSTER
+#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
 struct tmem_xhandle {
 	uint8_t client_id;
 	uint8_t xh_data_cksum;
@@ -171,7 +171,7 @@ struct tmem_obj {
 	unsigned int objnode_tree_height;
 	unsigned long objnode_count;
 	long pampd_count;
-#ifdef CONFIG_RAMSTER
+#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
 	/*
 	 * for current design of ramster, all pages belonging to
 	 * an object reside on the same remotenode and extra is
@@ -215,7 +215,7 @@ struct tmem_pamops {
 			  uint32_t);
 	void (*free)(void *, struct tmem_pool *,
 		     struct tmem_oid *, uint32_t, bool);
-#ifdef CONFIG_RAMSTER
+#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
 	void (*new_obj)(struct tmem_obj *);
 	void (*free_obj)(struct tmem_pool *, struct tmem_obj *, bool);
 	void *(*repatriate_preload)(void *, struct tmem_pool *,
@@ -247,7 +247,7 @@ extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
 extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
 extern int tmem_destroy_pool(struct tmem_pool *);
 extern void tmem_new_pool(struct tmem_pool *, uint32_t);
-#ifdef CONFIG_RAMSTER
+#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
 extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
 			void *);
 extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
......
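The repeated #if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
guard is the long-hand way of compiling a block for both built-in (=y)
and modular (=m) configurations. A minimal comparison sketch using the
IS_ENABLED() helper from <linux/kconfig.h> (not what this patch uses;
shown only to illustrate what the guard means):

  #include <linux/kconfig.h>

  /* long-hand form used throughout this patch: compiled for =y or =m */
  #if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
  /* ramster-only declarations */
  #endif

  /* equivalent shorthand: IS_ENABLED() is 1 for =y or =m, 0 otherwise */
  #if IS_ENABLED(CONFIG_RAMSTER)
  /* ramster-only declarations */
  #endif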
@@ -37,8 +37,10 @@
 #include "debug.h"
 
 #ifdef CONFIG_RAMSTER
 static bool ramster_enabled __read_mostly;
+static int disable_frontswap_selfshrink;
 #else
 #define ramster_enabled false
+#define disable_frontswap_selfshrink 0
 #endif
 
 #ifndef __PG_WAS_ACTIVE
@@ -81,8 +83,12 @@ static char *namestr __read_mostly = "zcache";
 	(__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
 
 /* crypto API for zcache  */
+#ifdef CONFIG_ZCACHE_MODULE
+static char *zcache_comp_name = "lzo";
+#else
 #define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
 static char zcache_comp_name[ZCACHE_COMP_NAME_SZ] __read_mostly;
+#endif
 static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms __read_mostly;
 
 enum comp_op {
@@ -1721,6 +1727,7 @@ struct frontswap_ops *zcache_frontswap_register_ops(void)
  * OR NOTHING HAPPENS!
  */
 
+#ifndef CONFIG_ZCACHE_MODULE
 static int __init enable_zcache(char *s)
 {
 	zcache_enabled = true;
@@ -1787,18 +1794,27 @@ static int __init enable_zcache_compressor(char *s)
 	return 1;
 }
 __setup("zcache=", enable_zcache_compressor);
+#endif
 
-static int __init zcache_comp_init(void)
+static int zcache_comp_init(void)
 {
 	int ret = 0;
 
 	/* check crypto algorithm */
+#ifdef CONFIG_ZCACHE_MODULE
+	ret = crypto_has_comp(zcache_comp_name, 0, 0);
+	if (!ret) {
+		ret = -1;
+		goto out;
+	}
+#else
 	if (*zcache_comp_name != '\0') {
 		ret = crypto_has_comp(zcache_comp_name, 0, 0);
 		if (!ret)
 			pr_info("zcache: %s not supported\n",
 					zcache_comp_name);
+		goto out;
 	}
 	if (!ret)
 		strcpy(zcache_comp_name, "lzo");
@@ -1807,6 +1823,7 @@ static int __init zcache_comp_init(void)
 		ret = 1;
 		goto out;
 	}
+#endif
 	pr_info("zcache: using %s compressor\n", zcache_comp_name);
 
 	/* alloc percpu transforms */
@@ -1818,10 +1835,13 @@
 	return ret;
 }
 
-static int __init zcache_init(void)
+static int zcache_init(void)
 {
 	int ret = 0;
 
+#ifdef CONFIG_ZCACHE_MODULE
+	zcache_enabled = 1;
+#endif
 	if (ramster_enabled) {
 		namestr = "ramster";
 		ramster_register_pamops(&zcache_pamops);
@@ -1894,9 +1914,28 @@ static int __init zcache_init(void)
 	}
 	if (ramster_enabled)
 		ramster_init(!disable_cleancache, !disable_frontswap,
-				frontswap_has_exclusive_gets, false);
+				frontswap_has_exclusive_gets,
+				!disable_frontswap_selfshrink);
 out:
 	return ret;
 }
 
+#ifdef CONFIG_ZCACHE_MODULE
+#ifdef CONFIG_RAMSTER
+module_param(ramster_enabled, int, S_IRUGO);
+module_param(disable_frontswap_selfshrink, int, S_IRUGO);
+#endif
+module_param(disable_cleancache, int, S_IRUGO);
+module_param(disable_frontswap, int, S_IRUGO);
+#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
+module_param(frontswap_has_exclusive_gets, bool, S_IRUGO);
+#endif
+module_param(disable_frontswap_ignore_nonactive, int, S_IRUGO);
+module_param(zcache_comp_name, charp, S_IRUGO);
+module_init(zcache_init);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
+MODULE_DESCRIPTION("In-kernel compression of cleancache/frontswap pages");
+#else
 late_initcall(zcache_init);
+#endif
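
Because the module parameters above are declared with S_IRUGO, a loaded
zcache module should expose them read-only through sysfs. A hypothetical
check (the path assumes the module really is named zcache), expected to
print the built-in default unless a different compressor was given at
load time:

  cat /sys/module/zcache/parameters/zcache_comp_name
  lzo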
@@ -39,7 +39,7 @@ extern int zcache_flush_page(int, int, struct tmem_oid *, uint32_t);
 extern int zcache_flush_object(int, int, struct tmem_oid *);
 extern void zcache_decompress_to_page(char *, unsigned int, struct page *);
 
-#ifdef CONFIG_RAMSTER
+#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
 extern void *zcache_pampd_create(char *, unsigned int, bool, int,
 				 struct tmem_handle *);
 int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph);
......