Commit ed19f192 authored by Sebastian Andrzej Siewior, committed by Ingo Molnar

zram: Allocate struct zcomp_strm as per-CPU memory

zcomp::stream is a per-CPU pointer, pointing to a struct zcomp_strm
which itself contains two pointers. Allocating struct zcomp_strm
directly as per-CPU memory avoids the additional memory allocation
and the pointer dereference on each access. It also simplifies the
addition of a local_lock to struct zcomp_strm.

Allocate zcomp::stream directly as per-CPU memory.
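
For illustration, a minimal sketch of the two allocation patterns (not code
from this patch; names such as streams/stream/zstrm/cpu are illustrative,
error handling is omitted):

	/* Before: the per-CPU slot holds only a pointer to the stream. */
	struct zcomp_strm * __percpu *streams = alloc_percpu(struct zcomp_strm *);
	struct zcomp_strm *zstrm;
	int cpu = 0;	/* hypothetical CPU id, as in a hotplug callback */

	*per_cpu_ptr(streams, cpu) = kmalloc(sizeof(struct zcomp_strm), GFP_KERNEL);
	zstrm = *get_cpu_ptr(streams);	/* extra dereference on every access */
	put_cpu_ptr(streams);

	/* After: the struct itself lives in per-CPU memory, one allocation total. */
	struct zcomp_strm __percpu *stream = alloc_percpu(struct zcomp_strm);

	zstrm = get_cpu_ptr(stream);	/* address of this CPU's instance */
	put_cpu_ptr(stream);

get_cpu_ptr() disables preemption and must be balanced with put_cpu_ptr();
in the driver that pairing lives in zcomp_stream_get()/zcomp_stream_put().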
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20200527201119.1692513-7-bigeasy@linutronix.de
parent 3e92fd7b
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -37,19 +37,16 @@ static void zcomp_strm_free(struct zcomp_strm *zstrm)
 	if (!IS_ERR_OR_NULL(zstrm->tfm))
 		crypto_free_comp(zstrm->tfm);
 	free_pages((unsigned long)zstrm->buffer, 1);
-	kfree(zstrm);
+	zstrm->tfm = NULL;
+	zstrm->buffer = NULL;
 }
 
 /*
- * allocate new zcomp_strm structure with ->tfm initialized by
- * backend, return NULL on error
+ * Initialize zcomp_strm structure with ->tfm initialized by backend, and
+ * ->buffer. Return a negative value on error.
  */
-static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
+static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
 {
-	struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
-	if (!zstrm)
-		return NULL;
-
 	zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
 	/*
 	 * allocate 2 pages. 1 for compressed data, plus 1 extra for the
@@ -58,9 +55,9 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
 	zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
 	if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
 		zcomp_strm_free(zstrm);
-		zstrm = NULL;
+		return -ENOMEM;
 	}
-	return zstrm;
+	return 0;
 }
 
 bool zcomp_available_algorithm(const char *comp)
@@ -113,7 +110,7 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
 
 struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
 {
-	return *get_cpu_ptr(comp->stream);
+	return get_cpu_ptr(comp->stream);
 }
 
 void zcomp_stream_put(struct zcomp *comp)
@@ -159,17 +156,13 @@ int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
 {
 	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
 	struct zcomp_strm *zstrm;
+	int ret;
 
-	if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
-		return 0;
-
-	zstrm = zcomp_strm_alloc(comp);
-	if (IS_ERR_OR_NULL(zstrm)) {
+	zstrm = per_cpu_ptr(comp->stream, cpu);
+	ret = zcomp_strm_init(zstrm, comp);
+	if (ret)
 		pr_err("Can't allocate a compression stream\n");
-		return -ENOMEM;
-	}
-	*per_cpu_ptr(comp->stream, cpu) = zstrm;
-	return 0;
+	return ret;
 }
 
 int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
@@ -177,10 +170,8 @@ int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
 	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
 	struct zcomp_strm *zstrm;
 
-	zstrm = *per_cpu_ptr(comp->stream, cpu);
-	if (!IS_ERR_OR_NULL(zstrm))
-		zcomp_strm_free(zstrm);
-	*per_cpu_ptr(comp->stream, cpu) = NULL;
+	zstrm = per_cpu_ptr(comp->stream, cpu);
+	zcomp_strm_free(zstrm);
 	return 0;
 }
 
@@ -188,7 +179,7 @@ static int zcomp_init(struct zcomp *comp)
 {
 	int ret;
 
-	comp->stream = alloc_percpu(struct zcomp_strm *);
+	comp->stream = alloc_percpu(struct zcomp_strm);
 	if (!comp->stream)
 		return -ENOMEM;
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -14,7 +14,7 @@ struct zcomp_strm {
 
 /* dynamic per-device compression frontend */
 struct zcomp {
-	struct zcomp_strm * __percpu *stream;
+	struct zcomp_strm __percpu *stream;
 	const char *name;
 	struct hlist_node node;
 };