Commit 6a559ecd authored by Sergey Senozhatsky, committed by Andrew Morton

zram: add dictionary support to zstd backend

This adds support for pre-trained zstd dictionaries [1].  A dictionary is
set up in params once (per-comp) and loaded into cctx and dctx by
reference, so we don't allocate extra memory.
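
For illustration only (not part of the patch): a minimal sketch of the
by-reference pattern, assuming dict, dict_sz, level, custom_mem and the
compression stream (cctx, src/dst buffers) are already set up as in the
diff below.

	/* once per compression backend (per-comp) */
	zstd_compression_parameters cprm;
	zstd_cdict *cdict;

	cprm = zstd_get_cparams(level, PAGE_SIZE, dict_sz);
	/* "byreference": zstd keeps a pointer to dict, no private copy */
	cdict = zstd_create_cdict_byreference(dict, dict_sz, cprm, custom_mem);

	/* for every page, on any per-CPU stream, reusing the same cdict */
	ret = zstd_compress_using_cdict(cctx, dst, dst_len, src, src_len, cdict);
	if (zstd_is_error(ret))
		return -EINVAL;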

TEST
====

*** zstd
/sys/block/zram0/mm_stat
1750654976 504565092 514203648        0 514203648        1        0    34204    34204

*** zstd dict=/etc/zstd-dict-amd64
/sys/block/zram0/mm_stat
1750638592 465851259 475373568        0 475373568        1        0    34185    34185

*** zstd level=8 dict=/etc/zstd-dict-amd64
/sys/block/zram0/mm_stat
1750642688 430765171 439955456        0 439955456        1        0    34185    34185
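
(Relative to plain zstd, compr_data_size, the second mm_stat column,
drops from 504565092 to 465851259 bytes with the dictionary, about 7.7%
smaller, and to 430765171 bytes with dict plus level=8, about 14.6%
smaller, for essentially the same orig_data_size.)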

[1] https://github.com/facebook/zstd/blob/dev/programs/zstd.1.md#dictionary-builder

Link: https://lkml.kernel.org/r/20240902105656.1383858-23-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nick Terrell <terrelln@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 1e673c8c
@@ -15,29 +15,87 @@ struct zstd_ctx {
};
struct zstd_params {
zstd_custom_mem custom_mem;
zstd_cdict *cdict;
zstd_ddict *ddict;
zstd_parameters cprm;
};
/*
* For C/D dictionaries we need to provide zstd with zstd_custom_mem,
* which zstd uses internally to allocate/free memory when needed.
*
* This means that allocator.customAlloc() can be called from zcomp_compress()
* under local-lock (per-CPU compression stream), in which case we must use
* GFP_ATOMIC.
*
* Another complication here is that we can be configured as a swap device.
*/
static void *zstd_custom_alloc(void *opaque, size_t size)
{
if (!preemptible())
return kvzalloc(size, GFP_ATOMIC);
return kvzalloc(size, __GFP_KSWAPD_RECLAIM | __GFP_NOWARN);
}
static void zstd_custom_free(void *opaque, void *address)
{
kvfree(address);
}
static void zstd_release_params(struct zcomp_params *params)
{
kfree(params->drv_data);
struct zstd_params *zp = params->drv_data;
params->drv_data = NULL;
if (!zp)
return;
zstd_free_cdict(zp->cdict);
zstd_free_ddict(zp->ddict);
kfree(zp);
}
static int zstd_setup_params(struct zcomp_params *params)
{
zstd_compression_parameters prm;
struct zstd_params *zp;
zp = kzalloc(sizeof(*zp), GFP_KERNEL);
if (!zp)
return -ENOMEM;
params->drv_data = zp;
if (params->level == ZCOMP_PARAM_NO_LEVEL)
params->level = zstd_default_clevel();
zp->cprm = zstd_get_params(params->level, PAGE_SIZE);
params->drv_data = zp;
zp->custom_mem.customAlloc = zstd_custom_alloc;
zp->custom_mem.customFree = zstd_custom_free;
prm = zstd_get_cparams(params->level, PAGE_SIZE,
params->dict_sz);
zp->cdict = zstd_create_cdict_byreference(params->dict,
params->dict_sz,
prm,
zp->custom_mem);
if (!zp->cdict)
goto error;
zp->ddict = zstd_create_ddict_byreference(params->dict,
params->dict_sz,
zp->custom_mem);
if (!zp->ddict)
goto error;
return 0;
error:
zstd_release_params(params);
return -EINVAL;
}
static void zstd_destroy(struct zcomp_ctx *ctx)
@@ -47,8 +105,23 @@ static void zstd_destroy(struct zcomp_ctx *ctx)
if (!zctx)
return;
vfree(zctx->cctx_mem);
vfree(zctx->dctx_mem);
/*
* If ->cctx_mem and ->dctx_mem were allocated then we didn't use
* C/D dictionary and ->cctx / ->dctx were "embedded" into these
* buffers.
*
* If otherwise then we need to explicitly release ->cctx / ->dctx.
*/
if (zctx->cctx_mem)
vfree(zctx->cctx_mem);
else
zstd_free_cctx(zctx->cctx);
if (zctx->dctx_mem)
vfree(zctx->dctx_mem);
else
zstd_free_dctx(zctx->dctx);
kfree(zctx);
}
@@ -63,28 +136,41 @@ static int zstd_create(struct zcomp_params *params, struct zcomp_ctx *ctx)
return -ENOMEM;
ctx->context = zctx;
prm = zstd_get_params(params->level, PAGE_SIZE);
sz = zstd_cctx_workspace_bound(&prm.cParams);
zctx->cctx_mem = vzalloc(sz);
if (!zctx->cctx_mem)
goto error;
zctx->cctx = zstd_init_cctx(zctx->cctx_mem, sz);
if (!zctx->cctx)
goto error;
sz = zstd_dctx_workspace_bound();
zctx->dctx_mem = vzalloc(sz);
if (!zctx->dctx_mem)
goto error;
zctx->dctx = zstd_init_dctx(zctx->dctx_mem, sz);
if (!zctx->dctx)
goto error;
if (params->dict_sz == 0) {
prm = zstd_get_params(params->level, PAGE_SIZE);
sz = zstd_cctx_workspace_bound(&prm.cParams);
zctx->cctx_mem = vzalloc(sz);
if (!zctx->cctx_mem)
goto error;
zctx->cctx = zstd_init_cctx(zctx->cctx_mem, sz);
if (!zctx->cctx)
goto error;
sz = zstd_dctx_workspace_bound();
zctx->dctx_mem = vzalloc(sz);
if (!zctx->dctx_mem)
goto error;
zctx->dctx = zstd_init_dctx(zctx->dctx_mem, sz);
if (!zctx->dctx)
goto error;
} else {
struct zstd_params *zp = params->drv_data;
zctx->cctx = zstd_create_cctx_advanced(zp->custom_mem);
if (!zctx->cctx)
goto error;
zctx->dctx = zstd_create_dctx_advanced(zp->custom_mem);
if (!zctx->dctx)
goto error;
}
return 0;
error:
zstd_release_params(params);
zstd_destroy(ctx);
return -EINVAL;
}
@@ -96,8 +182,14 @@ static int zstd_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
struct zstd_ctx *zctx = ctx->context;
size_t ret;
ret = zstd_compress_cctx(zctx->cctx, req->dst, req->dst_len,
req->src, req->src_len, &zp->cprm);
if (params->dict_sz == 0)
ret = zstd_compress_cctx(zctx->cctx, req->dst, req->dst_len,
req->src, req->src_len, &zp->cprm);
else
ret = zstd_compress_using_cdict(zctx->cctx, req->dst,
req->dst_len, req->src,
req->src_len,
zp->cdict);
if (zstd_is_error(ret))
return -EINVAL;
req->dst_len = ret;
@@ -107,11 +199,17 @@ static int zstd_compress(struct zcomp_params *params, struct zcomp_ctx *ctx,
static int zstd_decompress(struct zcomp_params *params, struct zcomp_ctx *ctx,
struct zcomp_req *req)
{
struct zstd_params *zp = params->drv_data;
struct zstd_ctx *zctx = ctx->context;
size_t ret;
ret = zstd_decompress_dctx(zctx->dctx, req->dst, req->dst_len,
req->src, req->src_len);
if (params->dict_sz == 0)
ret = zstd_decompress_dctx(zctx->dctx, req->dst, req->dst_len,
req->src, req->src_len);
else
ret = zstd_decompress_using_ddict(zctx->dctx, req->dst,
req->dst_len, req->src,
req->src_len, zp->ddict);
if (zstd_is_error(ret))
return -EINVAL;
return 0;