Commit 22f7c2f8 authored by Corentin Labbe's avatar Corentin Labbe Committed by Herbert Xu

crypto: sun8i-ce - do not allocate memory when handling requests

Instead of allocating memory for each request, it is easier to
pre-allocate the buffers for the IV.
This makes the error path simpler.
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 282ee071
...@@ -152,23 +152,13 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req ...@@ -152,23 +152,13 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
ivsize = crypto_skcipher_ivsize(tfm); ivsize = crypto_skcipher_ivsize(tfm);
if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
rctx->ivlen = ivsize; rctx->ivlen = ivsize;
rctx->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
if (!rctx->bounce_iv) {
err = -ENOMEM;
goto theend_key;
}
if (rctx->op_dir & CE_DECRYPTION) { if (rctx->op_dir & CE_DECRYPTION) {
rctx->backup_iv = kzalloc(ivsize, GFP_KERNEL);
if (!rctx->backup_iv) {
err = -ENOMEM;
goto theend_key;
}
offset = areq->cryptlen - ivsize; offset = areq->cryptlen - ivsize;
scatterwalk_map_and_copy(rctx->backup_iv, areq->src, scatterwalk_map_and_copy(chan->backup_iv, areq->src,
offset, ivsize, 0); offset, ivsize, 0);
} }
memcpy(rctx->bounce_iv, areq->iv, ivsize); memcpy(chan->bounce_iv, areq->iv, ivsize);
rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen, rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, rctx->ivlen,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (dma_mapping_error(ce->dev, rctx->addr_iv)) { if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n"); dev_err(ce->dev, "Cannot DMA MAP IV\n");
...@@ -257,16 +247,15 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req ...@@ -257,16 +247,15 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE); dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize; offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) { if (rctx->op_dir & CE_DECRYPTION) {
memcpy(areq->iv, rctx->backup_iv, ivsize); memcpy(areq->iv, chan->backup_iv, ivsize);
kfree_sensitive(rctx->backup_iv); memzero_explicit(chan->backup_iv, ivsize);
} else { } else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset, scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0); ivsize, 0);
} }
kfree(rctx->bounce_iv); memzero_explicit(chan->bounce_iv, ivsize);
} }
theend_key:
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE); dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
theend: theend:
...@@ -322,13 +311,13 @@ static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_r ...@@ -322,13 +311,13 @@ static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_r
dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE); dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize; offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) { if (rctx->op_dir & CE_DECRYPTION) {
memcpy(areq->iv, rctx->backup_iv, ivsize); memcpy(areq->iv, chan->backup_iv, ivsize);
kfree_sensitive(rctx->backup_iv); memzero_explicit(chan->backup_iv, ivsize);
} else { } else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset, scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0); ivsize, 0);
} }
kfree(rctx->bounce_iv); memzero_explicit(chan->bounce_iv, ivsize);
} }
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE); dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
......
...@@ -283,7 +283,7 @@ static struct sun8i_ce_alg_template ce_algs[] = { ...@@ -283,7 +283,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400, .cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE, .cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK, CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
...@@ -310,7 +310,7 @@ static struct sun8i_ce_alg_template ce_algs[] = { ...@@ -310,7 +310,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400, .cra_priority = 400,
.cra_blocksize = AES_BLOCK_SIZE, .cra_blocksize = AES_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK, CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
...@@ -336,7 +336,7 @@ static struct sun8i_ce_alg_template ce_algs[] = { ...@@ -336,7 +336,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400, .cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK, CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
...@@ -363,7 +363,7 @@ static struct sun8i_ce_alg_template ce_algs[] = { ...@@ -363,7 +363,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_priority = 400, .cra_priority = 400,
.cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK, CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx),
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
...@@ -673,6 +673,18 @@ static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce) ...@@ -673,6 +673,18 @@ static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
err = -ENOMEM; err = -ENOMEM;
goto error_engine; goto error_engine;
} }
ce->chanlist[i].bounce_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
GFP_KERNEL | GFP_DMA);
if (!ce->chanlist[i].bounce_iv) {
err = -ENOMEM;
goto error_engine;
}
ce->chanlist[i].backup_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
GFP_KERNEL);
if (!ce->chanlist[i].backup_iv) {
err = -ENOMEM;
goto error_engine;
}
} }
return 0; return 0;
error_engine: error_engine:
......
...@@ -186,6 +186,8 @@ struct ce_task { ...@@ -186,6 +186,8 @@ struct ce_task {
* @status: set to 1 by interrupt if task is done * @status: set to 1 by interrupt if task is done
* @t_phy: Physical address of task * @t_phy: Physical address of task
* @tl: pointer to the current ce_task for this flow * @tl: pointer to the current ce_task for this flow
* @backup_iv: buffer which contain the next IV to store
* @bounce_iv: buffer which contain the IV
* @stat_req: number of request done by this flow * @stat_req: number of request done by this flow
*/ */
struct sun8i_ce_flow { struct sun8i_ce_flow {
...@@ -195,6 +197,8 @@ struct sun8i_ce_flow { ...@@ -195,6 +197,8 @@ struct sun8i_ce_flow {
dma_addr_t t_phy; dma_addr_t t_phy;
int timeout; int timeout;
struct ce_task *tl; struct ce_task *tl;
void *backup_iv;
void *bounce_iv;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
unsigned long stat_req; unsigned long stat_req;
#endif #endif
...@@ -241,8 +245,6 @@ struct sun8i_ce_dev { ...@@ -241,8 +245,6 @@ struct sun8i_ce_dev {
* struct sun8i_cipher_req_ctx - context for a skcipher request * struct sun8i_cipher_req_ctx - context for a skcipher request
* @op_dir: direction (encrypt vs decrypt) for this request * @op_dir: direction (encrypt vs decrypt) for this request
* @flow: the flow to use for this request * @flow: the flow to use for this request
* @backup_iv: buffer which contain the next IV to store
* @bounce_iv: buffer which contain the IV
* @ivlen: size of bounce_iv * @ivlen: size of bounce_iv
* @nr_sgs: The number of source SG (as given by dma_map_sg()) * @nr_sgs: The number of source SG (as given by dma_map_sg())
* @nr_sgd: The number of destination SG (as given by dma_map_sg()) * @nr_sgd: The number of destination SG (as given by dma_map_sg())
...@@ -253,8 +255,6 @@ struct sun8i_ce_dev { ...@@ -253,8 +255,6 @@ struct sun8i_ce_dev {
struct sun8i_cipher_req_ctx { struct sun8i_cipher_req_ctx {
u32 op_dir; u32 op_dir;
int flow; int flow;
void *backup_iv;
void *bounce_iv;
unsigned int ivlen; unsigned int ivlen;
int nr_sgs; int nr_sgs;
int nr_sgd; int nr_sgd;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment