Commit 0605fa0f authored by Corentin Labbe, committed by Herbert Xu

crypto: sun8i-ce - split into prepare/run/unprepare

This patch splits do_one_request() into three functions.
Prepare handles all DMA mapping and initialisation of the task
structure.
Unprepare cleans up all DMA mappings.
do_one_request() is then limited to just executing the task.
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent a216f8d5
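For context, here is a minimal sketch (not part of this commit) of the crypto_engine prepare/run/unprepare wiring that this split relies on. The my_ce_* names and struct my_ce_ctx are illustrative placeholders; only the enginectx callback fields and crypto_finalize_skcipher_request() are real API of this era's crypto/engine.h, as the diff below also shows.

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

struct my_ce_ctx {
	struct crypto_engine_ctx enginectx; /* must be the first member */
	/* driver-private state ... */
};

/* Illustrative prepare step: map buffers and fill the HW descriptor. */
static int my_ce_prepare(struct crypto_engine *engine, void *async_req)
{
	/* dma_map_single()/dma_map_sg() the key, IV and data here */
	return 0;
}

/* Illustrative run step: only kicks the hardware and reports the result. */
static int my_ce_run(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *req =
		container_of(async_req, struct skcipher_request, base);
	int err = 0; /* result of executing the hardware task */

	crypto_finalize_skcipher_request(engine, req, err);
	return 0;
}

/* Illustrative unprepare step: undo everything my_ce_prepare() mapped. */
static int my_ce_unprepare(struct crypto_engine *engine, void *async_req)
{
	/* dma_unmap_single()/dma_unmap_sg() counterparts go here */
	return 0;
}

static int my_ce_init(struct crypto_tfm *tfm)
{
	struct my_ce_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->enginectx.op.do_one_request = my_ce_run;
	ctx->enginectx.op.prepare_request = my_ce_prepare;
	ctx->enginectx.op.unprepare_request = my_ce_unprepare;
	return 0;
}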
@@ -75,8 +75,9 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
 	return err;
 }
 
-static int sun8i_ce_cipher(struct skcipher_request *areq)
+static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
 {
+	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
 	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
 	struct sun8i_ce_dev *ce = op->ce;
@@ -87,7 +88,6 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 	struct ce_task *cet;
 	struct scatterlist *sg;
 	unsigned int todo, len, offset, ivsize;
-	dma_addr_t addr_iv = 0, addr_key = 0;
 	u32 common, sym;
 	int flow, i;
 	int nr_sgs = 0;
@@ -140,13 +140,13 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 	cet->t_sym_ctl = cpu_to_le32(sym);
 	cet->t_asym_ctl = 0;
 
-	addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
-	cet->t_key = cpu_to_le32(addr_key);
-	if (dma_mapping_error(ce->dev, addr_key)) {
+	rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
+	if (dma_mapping_error(ce->dev, rctx->addr_key)) {
 		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
 		err = -EFAULT;
 		goto theend;
 	}
+	cet->t_key = cpu_to_le32(rctx->addr_key);
 
 	ivsize = crypto_skcipher_ivsize(tfm);
 	if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
@@ -167,14 +167,14 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 					 offset, ivsize, 0);
 		}
 		memcpy(rctx->bounce_iv, areq->iv, ivsize);
-		addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
-					 DMA_TO_DEVICE);
-		cet->t_iv = cpu_to_le32(addr_iv);
-		if (dma_mapping_error(ce->dev, addr_iv)) {
+		rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
+					       DMA_TO_DEVICE);
+		if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
 			dev_err(ce->dev, "Cannot DMA MAP IV\n");
 			err = -ENOMEM;
 			goto theend_iv;
 		}
+		cet->t_iv = cpu_to_le32(rctx->addr_iv);
 	}
 
 	if (areq->src == areq->dst) {
@@ -234,7 +234,9 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 	}
 	chan->timeout = areq->cryptlen;
 
-	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+	rctx->nr_sgs = nr_sgs;
+	rctx->nr_sgd = nr_sgd;
+	return 0;
 
 theend_sgs:
 	if (areq->src == areq->dst) {
@@ -247,9 +249,8 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 
 theend_iv:
 	if (areq->iv && ivsize > 0) {
-		if (addr_iv)
-			dma_unmap_single(ce->dev, addr_iv, rctx->ivlen,
-					 DMA_TO_DEVICE);
+		if (rctx->addr_iv)
+			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
 		offset = areq->cryptlen - ivsize;
 		if (rctx->op_dir & CE_DECRYPTION) {
 			memcpy(areq->iv, rctx->backup_iv, ivsize);
@@ -262,19 +263,69 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 	}
 
 theend_key:
-	dma_unmap_single(ce->dev, addr_key, op->keylen, DMA_TO_DEVICE);
+	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
 
 theend:
 	return err;
 }
 
-static int sun8i_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
 {
-	int err;
 	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
+	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun8i_ce_dev *ce = op->ce;
+	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
+	int flow, err;
 
-	err = sun8i_ce_cipher(breq);
+	flow = rctx->flow;
+	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
 	crypto_finalize_skcipher_request(engine, breq, err);
 	return 0;
 }
 
+static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
+{
+	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun8i_ce_dev *ce = op->ce;
+	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+	struct sun8i_ce_flow *chan;
+	struct ce_task *cet;
+	unsigned int ivsize, offset;
+	int nr_sgs = rctx->nr_sgs;
+	int nr_sgd = rctx->nr_sgd;
+	int flow;
+
+	flow = rctx->flow;
+	chan = &ce->chanlist[flow];
+	cet = chan->tl;
+	ivsize = crypto_skcipher_ivsize(tfm);
+
+	if (areq->src == areq->dst) {
+		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+	} else {
+		if (nr_sgs > 0)
+			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+		dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+	}
+
+	if (areq->iv && ivsize > 0) {
+		if (cet->t_iv)
+			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+		offset = areq->cryptlen - ivsize;
+		if (rctx->op_dir & CE_DECRYPTION) {
+			memcpy(areq->iv, rctx->backup_iv, ivsize);
+			kfree_sensitive(rctx->backup_iv);
+		} else {
+			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+						 ivsize, 0);
+		}
+		kfree(rctx->bounce_iv);
+	}
+
+	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
+
+	return 0;
+}
@@ -346,9 +397,9 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
 		 crypto_tfm_alg_driver_name(&sktfm->base),
 		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
 
-	op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
-	op->enginectx.op.prepare_request = NULL;
-	op->enginectx.op.unprepare_request = NULL;
+	op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
+	op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
+	op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;
 
 	err = pm_runtime_get_sync(op->ce->dev);
 	if (err < 0)
...
@@ -182,6 +182,10 @@ struct sun8i_ce_dev {
  * @backup_iv:		buffer which contain the next IV to store
  * @bounce_iv:		buffer which contain the IV
  * @ivlen:		size of bounce_iv
+ * @nr_sgs:		The number of source SG (as given by dma_map_sg())
+ * @nr_sgd:		The number of destination SG (as given by dma_map_sg())
+ * @addr_iv:		The IV addr returned by dma_map_single, need to unmap later
+ * @addr_key:		The key addr returned by dma_map_single, need to unmap later
  * @fallback_req:	request struct for invoking the fallback skcipher TFM
  */
 struct sun8i_cipher_req_ctx {
@@ -190,6 +194,10 @@ struct sun8i_cipher_req_ctx {
 	void *backup_iv;
 	void *bounce_iv;
 	unsigned int ivlen;
+	int nr_sgs;
+	int nr_sgd;
+	dma_addr_t addr_iv;
+	dma_addr_t addr_key;
 	struct skcipher_request fallback_req; // keep at the end
 };