Commit 654627ad authored by Herbert Xu's avatar Herbert Xu

crypto: bcm - Use subrequest for fallback

Instead of doing saving and restoring on the AEAD request object
for fallback processing, use a subrequest instead.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent e16dda2b
...@@ -2570,66 +2570,29 @@ static int aead_need_fallback(struct aead_request *req) ...@@ -2570,66 +2570,29 @@ static int aead_need_fallback(struct aead_request *req)
return payload_len > ctx->max_payload; return payload_len > ctx->max_payload;
} }
/*
 * aead_complete() - completion callback interposed in front of the caller's
 * callback while an AEAD request is being processed by the fallback cipher.
 *
 * The request's tfm and the caller's completion hook/data were replaced so
 * the fallback tfm could process the request; this restores all three from
 * the values saved in the driver request context, then invokes the caller's
 * original completion handler with the fallback's result code.
 *
 * (Removed by this commit: the subrequest approach makes the save/restore
 * dance unnecessary.)
 */
static void aead_complete(struct crypto_async_request *areq, int err)
{
struct aead_request *req =
container_of(areq, struct aead_request, base);
struct iproc_reqctx_s *rctx = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
flow_log("%s() err:%d\n", __func__, err);
/* Point the request back at this driver's tfm. */
areq->tfm = crypto_aead_tfm(aead);
/* Restore the caller's completion callback and context... */
areq->complete = rctx->old_complete;
areq->data = rctx->old_data;
/* ...and only then deliver the result to the caller. */
areq->complete(areq, err);
}
/*
 * aead_do_fallback() - hand an AEAD request to the software fallback cipher
 * when the SPU hardware cannot handle it.
 * @req:        the AEAD request to process
 * @is_encrypt: true to encrypt, false to decrypt
 *
 * Uses the subrequest embedded in the driver request context instead of
 * retargeting @req itself, so no tfm/callback state has to be saved and
 * restored around the fallback call.
 *
 * Return: 0 or -EINPROGRESS on success, -EINVAL if no fallback cipher was
 * allocated for this tfm, or a negative errno from the fallback cipher.
 */
static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
	struct aead_request *subreq;

	flow_log("%s() enc:%u\n", __func__, is_encrypt);

	if (!ctx->fallback_cipher)
		return -EINVAL;

	/* Mirror the original request onto the embedded subrequest. */
	subreq = &rctx->req;
	aead_request_set_tfm(subreq, ctx->fallback_cipher);
	aead_request_set_callback(subreq, aead_request_flags(req),
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	/*
	 * Dispatch on the subrequest, not on @req: @req is still bound to
	 * this driver's own tfm, so crypto_aead_encrypt(req) would recurse
	 * straight back into this driver instead of using the fallback.
	 */
	return is_encrypt ? crypto_aead_encrypt(subreq) :
			    crypto_aead_decrypt(subreq);
}
static int aead_enqueue(struct aead_request *req, bool is_encrypt) static int aead_enqueue(struct aead_request *req, bool is_encrypt)
...@@ -4243,6 +4206,7 @@ static int ahash_cra_init(struct crypto_tfm *tfm) ...@@ -4243,6 +4206,7 @@ static int ahash_cra_init(struct crypto_tfm *tfm)
static int aead_cra_init(struct crypto_aead *aead) static int aead_cra_init(struct crypto_aead *aead)
{ {
unsigned int reqsize = sizeof(struct iproc_reqctx_s);
struct crypto_tfm *tfm = crypto_aead_tfm(aead); struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
struct crypto_alg *alg = tfm->__crt_alg; struct crypto_alg *alg = tfm->__crt_alg;
...@@ -4254,7 +4218,6 @@ static int aead_cra_init(struct crypto_aead *aead) ...@@ -4254,7 +4218,6 @@ static int aead_cra_init(struct crypto_aead *aead)
flow_log("%s()\n", __func__); flow_log("%s()\n", __func__);
crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
ctx->is_esp = false; ctx->is_esp = false;
ctx->salt_len = 0; ctx->salt_len = 0;
ctx->salt_offset = 0; ctx->salt_offset = 0;
...@@ -4263,12 +4226,15 @@ static int aead_cra_init(struct crypto_aead *aead) ...@@ -4263,12 +4226,15 @@ static int aead_cra_init(struct crypto_aead *aead)
get_random_bytes(ctx->iv, MAX_IV_SIZE); get_random_bytes(ctx->iv, MAX_IV_SIZE);
flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE); flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE);
if (!err) { if (err)
if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) { goto out;
if (!(alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK))
goto reqsize;
flow_log("%s() creating fallback cipher\n", __func__); flow_log("%s() creating fallback cipher\n", __func__);
ctx->fallback_cipher = ctx->fallback_cipher = crypto_alloc_aead(alg->cra_name, 0,
crypto_alloc_aead(alg->cra_name, 0,
CRYPTO_ALG_ASYNC | CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK); CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback_cipher)) { if (IS_ERR(ctx->fallback_cipher)) {
...@@ -4276,9 +4242,13 @@ static int aead_cra_init(struct crypto_aead *aead) ...@@ -4276,9 +4242,13 @@ static int aead_cra_init(struct crypto_aead *aead)
__func__, alg->cra_name); __func__, alg->cra_name);
return PTR_ERR(ctx->fallback_cipher); return PTR_ERR(ctx->fallback_cipher);
} }
}
}
reqsize += crypto_aead_reqsize(ctx->fallback_cipher);
reqsize:
crypto_aead_set_reqsize(aead, reqsize);
out:
return err; return err;
} }
......
...@@ -339,15 +339,12 @@ struct iproc_reqctx_s { ...@@ -339,15 +339,12 @@ struct iproc_reqctx_s {
/* hmac context */ /* hmac context */
bool is_sw_hmac; bool is_sw_hmac;
/* aead context */
struct crypto_tfm *old_tfm;
crypto_completion_t old_complete;
void *old_data;
gfp_t gfp; gfp_t gfp;
/* Buffers used to build SPU request and response messages */ /* Buffers used to build SPU request and response messages */
struct spu_msg_buf msg_buf; struct spu_msg_buf msg_buf;
struct aead_request req;
}; };
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment