Commit 7cad2fab authored by Antoine Ténart, committed by Herbert Xu

crypto: inside-secure - fix request allocations in invalidation path

This patch makes use of the SKCIPHER_REQUEST_ON_STACK and
AHASH_REQUEST_ON_STACK helpers to allocate enough memory to contain both
the crypto request structures and their embedded context (__ctx).

Fixes: 1b44c5a6 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Suggested-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 0a02dcca
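
For context: both helpers come from the mainline crypto headers (include/crypto/skcipher.h and include/crypto/hash.h). Each reserves a char buffer sized for the request structure plus the transform's per-request context, then aliases a request pointer onto it. The following is a sketch of those definitions, not the verbatim kernel text:

/* Sketch of the on-stack request helpers. The buffer covers both the
 * request struct itself and the tfm's per-request context (__ctx). */
#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
	char __##name##_desc[sizeof(struct skcipher_request) + \
		crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
	struct skcipher_request *name = (void *)__##name##_desc

#define AHASH_REQUEST_ON_STACK(name, ahash) \
	char __##name##_desc[sizeof(struct ahash_request) + \
		crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
	struct ahash_request *name = (void *)__##name##_desc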
@@ -422,25 +422,25 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct skcipher_request req;
-	struct safexcel_cipher_req *sreq = skcipher_request_ctx(&req);
+	SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm));
+	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct skcipher_request));
+	memset(req, 0, sizeof(struct skcipher_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      safexcel_inv_complete, &result);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      safexcel_inv_complete, &result);
 
-	skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
 	sreq->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (!priv->ring[ring].need_dequeue)
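
Why the old code was broken: struct skcipher_request ends in a zero-sized __ctx flexible array, so a bare on-stack instance reserves no room for the driver's per-request state. skcipher_request_ctx() returns a pointer just past the structure, and writes through sreq therefore landed beyond the stack variable. A sketch of the layout involved, with field names taken from mainline and the annotations added here:

/* Sketch: why a bare on-stack request under-allocates. */
struct skcipher_request {
	unsigned int cryptlen;
	u8 *iv;
	struct scatterlist *src;
	struct scatterlist *dst;
	struct crypto_async_request base;
	void *__ctx[] CRYPTO_MINALIGN_ATTR; /* flexible array: 0 bytes here */
};

static inline void *skcipher_request_ctx(struct skcipher_request *req)
{
	return req->__ctx; /* points just past the struct */
}

/*
 * struct skcipher_request req;          reserves sizeof(req), no __ctx room
 * skcipher_request_ctx(&req)            past the variable: stack corruption
 *
 * SKCIPHER_REQUEST_ON_STACK(req, tfm);  sizeof + crypto_skcipher_reqsize(tfm)
 * skcipher_request_ctx(req)             inside the reserved buffer: OK
 */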
@@ -450,25 +450,25 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 {
 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	struct ahash_request req;
-	struct safexcel_ahash_req *rctx = ahash_request_ctx(&req);
+	AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
+	struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
 	struct safexcel_inv_result result = {};
 	int ring = ctx->base.ring;
 
-	memset(&req, 0, sizeof(struct ahash_request));
+	memset(req, 0, sizeof(struct ahash_request));
 
 	/* create invalidation request */
 	init_completion(&result.completion);
-	ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				   safexcel_inv_complete, &result);
+	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   safexcel_inv_complete, &result);
 
-	ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
-	ctx = crypto_tfm_ctx(req.base.tfm);
+	ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
+	ctx = crypto_tfm_ctx(req->base.tfm);
 	ctx->base.exit_inv = true;
 	rctx->needs_inv = true;
 
 	spin_lock_bh(&priv->ring[ring].queue_lock);
-	crypto_enqueue_request(&priv->ring[ring].queue, &req.base);
+	crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
 	spin_unlock_bh(&priv->ring[ring].queue_lock);
 
 	if (!priv->ring[ring].need_dequeue)
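
The ahash side relies on the same contract: the driver registers the size of its per-request context at transform init time, and crypto_ahash_reqsize(), which AHASH_REQUEST_ON_STACK uses, returns that size. A simplified sketch of that registration pattern, not the driver's verbatim cra_init:

#include <crypto/internal/hash.h>

/* Simplified sketch: register the per-request context size so that
 * crypto_ahash_reqsize(), and thus AHASH_REQUEST_ON_STACK, accounts
 * for struct safexcel_ahash_req when sizing the request buffer. */
static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct safexcel_ahash_req));
	return 0;
}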