Commit 6a99d7a2 authored by Ard Biesheuvel, committed by Herbert Xu

crypto: omap-aes - permit asynchronous skcipher as fallback

Even though the omap-aes driver implements asynchronous versions of
ecb(aes), cbc(aes) and ctr(aes), the fallbacks it allocates are required
to be synchronous. Given that SIMD-based software implementations are
usually asynchronous as well, even though they rarely complete
asynchronously (this typically only happens when the request was made
from softirq context while SIMD was already in use in the task context
that it interrupted), these implementations are disregarded, and either
the generic C version or another table-based version implemented in
assembler is selected instead.

Since falling back to synchronous AES is not only a performance issue,
but potentially a security issue as well (table-based AES is not time
invariant), let's fix this by allocating an ordinary skcipher as the
fallback, and invoking it with the completion routine that was given to
the outer request.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 1d63e455
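
Before the diff itself, here is a condensed sketch of the pattern the patch
switches to. This is illustrative only: the my_* names are placeholders, not
the driver's real identifiers, and error/exit paths are omitted. The fallback
becomes an ordinary (possibly asynchronous) skcipher, its request is embedded
at the tail of the driver's request context, and the caller's completion
callback is forwarded to it unchanged:

#include <linux/err.h>
#include <crypto/skcipher.h>

struct my_ctx {
	struct crypto_skcipher *fallback;	/* async-capable fallback cipher */
};

struct my_reqctx {
	/* ... driver-specific state ... */
	struct skcipher_request fallback_req;	/* must remain the last member */
};

static int my_init_tfm(struct crypto_skcipher *tfm)
{
	struct my_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* any skcipher may serve as fallback now, not just a synchronous one */
	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	/* reserve room for the fallback's own request context as well */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct my_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static int my_encrypt_via_fallback(struct skcipher_request *req)
{
	struct my_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct my_reqctx *rctx = skcipher_request_ctx(req);

	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	/* hand the outer request's completion straight to the fallback */
	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
				   req->cryptlen, req->iv);
	return crypto_skcipher_encrypt(&rctx->fallback_req);
}
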
@@ -548,20 +548,18 @@ static int omap_aes_crypt(struct skcipher_request *req, unsigned long mode)
 		  !!(mode & FLAGS_CBC));
 
 	if (req->cryptlen < aes_fallback_sz) {
-		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
-
-		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
-		skcipher_request_set_callback(subreq, req->base.flags, NULL,
-					      NULL);
-		skcipher_request_set_crypt(subreq, req->src, req->dst,
-					   req->cryptlen, req->iv);
+		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+		skcipher_request_set_callback(&rctx->fallback_req,
+					      req->base.flags,
+					      req->base.complete,
+					      req->base.data);
+		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+					   req->dst, req->cryptlen, req->iv);
 
 		if (mode & FLAGS_ENCRYPT)
-			ret = crypto_skcipher_encrypt(subreq);
+			ret = crypto_skcipher_encrypt(&rctx->fallback_req);
 		else
-			ret = crypto_skcipher_decrypt(subreq);
-
-		skcipher_request_zero(subreq);
+			ret = crypto_skcipher_decrypt(&rctx->fallback_req);
 		return ret;
 	}
 	dd = omap_aes_find_dev(rctx);
@@ -590,11 +588,11 @@ static int omap_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	memcpy(ctx->key, key, keylen);
 	ctx->keylen = keylen;
 
-	crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
-	crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
-						 CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+						 CRYPTO_TFM_REQ_MASK);
 
-	ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
 	if (!ret)
 		return 0;
@@ -640,15 +638,16 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
 {
 	const char *name = crypto_tfm_alg_name(&tfm->base);
 	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_sync_skcipher *blk;
+	struct crypto_skcipher *blk;
 
-	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(blk))
 		return PTR_ERR(blk);
 
 	ctx->fallback = blk;
 
-	crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx) +
+					 crypto_skcipher_reqsize(blk));
 
 	ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
 	ctx->enginectx.op.unprepare_request = NULL;
@@ -662,7 +661,7 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
 	struct omap_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	if (ctx->fallback)
-		crypto_free_sync_skcipher(ctx->fallback);
+		crypto_free_skcipher(ctx->fallback);
 
 	ctx->fallback = NULL;
 }
@@ -97,7 +97,7 @@ struct omap_aes_ctx {
 	int		keylen;
 	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
 	u8		nonce[4];
-	struct crypto_sync_skcipher	*fallback;
+	struct crypto_skcipher	*fallback;
 };
 
 struct omap_aes_gcm_ctx {
@@ -110,6 +110,7 @@ struct omap_aes_reqctx {
 	unsigned long mode;
 	u8 iv[AES_BLOCK_SIZE];
 	u32 auth_tag[AES_BLOCK_SIZE / sizeof(u32)];
+	struct skcipher_request fallback_req;	// keep at the end
 };
 
 #define OMAP_AES_QUEUE_LENGTH	1
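
A side note on the "keep at the end" comment above: because omap_aes_init_tfm()
now reserves sizeof(struct omap_aes_reqctx) plus crypto_skcipher_reqsize(blk),
the fallback's own request context has to sit directly behind the embedded
skcipher_request, which only works if fallback_req is the struct's final
member. An illustrative layout (my annotation, not taken from the source):

/*
 * Outer request context, as sized by crypto_skcipher_set_reqsize():
 *
 *   struct omap_aes_reqctx
 *           ... existing fields ...
 *           struct skcipher_request fallback_req;   <- must be last
 *   [crypto_skcipher_reqsize(fallback) bytes]       <- fallback's request ctx
 *
 * skcipher_request_ctx(&rctx->fallback_req) points at the memory that
 * immediately follows fallback_req, so nothing may come after it.
 */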