Commit ccc5d51e authored by Ard Biesheuvel, committed by Herbert Xu

crypto: arm64/aes-blk - remove cra_alignmask

Remove the unnecessary alignmask: it is much more efficient to deal with
the misalignment in the core algorithm than relying on the crypto API to
copy the data to a suitably aligned buffer.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 8f4102db
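
Not part of the commit, but a rough sketch of the trade-off the commit message describes (all names below are hypothetical, not kernel APIs): with cra_alignmask = 7 the crypto API has to bounce a misaligned request through an 8-byte-aligned temporary buffer, paying for a copy on each side of the cipher call; with no alignmask the cipher operates on the caller's buffer directly and absorbs any misalignment itself, e.g. via byte-wise XOR or NEON ld1/st1, which tolerate unaligned addresses on arm64.

#include <stdint.h>
#include <string.h>

#define ALIGNMASK 7	/* what cra_alignmask = 7 would have demanded */

/* stand-in cipher: XOR every byte with a fixed keystream value */
static void toy_cipher(uint8_t *dst, const uint8_t *src, size_t len)
{
	for (size_t i = 0; i < len; i++)
		dst[i] = src[i] ^ 0xAA;
}

/* alignmask path: bounce the data through an aligned buffer (two copies);
 * assumes len <= sizeof(tmp) for the sake of the sketch */
static void run_with_bounce(uint8_t *buf, size_t len)
{
	uint8_t tmp[64] __attribute__((aligned(ALIGNMASK + 1)));

	memcpy(tmp, buf, len);		/* copy in to satisfy the alignmask */
	toy_cipher(tmp, tmp, len);
	memcpy(buf, tmp, len);		/* copy back out */
}

/* no-alignmask path: the cipher handles the caller's buffer as-is */
static void run_in_place(uint8_t *buf, size_t len)
{
	toy_cipher(buf, buf, len);	/* byte accesses need no alignment */
}

On arm64 the real NEON code gets the second variant essentially for free, which is why the alignmask is simply dropped below.
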
@@ -215,14 +215,15 @@ static int ctr_encrypt(struct skcipher_request *req)
 		u8 *tsrc = walk.src.virt.addr;
 
 		/*
-		 * Minimum alignment is 8 bytes, so if nbytes is <= 8, we need
-		 * to tell aes_ctr_encrypt() to only read half a block.
+		 * Tell aes_ctr_encrypt() to process a tail block.
 		 */
-		blocks = (nbytes <= 8) ? -1 : 1;
+		blocks = -1;
 
-		aes_ctr_encrypt(tail, tsrc, (u8 *)ctx->key_enc, rounds,
+		aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds,
 				blocks, walk.iv, first);
-		memcpy(tdst, tail, nbytes);
+		if (tdst != tsrc)
+			memcpy(tdst, tsrc, nbytes);
+		crypto_xor(tdst, tail, nbytes);
 		err = skcipher_walk_done(&walk, 0);
 	}
 	kernel_neon_end();
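
For clarity, a rough user-space model of the new tail handling in the hunk above (not taken from the kernel source; the helper and parameter names here are hypothetical): aes_ctr_encrypt() is now only asked to write one block of keystream into the aligned stack buffer `tail`, and the partial-block copy and XOR against the possibly misaligned source/destination buffers happen afterwards in C, which is what the memcpy()/crypto_xor() pair implements.

#include <stdint.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/*
 * keystream: one encrypted counter block, as produced into `tail` above.
 * tdst/tsrc: destination/source of the final partial block; they may alias
 * for in-place operation and may be arbitrarily aligned.
 */
static void ctr_finish_tail(uint8_t *tdst, const uint8_t *tsrc,
			    const uint8_t keystream[AES_BLOCK_SIZE],
			    unsigned int nbytes)
{
	if (tdst != tsrc)
		memcpy(tdst, tsrc, nbytes);	/* bring the tail into dst */

	for (unsigned int i = 0; i < nbytes; i++)
		tdst[i] ^= keystream[i];	/* byte-wise, like crypto_xor() */
}
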
@@ -282,7 +283,6 @@ static struct skcipher_alg aes_algs[] = { {
 		.cra_flags = CRYPTO_ALG_INTERNAL,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct crypto_aes_ctx),
-		.cra_alignmask = 7,
 		.cra_module = THIS_MODULE,
 	},
 	.min_keysize = AES_MIN_KEY_SIZE,
@@ -298,7 +298,6 @@ static struct skcipher_alg aes_algs[] = { {
 		.cra_flags = CRYPTO_ALG_INTERNAL,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct crypto_aes_ctx),
-		.cra_alignmask = 7,
 		.cra_module = THIS_MODULE,
 	},
 	.min_keysize = AES_MIN_KEY_SIZE,
@@ -315,7 +314,6 @@ static struct skcipher_alg aes_algs[] = { {
 		.cra_flags = CRYPTO_ALG_INTERNAL,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct crypto_aes_ctx),
-		.cra_alignmask = 7,
 		.cra_module = THIS_MODULE,
 	},
 	.min_keysize = AES_MIN_KEY_SIZE,
@@ -332,7 +330,6 @@ static struct skcipher_alg aes_algs[] = { {
 		.cra_priority = PRIO - 1,
 		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct crypto_aes_ctx),
-		.cra_alignmask = 7,
 		.cra_module = THIS_MODULE,
 	},
 	.min_keysize = AES_MIN_KEY_SIZE,
@@ -350,7 +347,6 @@ static struct skcipher_alg aes_algs[] = { {
 		.cra_flags = CRYPTO_ALG_INTERNAL,
 		.cra_blocksize = AES_BLOCK_SIZE,
 		.cra_ctxsize = sizeof(struct crypto_aes_xts_ctx),
-		.cra_alignmask = 7,
 		.cra_module = THIS_MODULE,
 	},
 	.min_keysize = 2 * AES_MIN_KEY_SIZE,
@@ -337,7 +337,7 @@ AES_ENTRY(aes_ctr_encrypt)
 .Lctrcarrydone:
 	subs		w4, w4, #1
-	bmi		.Lctrhalfblock		/* blocks < 0 means 1/2 block */
+	bmi		.Lctrtailblock		/* blocks <0 means tail block */
 	ld1		{v3.16b}, [x1], #16
 	eor		v3.16b, v0.16b, v3.16b
 	st1		{v3.16b}, [x0], #16
@@ -348,10 +348,8 @@ AES_ENTRY(aes_ctr_encrypt)
 	FRAME_POP
 	ret
 
-.Lctrhalfblock:
-	ld1		{v3.8b}, [x1]
-	eor		v3.8b, v0.8b, v3.8b
-	st1		{v3.8b}, [x0]
+.Lctrtailblock:
+	st1		{v0.16b}, [x0]
 	FRAME_POP
 	ret