Commit 2cd208cf authored by Eric Biggers, committed by Greg Kroah-Hartman

crypto: arm64/gcm-aes-ce - fix no-NEON fallback code

commit 580e2951 upstream.

The arm64 gcm-aes-ce algorithm is failing the extra crypto self-tests
following my patches to test the !may_use_simd() code paths, which
previously were untested.  The problem is that in the !may_use_simd()
case, an odd number of AES blocks can be processed within each step of
the skcipher_walk.  However, the skcipher_walk is being done with a
"stride" of 2 blocks and is advanced by an even number of blocks after
each step.  This causes the encryption to produce the wrong ciphertext
and authentication tag, and causes the decryption to incorrectly fail.

Fix it by only processing an even number of blocks per step.
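
For illustration only (this sketch is not part of the commit; walk_nbytes, blocks_old, and blocks_new are invented names standing in for walk.nbytes and the before/after expressions), a minimal standalone C program showing the block-count arithmetic:

#include <stdio.h>

#define AES_BLOCK_SIZE 16

int main(void)
{
	/* Stand-in for walk.nbytes: 48 bytes, i.e. an odd 3 blocks. */
	unsigned int walk_nbytes = 3 * AES_BLOCK_SIZE;

	/* Old (buggy) count: can be odd, so the walk falls out of step
	 * with its 2-block stride. */
	int blocks_old = walk_nbytes / AES_BLOCK_SIZE;

	/* Fixed count: rounded down to a whole number of 2-block strides. */
	int blocks_new = walk_nbytes / (2 * AES_BLOCK_SIZE) * 2;

	printf("old: %d blocks, fixed: %d blocks\n", blocks_old, blocks_new);
	return 0;
}

With 48 bytes pending, the old expression yields 3 blocks (odd) while the fixed one yields 2 (even), leaving the odd trailing block in the walk to be handled on a later pass.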

Fixes: c2b24c36 ("crypto: arm64/aes-gcm-ce - fix scatterwalk API violation")
Fixes: 71e52c27 ("crypto: arm64/aes-ce-gcm - operate on two input blocks at a time")
Cc: <stable@vger.kernel.org> # v4.19+
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3e95cd3d
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -473,9 +473,11 @@ static int gcm_encrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			const int blocks =
+				walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
+			int remaining = blocks;
 
 			do {
 				__aes_arm64_encrypt(ctx->aes_key.key_enc,
@@ -485,9 +487,9 @@ static int gcm_encrypt(struct aead_request *req)
 
 				dst += AES_BLOCK_SIZE;
 				src += AES_BLOCK_SIZE;
-			} while (--blocks > 0);
+			} while (--remaining > 0);
 
-			ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
+			ghash_do_update(blocks, dg,
 					walk.dst.virt.addr, &ctx->ghash_key,
 					NULL, pmull_ghash_update_p64);
 
@@ -609,7 +611,7 @@ static int gcm_decrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
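
A note on the shape of the encrypt hunk: blocks becomes const because it is still needed after the do/while loop as the count passed to ghash_do_update(), so a scratch copy, remaining, drives the loop instead. Recomputing walk.nbytes / AES_BLOCK_SIZE for the GHASH update would now over-count, since any odd trailing block is deliberately left unprocessed in the walk.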