Commit b8fb993a authored by Ard Biesheuvel, committed by Herbert Xu

crypto: arm64/aes-ce-cipher: add non-SIMD generic fallback

The arm64 kernel will shortly disallow nested kernel mode NEON, so
add a fallback to scalar code that can be invoked in that case.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent f402e311
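The change below follows one pattern throughout: before touching NEON registers, check whether the SIMD unit may be claimed in the current context; if not, call the generic scalar AES core that CRYPTO_AES_ARM64 now provides, otherwise take the usual kernel_neon_begin()/kernel_neon_end() path. A minimal sketch of that shape is shown here; it assumes only may_use_simd(), kernel_neon_begin()/kernel_neon_end() and the __aes_arm64_encrypt() fallback declared in this patch, and the wrapper name is illustrative rather than the driver's actual code:

#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/aes.h>

/* Scalar fallback from arch/arm64/crypto/aes-cipher-core.S (CRYPTO_AES_ARM64),
 * declared the same way this patch declares it. */
asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);

/* Illustrative wrapper only; the real driver open-codes the NEON path with
 * inline assembly inside aes_cipher_encrypt(). */
static void example_ce_encrypt(struct crypto_aes_ctx *ctx, u8 *dst,
			       const u8 *src, int rounds)
{
	if (!may_use_simd()) {
		/* NEON cannot be claimed here (e.g. nested kernel mode NEON),
		 * so run the generic scalar AES core instead. */
		__aes_arm64_encrypt(ctx->key_enc, dst, src, rounds);
		return;
	}

	kernel_neon_begin();
	/* ... ARMv8 Crypto Extensions instructions on NEON registers ... */
	kernel_neon_end();
}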
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -52,6 +52,7 @@ config CRYPTO_AES_ARM64_CE
 	tristate "AES core cipher using ARMv8 Crypto Extensions"
 	depends on ARM64 && KERNEL_MODE_NEON
 	select CRYPTO_ALGAPI
+	select CRYPTO_AES_ARM64
 
 config CRYPTO_AES_ARM64_CE_CCM
 	tristate "AES in CCM mode using ARMv8 Crypto Extensions"
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -9,6 +9,7 @@
  */
 
 #include <asm/neon.h>
+#include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/aes.h>
 #include <linux/cpufeature.h>
@@ -21,6 +22,9 @@ MODULE_DESCRIPTION("Synchronous AES cipher using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_LICENSE("GPL v2");
 
+asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+asmlinkage void __aes_arm64_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+
 struct aes_block {
 	u8 b[AES_BLOCK_SIZE];
 };
@@ -45,7 +49,12 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 	void *dummy0;
 	int dummy1;
 
-	kernel_neon_begin_partial(4);
+	if (!may_use_simd()) {
+		__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
+		return;
+	}
+
+	kernel_neon_begin();
 
 	__asm__("	ld1	{v0.16b}, %[in]		;"
 		"	ld1	{v1.4s}, [%[key]], #16	;"
@@ -90,7 +99,12 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 	void *dummy0;
 	int dummy1;
 
-	kernel_neon_begin_partial(4);
+	if (!may_use_simd()) {
+		__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
+		return;
+	}
+
+	kernel_neon_begin();
 
 	__asm__("	ld1	{v0.16b}, %[in]		;"
 		"	ld1	{v1.4s}, [%[key]], #16	;"
@@ -170,7 +184,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 	for (i = 0; i < kwords; i++)
 		ctx->key_enc[i] = get_unaligned_le32(in_key + i * sizeof(u32));
 
-	kernel_neon_begin_partial(2);
+	kernel_neon_begin();
 	for (i = 0; i < sizeof(rcon); i++) {
 		u32 *rki = ctx->key_enc + (i * kwords);
 		u32 *rko = rki + kwords;