Commit e4f87485 authored by Ard Biesheuvel, committed by Herbert Xu

crypto: arm64/gcm - use inline helper to suppress indirect calls

Introduce an inline wrapper for ghash_do_update() that incorporates
the indirect call to the asm routine that is passed as an argument,
and keep the non-SIMD fallback code out of line. This ensures that
all references to the function pointer are inlined where the address
is taken, removing the need for any indirect calls to begin with.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 17d0fb1f
...@@ -69,17 +69,8 @@ static int ghash_init(struct shash_desc *desc) ...@@ -69,17 +69,8 @@ static int ghash_init(struct shash_desc *desc)
} }
static void ghash_do_update(int blocks, u64 dg[], const char *src, static void ghash_do_update(int blocks, u64 dg[], const char *src,
struct ghash_key *key, const char *head, struct ghash_key *key, const char *head)
void (*simd_update)(int blocks, u64 dg[],
const char *src,
u64 const h[][2],
const char *head))
{ {
if (likely(crypto_simd_usable() && simd_update)) {
kernel_neon_begin();
simd_update(blocks, dg, src, key->h, head);
kernel_neon_end();
} else {
be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) };
do { do {
...@@ -99,6 +90,22 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src, ...@@ -99,6 +90,22 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
dg[0] = be64_to_cpu(dst.b); dg[0] = be64_to_cpu(dst.b);
dg[1] = be64_to_cpu(dst.a); dg[1] = be64_to_cpu(dst.a);
}
static __always_inline
void ghash_do_simd_update(int blocks, u64 dg[], const char *src,
struct ghash_key *key, const char *head,
void (*simd_update)(int blocks, u64 dg[],
const char *src,
u64 const h[][2],
const char *head))
{
if (likely(crypto_simd_usable())) {
kernel_neon_begin();
simd_update(blocks, dg, src, key->h, head);
kernel_neon_end();
} else {
ghash_do_update(blocks, dg, src, key, head);
} }
} }
...@@ -131,7 +138,7 @@ static int ghash_update(struct shash_desc *desc, const u8 *src, ...@@ -131,7 +138,7 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
do { do {
int chunk = min(blocks, MAX_BLOCKS); int chunk = min(blocks, MAX_BLOCKS);
ghash_do_update(chunk, ctx->digest, src, key, ghash_do_simd_update(chunk, ctx->digest, src, key,
partial ? ctx->buf : NULL, partial ? ctx->buf : NULL,
pmull_ghash_update_p8); pmull_ghash_update_p8);
...@@ -155,7 +162,7 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) ...@@ -155,7 +162,7 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
ghash_do_update(1, ctx->digest, ctx->buf, key, NULL, ghash_do_simd_update(1, ctx->digest, ctx->buf, key, NULL,
pmull_ghash_update_p8); pmull_ghash_update_p8);
} }
put_unaligned_be64(ctx->digest[1], dst); put_unaligned_be64(ctx->digest[1], dst);
...@@ -280,7 +287,7 @@ static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[], ...@@ -280,7 +287,7 @@ static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[],
if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) { if (count >= GHASH_BLOCK_SIZE || *buf_count == GHASH_BLOCK_SIZE) {
int blocks = count / GHASH_BLOCK_SIZE; int blocks = count / GHASH_BLOCK_SIZE;
ghash_do_update(blocks, dg, src, &ctx->ghash_key, ghash_do_simd_update(blocks, dg, src, &ctx->ghash_key,
*buf_count ? buf : NULL, *buf_count ? buf : NULL,
pmull_ghash_update_p64); pmull_ghash_update_p64);
...@@ -326,7 +333,7 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[]) ...@@ -326,7 +333,7 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[])
if (buf_count) { if (buf_count) {
memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count); memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count);
ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL, ghash_do_simd_update(1, dg, buf, &ctx->ghash_key, NULL,
pmull_ghash_update_p64); pmull_ghash_update_p64);
} }
} }
...@@ -403,7 +410,7 @@ static int gcm_encrypt(struct aead_request *req) ...@@ -403,7 +410,7 @@ static int gcm_encrypt(struct aead_request *req)
} while (--remaining > 0); } while (--remaining > 0);
ghash_do_update(blocks, dg, walk.dst.virt.addr, ghash_do_update(blocks, dg, walk.dst.virt.addr,
&ctx->ghash_key, NULL, NULL); &ctx->ghash_key, NULL);
err = skcipher_walk_done(&walk, err = skcipher_walk_done(&walk,
walk.nbytes % AES_BLOCK_SIZE); walk.nbytes % AES_BLOCK_SIZE);
...@@ -422,7 +429,7 @@ static int gcm_encrypt(struct aead_request *req) ...@@ -422,7 +429,7 @@ static int gcm_encrypt(struct aead_request *req)
tag = (u8 *)&lengths; tag = (u8 *)&lengths;
ghash_do_update(1, dg, tag, &ctx->ghash_key, ghash_do_update(1, dg, tag, &ctx->ghash_key,
walk.nbytes ? buf : NULL, NULL); walk.nbytes ? buf : NULL);
if (walk.nbytes) if (walk.nbytes)
err = skcipher_walk_done(&walk, 0); err = skcipher_walk_done(&walk, 0);
...@@ -507,7 +514,7 @@ static int gcm_decrypt(struct aead_request *req) ...@@ -507,7 +514,7 @@ static int gcm_decrypt(struct aead_request *req)
u8 *dst = walk.dst.virt.addr; u8 *dst = walk.dst.virt.addr;
ghash_do_update(blocks, dg, walk.src.virt.addr, ghash_do_update(blocks, dg, walk.src.virt.addr,
&ctx->ghash_key, NULL, NULL); &ctx->ghash_key, NULL);
do { do {
aes_encrypt(&ctx->aes_key, buf, iv); aes_encrypt(&ctx->aes_key, buf, iv);
...@@ -530,7 +537,7 @@ static int gcm_decrypt(struct aead_request *req) ...@@ -530,7 +537,7 @@ static int gcm_decrypt(struct aead_request *req)
tag = (u8 *)&lengths; tag = (u8 *)&lengths;
ghash_do_update(1, dg, tag, &ctx->ghash_key, ghash_do_update(1, dg, tag, &ctx->ghash_key,
walk.nbytes ? buf : NULL, NULL); walk.nbytes ? buf : NULL);
if (walk.nbytes) { if (walk.nbytes) {
aes_encrypt(&ctx->aes_key, buf, iv); aes_encrypt(&ctx->aes_key, buf, iv);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment