Commit 30f1a9f5 authored by Ard Biesheuvel, committed by Herbert Xu

crypto: arm64/aes-ce-gcm - don't reload key schedule if avoidable

Squeeze out another 5% of performance by minimizing the number
of invocations of kernel_neon_begin()/kernel_neon_end() on the
common path, which also allows some reloads of the key schedule
to be optimized away.

The resulting code runs at 2.3 cycles per byte on a Cortex-A53.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent e0bd888d
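
The core of the change is a control-flow tweak rather than new crypto: the NEON context opened for the initial counter-block encryption is kept open for the first bulk pass, and the asm routine is told, via a NULL round-key pointer, that the key schedule is already loaded, so only later passes pay for a reload. The standalone sketch below models that flow with stand-in stubs; neon_begin(), neon_end(), gcm_bulk() and the three-chunk walk are hypothetical placeholders for illustration, not the kernel API used in the patch.

/*
 * Minimal sketch (not kernel code) of the pattern introduced by this
 * patch. rk == NULL on the first pass means "the round keys loaded
 * earlier in this NEON section are still live, do not reload them".
 */
#include <stdio.h>

static void neon_begin(void) { puts("neon_begin"); }
static void neon_end(void)   { puts("neon_end"); }

/* Stand-in for the PMULL bulk routine. */
static void gcm_bulk(int chunk, const unsigned *rk)
{
	printf("chunk %d: %s key schedule\n", chunk,
	       rk ? "reload" : "reuse");
}

int main(void)
{
	static const unsigned key_enc[60];	/* dummy expanded AES key */
	const unsigned *rk = NULL;		/* NULL on the first pass */
	int chunk = 0, remaining = 3;		/* pretend the walk has 3 chunks */

	neon_begin();		/* covers IV/tag setup and the first bulk call */
	/* ... counter-block setup would happen here ... */

	do {
		if (rk)		/* re-enter NEON only after the first pass */
			neon_begin();

		gcm_bulk(chunk++, rk);

		neon_end();	/* drop NEON so the task can be preempted */

		rk = key_enc;	/* later passes must reload the key schedule */
	} while (--remaining > 0);

	return 0;
}

In the patch itself the same role is played by the rk argument of pmull_gcm_encrypt()/pmull_gcm_decrypt() and by the cbnz x6, 4f check added to the assembly below.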
arch/arm64/crypto/ghash-ce-core.S:

@@ -1,7 +1,7 @@
 /*
  * Accelerated GHASH implementation with ARMv8 PMULL instructions.
  *
- * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -332,8 +332,6 @@ ENDPROC(pmull_ghash_update_p8)
 	ld1		{XL.2d}, [x1]
 	ldr		x8, [x5, #8]			// load lower counter
 
-	load_round_keys	w7, x6
-
 	movi		MASK.16b, #0xe1
 	trn1		SHASH2.2d, SHASH.2d, HH.2d
 	trn2		T1.2d, SHASH.2d, HH.2d
@@ -346,6 +344,8 @@ CPU_LE(	rev		x8, x8		)
 	ld1		{KS0.16b-KS1.16b}, [x10]
 	.endif
 
+	cbnz		x6, 4f
+
 0:	ld1		{INP0.16b-INP1.16b}, [x3], #32
 
 	rev		x9, x8
@@ -471,6 +471,9 @@ CPU_LE(	rev		x8, x8		)
 	enc_round	KS0, v20
 	enc_round	KS1, v20
 	b		1b
+
+4:	load_round_keys	w7, x6
+	b		0b
 	.endm
 
 	/*
arch/arm64/crypto/ghash-ce-glue.c:

@@ -1,7 +1,7 @@
 /*
  * Accelerated GHASH implementation with ARMv8 PMULL instructions.
  *
- * Copyright (C) 2014 - 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -373,37 +373,39 @@ static int gcm_encrypt(struct aead_request *req)
 	memcpy(iv, req->iv, GCM_IV_SIZE);
 	put_unaligned_be32(1, iv + GCM_IV_SIZE);
 
-	if (likely(may_use_simd())) {
-		kernel_neon_begin();
+	err = skcipher_walk_aead_encrypt(&walk, req, false);
 
+	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+		u32 const *rk = NULL;
+
+		kernel_neon_begin();
 		pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 		pmull_gcm_encrypt_block(ks, iv, NULL, nrounds);
 		put_unaligned_be32(3, iv + GCM_IV_SIZE);
 		pmull_gcm_encrypt_block(ks + AES_BLOCK_SIZE, iv, NULL, nrounds);
 		put_unaligned_be32(4, iv + GCM_IV_SIZE);
-		kernel_neon_end();
-
-		err = skcipher_walk_aead_encrypt(&walk, req, false);
 
-		while (walk.nbytes >= 2 * AES_BLOCK_SIZE) {
+		do {
 			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 
-			kernel_neon_begin();
+			if (rk)
+				kernel_neon_begin();
+
 			pmull_gcm_encrypt(blocks, dg, walk.dst.virt.addr,
 					  walk.src.virt.addr, ctx->h2, iv,
-					  ctx->aes_key.key_enc, nrounds, ks);
+					  rk, nrounds, ks);
 			kernel_neon_end();
 
 			err = skcipher_walk_done(&walk,
 					walk.nbytes % (2 * AES_BLOCK_SIZE));
-		}
+
+			rk = ctx->aes_key.key_enc;
+		} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
 	} else {
 		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-		err = skcipher_walk_aead_encrypt(&walk, req, false);
-
 		while (walk.nbytes >= AES_BLOCK_SIZE) {
 			int blocks = walk.nbytes / AES_BLOCK_SIZE;
 			u8 *dst = walk.dst.virt.addr;
@@ -485,50 +487,53 @@ static int gcm_decrypt(struct aead_request *req)
 	memcpy(iv, req->iv, GCM_IV_SIZE);
 	put_unaligned_be32(1, iv + GCM_IV_SIZE);
 
-	if (likely(may_use_simd())) {
+	err = skcipher_walk_aead_decrypt(&walk, req, false);
+
+	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+		u32 const *rk = NULL;
+
 		kernel_neon_begin();
 		pmull_gcm_encrypt_block(tag, iv, ctx->aes_key.key_enc, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
-		kernel_neon_end();
 
-		err = skcipher_walk_aead_decrypt(&walk, req, false);
-
-		while (walk.nbytes >= 2 * AES_BLOCK_SIZE) {
+		do {
 			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
+			int rem = walk.total - blocks * AES_BLOCK_SIZE;
 
-			kernel_neon_begin();
+			if (rk)
+				kernel_neon_begin();
+
 			pmull_gcm_decrypt(blocks, dg, walk.dst.virt.addr,
 					  walk.src.virt.addr, ctx->h2, iv,
-					  ctx->aes_key.key_enc, nrounds);
-			kernel_neon_end();
-
-			err = skcipher_walk_done(&walk,
-					walk.nbytes % (2 * AES_BLOCK_SIZE));
-		}
+					  rk, nrounds);
 
-		if (walk.nbytes) {
-			u8 *iv2 = iv + AES_BLOCK_SIZE;
+			/* check if this is the final iteration of the loop */
+			if (rem < (2 * AES_BLOCK_SIZE)) {
+				u8 *iv2 = iv + AES_BLOCK_SIZE;
 
-			if (walk.nbytes > AES_BLOCK_SIZE) {
-				memcpy(iv2, iv, AES_BLOCK_SIZE);
-				crypto_inc(iv2, AES_BLOCK_SIZE);
-			}
+				if (rem > AES_BLOCK_SIZE) {
+					memcpy(iv2, iv, AES_BLOCK_SIZE);
+					crypto_inc(iv2, AES_BLOCK_SIZE);
+				}
 
-			kernel_neon_begin();
-			pmull_gcm_encrypt_block(iv, iv, ctx->aes_key.key_enc,
-						nrounds);
+				pmull_gcm_encrypt_block(iv, iv, NULL, nrounds);
 
-			if (walk.nbytes > AES_BLOCK_SIZE)
-				pmull_gcm_encrypt_block(iv2, iv2, NULL,
-							nrounds);
-			kernel_neon_end();
-		}
+				if (rem > AES_BLOCK_SIZE)
+					pmull_gcm_encrypt_block(iv2, iv2, NULL,
+								nrounds);
+			}
+
+			kernel_neon_end();
+
+			err = skcipher_walk_done(&walk,
+					walk.nbytes % (2 * AES_BLOCK_SIZE));
+
+			rk = ctx->aes_key.key_enc;
+		} while (walk.nbytes >= 2 * AES_BLOCK_SIZE);
 	} else {
 		__aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-		err = skcipher_walk_aead_decrypt(&walk, req, false);
-
 		while (walk.nbytes >= AES_BLOCK_SIZE) {
 			int blocks = walk.nbytes / AES_BLOCK_SIZE;
 			u8 *dst = walk.dst.virt.addr;