Commit 0e89640b authored by Mark Brown, committed by Herbert Xu

crypto: arm64 - Use modern annotations for assembly functions

In an effort to clarify and simplify the annotation of assembly functions
in the kernel, new macros have been introduced. These replace ENTRY and
ENDPROC and also add a new annotation for static functions, which
previously had no ENTRY equivalent. Update the annotations in the crypto
code to the new macros.

There are a small number of files imported from OpenSSL where the assembly
is generated using Perl programs; these are not currently annotated at all
and have not been modified.
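
For illustration (not taken from this patch; the function names below are
hypothetical placeholders), the conversion follows this pattern: a global
function previously marked with ENTRY()/ENDPROC() now uses
SYM_FUNC_START()/SYM_FUNC_END(), and a file-local helper that used to be a
bare label uses SYM_FUNC_START_LOCAL():

	/* before */
	ENTRY(example_func)		// hypothetical global function
		ret
	ENDPROC(example_func)

	example_helper:			// hypothetical local helper, no ENTRY equivalent
		ret
	ENDPROC(example_helper)

	/* after */
	SYM_FUNC_START(example_func)
		ret
	SYM_FUNC_END(example_func)

	SYM_FUNC_START_LOCAL(example_helper)
		ret
	SYM_FUNC_END(example_helper)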
Signed-off-by: Mark Brown <broonie@kernel.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 3907ccfa
@@ -15,7 +15,7 @@
  * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
  * u32 *macp, u8 const rk[], u32 rounds);
  */
-ENTRY(ce_aes_ccm_auth_data)
+SYM_FUNC_START(ce_aes_ccm_auth_data)
 	ldr w8, [x3]			/* leftover from prev round? */
 	ld1 {v0.16b}, [x0]		/* load mac */
 	cbz w8, 1f
@@ -81,13 +81,13 @@ ENTRY(ce_aes_ccm_auth_data)
 	st1 {v0.16b}, [x0]
 10:	str w8, [x3]
 	ret
-ENDPROC(ce_aes_ccm_auth_data)
+SYM_FUNC_END(ce_aes_ccm_auth_data)
 /*
  * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
  * u32 rounds);
  */
-ENTRY(ce_aes_ccm_final)
+SYM_FUNC_START(ce_aes_ccm_final)
 	ld1 {v3.4s}, [x2], #16		/* load first round key */
 	ld1 {v0.16b}, [x0]		/* load mac */
 	cmp w3, #12			/* which key size? */
@@ -121,7 +121,7 @@ ENTRY(ce_aes_ccm_final)
 	eor v0.16b, v0.16b, v1.16b	/* en-/decrypt the mac */
 	st1 {v0.16b}, [x0]		/* store result */
 	ret
-ENDPROC(ce_aes_ccm_final)
+SYM_FUNC_END(ce_aes_ccm_final)
 	.macro aes_ccm_do_crypt,enc
 	ldr x8, [x6, #8]		/* load lower ctr */
@@ -212,10 +212,10 @@ CPU_LE( rev x8, x8 )
  * u8 const rk[], u32 rounds, u8 mac[],
  * u8 ctr[]);
  */
-ENTRY(ce_aes_ccm_encrypt)
+SYM_FUNC_START(ce_aes_ccm_encrypt)
 	aes_ccm_do_crypt	1
-ENDPROC(ce_aes_ccm_encrypt)
+SYM_FUNC_END(ce_aes_ccm_encrypt)
-ENTRY(ce_aes_ccm_decrypt)
+SYM_FUNC_START(ce_aes_ccm_decrypt)
 	aes_ccm_do_crypt	0
-ENDPROC(ce_aes_ccm_decrypt)
+SYM_FUNC_END(ce_aes_ccm_decrypt)
@@ -8,7 +8,7 @@
 	.arch armv8-a+crypto
-ENTRY(__aes_ce_encrypt)
+SYM_FUNC_START(__aes_ce_encrypt)
 	sub w3, w3, #2
 	ld1 {v0.16b}, [x2]
 	ld1 {v1.4s}, [x0], #16
@@ -34,9 +34,9 @@ ENTRY(__aes_ce_encrypt)
 	eor v0.16b, v0.16b, v3.16b
 	st1 {v0.16b}, [x1]
 	ret
-ENDPROC(__aes_ce_encrypt)
+SYM_FUNC_END(__aes_ce_encrypt)
-ENTRY(__aes_ce_decrypt)
+SYM_FUNC_START(__aes_ce_decrypt)
 	sub w3, w3, #2
 	ld1 {v0.16b}, [x2]
 	ld1 {v1.4s}, [x0], #16
@@ -62,23 +62,23 @@ ENTRY(__aes_ce_decrypt)
 	eor v0.16b, v0.16b, v3.16b
 	st1 {v0.16b}, [x1]
 	ret
-ENDPROC(__aes_ce_decrypt)
+SYM_FUNC_END(__aes_ce_decrypt)
 /*
  * __aes_ce_sub() - use the aese instruction to perform the AES sbox
  * substitution on each byte in 'input'
  */
-ENTRY(__aes_ce_sub)
+SYM_FUNC_START(__aes_ce_sub)
 	dup v1.4s, w0
 	movi v0.16b, #0
 	aese v0.16b, v1.16b
 	umov w0, v0.s[0]
 	ret
-ENDPROC(__aes_ce_sub)
+SYM_FUNC_END(__aes_ce_sub)
-ENTRY(__aes_ce_invert)
+SYM_FUNC_START(__aes_ce_invert)
 	ld1 {v0.4s}, [x1]
 	aesimc v1.16b, v0.16b
 	st1 {v1.4s}, [x0]
 	ret
-ENDPROC(__aes_ce_invert)
+SYM_FUNC_END(__aes_ce_invert)
@@ -9,8 +9,8 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
-#define AES_ENTRY(func)		ENTRY(ce_ ## func)
+#define AES_ENTRY(func)		SYM_FUNC_START(ce_ ## func)
-#define AES_ENDPROC(func)	ENDPROC(ce_ ## func)
+#define AES_ENDPROC(func)	SYM_FUNC_END(ce_ ## func)
 	.arch armv8-a+crypto
@@ -122,11 +122,11 @@ CPU_BE( rev w7, w7 )
 	ret
 	.endm
-ENTRY(__aes_arm64_encrypt)
+SYM_FUNC_START(__aes_arm64_encrypt)
 	do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
-ENDPROC(__aes_arm64_encrypt)
+SYM_FUNC_END(__aes_arm64_encrypt)
 	.align 5
-ENTRY(__aes_arm64_decrypt)
+SYM_FUNC_START(__aes_arm64_decrypt)
 	do_crypt iround, crypto_it_tab, crypto_aes_inv_sbox, 0
-ENDPROC(__aes_arm64_decrypt)
+SYM_FUNC_END(__aes_arm64_decrypt)
@@ -22,26 +22,26 @@
 #define ST5(x...) x
 #endif
-aes_encrypt_block4x:
+SYM_FUNC_START_LOCAL(aes_encrypt_block4x)
 	encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
 	ret
-ENDPROC(aes_encrypt_block4x)
+SYM_FUNC_END(aes_encrypt_block4x)
-aes_decrypt_block4x:
+SYM_FUNC_START_LOCAL(aes_decrypt_block4x)
 	decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
 	ret
-ENDPROC(aes_decrypt_block4x)
+SYM_FUNC_END(aes_decrypt_block4x)
 #if MAX_STRIDE == 5
-aes_encrypt_block5x:
+SYM_FUNC_START_LOCAL(aes_encrypt_block5x)
 	encrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
 	ret
-ENDPROC(aes_encrypt_block5x)
+SYM_FUNC_END(aes_encrypt_block5x)
-aes_decrypt_block5x:
+SYM_FUNC_START_LOCAL(aes_decrypt_block5x)
 	decrypt_block5x v0, v1, v2, v3, v4, w3, x2, x8, w7
 	ret
-ENDPROC(aes_decrypt_block5x)
+SYM_FUNC_END(aes_decrypt_block5x)
 #endif
 /*
@@ -8,8 +8,8 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
-#define AES_ENTRY(func)		ENTRY(neon_ ## func)
+#define AES_ENTRY(func)		SYM_FUNC_START(neon_ ## func)
-#define AES_ENDPROC(func)	ENDPROC(neon_ ## func)
+#define AES_ENDPROC(func)	SYM_FUNC_END(neon_ ## func)
 	xtsmask .req v7
 	cbciv .req v7
@@ -380,7 +380,7 @@ ISRM0: .octa 0x0306090c00070a0d01040b0e0205080f
 /*
  * void aesbs_convert_key(u8 out[], u32 const rk[], int rounds)
  */
-ENTRY(aesbs_convert_key)
+SYM_FUNC_START(aesbs_convert_key)
 	ld1 {v7.4s}, [x1], #16		// load round 0 key
 	ld1 {v17.4s}, [x1], #16		// load round 1 key
@@ -425,10 +425,10 @@ ENTRY(aesbs_convert_key)
 	eor v17.16b, v17.16b, v7.16b
 	str q17, [x0]
 	ret
-ENDPROC(aesbs_convert_key)
+SYM_FUNC_END(aesbs_convert_key)
 	.align 4
-aesbs_encrypt8:
+SYM_FUNC_START_LOCAL(aesbs_encrypt8)
 	ldr q9, [bskey], #16		// round 0 key
 	ldr q8, M0SR
 	ldr q24, SR
@@ -488,10 +488,10 @@ aesbs_encrypt8:
 	eor v2.16b, v2.16b, v12.16b
 	eor v5.16b, v5.16b, v12.16b
 	ret
-ENDPROC(aesbs_encrypt8)
+SYM_FUNC_END(aesbs_encrypt8)
 	.align 4
-aesbs_decrypt8:
+SYM_FUNC_START_LOCAL(aesbs_decrypt8)
 	lsl x9, rounds, #7
 	add bskey, bskey, x9
@@ -553,7 +553,7 @@ aesbs_decrypt8:
 	eor v3.16b, v3.16b, v12.16b
 	eor v5.16b, v5.16b, v12.16b
 	ret
-ENDPROC(aesbs_decrypt8)
+SYM_FUNC_END(aesbs_decrypt8)
 /*
  * aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
@@ -621,21 +621,21 @@ ENDPROC(aesbs_decrypt8)
 	.endm
 	.align 4
-ENTRY(aesbs_ecb_encrypt)
+SYM_FUNC_START(aesbs_ecb_encrypt)
 	__ecb_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
-ENDPROC(aesbs_ecb_encrypt)
+SYM_FUNC_END(aesbs_ecb_encrypt)
 	.align 4
-ENTRY(aesbs_ecb_decrypt)
+SYM_FUNC_START(aesbs_ecb_decrypt)
 	__ecb_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
-ENDPROC(aesbs_ecb_decrypt)
+SYM_FUNC_END(aesbs_ecb_decrypt)
 /*
  * aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
  * int blocks, u8 iv[])
  */
 	.align 4
-ENTRY(aesbs_cbc_decrypt)
+SYM_FUNC_START(aesbs_cbc_decrypt)
 	frame_push 6
 	mov x19, x0
@@ -720,7 +720,7 @@ ENTRY(aesbs_cbc_decrypt)
 2:	frame_pop
 	ret
-ENDPROC(aesbs_cbc_decrypt)
+SYM_FUNC_END(aesbs_cbc_decrypt)
 	.macro next_tweak, out, in, const, tmp
 	sshr \tmp\().2d, \in\().2d, #63
@@ -736,7 +736,7 @@ ENDPROC(aesbs_cbc_decrypt)
  * aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
  * int blocks, u8 iv[])
  */
-__xts_crypt8:
+SYM_FUNC_START_LOCAL(__xts_crypt8)
 	mov x6, #1
 	lsl x6, x6, x23
 	subs w23, w23, #8
@@ -789,7 +789,7 @@ __xts_crypt8:
 0:	mov bskey, x21
 	mov rounds, x22
 	br x7
-ENDPROC(__xts_crypt8)
+SYM_FUNC_END(__xts_crypt8)
 	.macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
 	frame_push 6, 64
@@ -854,13 +854,13 @@ ENDPROC(__xts_crypt8)
 	ret
 	.endm
-ENTRY(aesbs_xts_encrypt)
+SYM_FUNC_START(aesbs_xts_encrypt)
 	__xts_crypt aesbs_encrypt8, v0, v1, v4, v6, v3, v7, v2, v5
-ENDPROC(aesbs_xts_encrypt)
+SYM_FUNC_END(aesbs_xts_encrypt)
-ENTRY(aesbs_xts_decrypt)
+SYM_FUNC_START(aesbs_xts_decrypt)
 	__xts_crypt aesbs_decrypt8, v0, v1, v6, v4, v2, v7, v3, v5
-ENDPROC(aesbs_xts_decrypt)
+SYM_FUNC_END(aesbs_xts_decrypt)
 	.macro next_ctr, v
 	mov \v\().d[1], x8
@@ -874,7 +874,7 @@ ENDPROC(aesbs_xts_decrypt)
  * aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
  * int rounds, int blocks, u8 iv[], u8 final[])
  */
-ENTRY(aesbs_ctr_encrypt)
+SYM_FUNC_START(aesbs_ctr_encrypt)
 	frame_push 8
 	mov x19, x0
@@ -1002,4 +1002,4 @@ CPU_LE( rev x8, x8 )
 7:	cbz x25, 8b
 	st1 {v5.16b}, [x25]
 	b 8b
-ENDPROC(aesbs_ctr_encrypt)
+SYM_FUNC_END(aesbs_ctr_encrypt)
@@ -36,7 +36,7 @@
  *
  * Clobbers: w3, x10, v4, v12
  */
-chacha_permute:
+SYM_FUNC_START_LOCAL(chacha_permute)
 	adr_l x10, ROT8
 	ld1 {v12.4s}, [x10]
@@ -104,9 +104,9 @@ chacha_permute:
 	b.ne .Ldoubleround
 	ret
-ENDPROC(chacha_permute)
+SYM_FUNC_END(chacha_permute)
-ENTRY(chacha_block_xor_neon)
+SYM_FUNC_START(chacha_block_xor_neon)
 	// x0: Input state matrix, s
 	// x1: 1 data block output, o
 	// x2: 1 data block input, i
@@ -143,9 +143,9 @@ ENTRY(chacha_block_xor_neon)
 	ldp x29, x30, [sp], #16
 	ret
-ENDPROC(chacha_block_xor_neon)
+SYM_FUNC_END(chacha_block_xor_neon)
-ENTRY(hchacha_block_neon)
+SYM_FUNC_START(hchacha_block_neon)
 	// x0: Input state matrix, s
 	// x1: output (8 32-bit words)
 	// w2: nrounds
@@ -163,7 +163,7 @@ ENTRY(hchacha_block_neon)
 	ldp x29, x30, [sp], #16
 	ret
-ENDPROC(hchacha_block_neon)
+SYM_FUNC_END(hchacha_block_neon)
 	a0 .req w12
 	a1 .req w13
@@ -183,7 +183,7 @@ ENDPROC(hchacha_block_neon)
 	a15 .req w28
 	.align 6
-ENTRY(chacha_4block_xor_neon)
+SYM_FUNC_START(chacha_4block_xor_neon)
 	frame_push 10
 	// x0: Input state matrix, s
@@ -845,7 +845,7 @@ CPU_BE( rev a15, a15 )
 	eor v31.16b, v31.16b, v3.16b
 	st1 {v28.16b-v31.16b}, [x1]
 	b .Lout
-ENDPROC(chacha_4block_xor_neon)
+SYM_FUNC_END(chacha_4block_xor_neon)
 	.section ".rodata", "a", %progbits
 	.align L1_CACHE_SHIFT
@@ -131,7 +131,7 @@
 	tbl bd4.16b, {\bd\().16b}, perm4.16b
 	.endm
-__pmull_p8_core:
+SYM_FUNC_START_LOCAL(__pmull_p8_core)
 .L__pmull_p8_core:
 	ext t4.8b, ad.8b, ad.8b, #1	// A1
 	ext t5.8b, ad.8b, ad.8b, #2	// A2
@@ -194,7 +194,7 @@ __pmull_p8_core:
 	eor t4.16b, t4.16b, t5.16b
 	eor t6.16b, t6.16b, t3.16b
 	ret
-ENDPROC(__pmull_p8_core)
+SYM_FUNC_END(__pmull_p8_core)
 	.macro __pmull_p8, rq, ad, bd, i
 	.ifnc \bd, fold_consts
@@ -488,9 +488,9 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
 //
 // Assumes len >= 16.
 //
-ENTRY(crc_t10dif_pmull_p8)
+SYM_FUNC_START(crc_t10dif_pmull_p8)
 	crc_t10dif_pmull p8
-ENDPROC(crc_t10dif_pmull_p8)
+SYM_FUNC_END(crc_t10dif_pmull_p8)
 	.align 5
 //
@@ -498,9 +498,9 @@ ENDPROC(crc_t10dif_pmull_p8)
 //
 // Assumes len >= 16.
 //
-ENTRY(crc_t10dif_pmull_p64)
+SYM_FUNC_START(crc_t10dif_pmull_p64)
 	crc_t10dif_pmull p64
-ENDPROC(crc_t10dif_pmull_p64)
+SYM_FUNC_END(crc_t10dif_pmull_p64)
 	.section ".rodata", "a"
 	.align 4
@@ -350,13 +350,13 @@ CPU_LE( rev64 T1.16b, T1.16b )
  * void pmull_ghash_update(int blocks, u64 dg[], const char *src,
  * struct ghash_key const *k, const char *head)
  */
-ENTRY(pmull_ghash_update_p64)
+SYM_FUNC_START(pmull_ghash_update_p64)
 	__pmull_ghash p64
-ENDPROC(pmull_ghash_update_p64)
+SYM_FUNC_END(pmull_ghash_update_p64)
-ENTRY(pmull_ghash_update_p8)
+SYM_FUNC_START(pmull_ghash_update_p8)
 	__pmull_ghash p8
-ENDPROC(pmull_ghash_update_p8)
+SYM_FUNC_END(pmull_ghash_update_p8)
 	KS0 .req v8
 	KS1 .req v9
@@ -62,7 +62,7 @@
  *
  * It's guaranteed that message_len % 16 == 0.
  */
-ENTRY(nh_neon)
+SYM_FUNC_START(nh_neon)
 	ld1 {K0.4s,K1.4s}, [KEY], #32
 	movi PASS0_SUMS.2d, #0
@@ -100,4 +100,4 @@ ENTRY(nh_neon)
 	addp T1.2d, PASS2_SUMS.2d, PASS3_SUMS.2d
 	st1 {T0.16b,T1.16b}, [HASH]
 	ret
-ENDPROC(nh_neon)
+SYM_FUNC_END(nh_neon)
@@ -65,7 +65,7 @@
  * void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
  * int blocks)
  */
-ENTRY(sha1_ce_transform)
+SYM_FUNC_START(sha1_ce_transform)
 	frame_push 3
 	mov x19, x0
@@ -160,4 +160,4 @@ CPU_LE( rev32 v11.16b, v11.16b )
 	str dgb, [x19, #16]
 	frame_pop
 	ret
-ENDPROC(sha1_ce_transform)
+SYM_FUNC_END(sha1_ce_transform)
@@ -75,7 +75,7 @@
  * int blocks)
  */
 	.text
-ENTRY(sha2_ce_transform)
+SYM_FUNC_START(sha2_ce_transform)
 	frame_push 3
 	mov x19, x0
@@ -166,4 +166,4 @@ CPU_LE( rev32 v19.16b, v19.16b )
 4:	st1 {dgav.4s, dgbv.4s}, [x19]
 	frame_pop
 	ret
-ENDPROC(sha2_ce_transform)
+SYM_FUNC_END(sha2_ce_transform)
@@ -40,7 +40,7 @@
  * sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size)
  */
 	.text
-ENTRY(sha3_ce_transform)
+SYM_FUNC_START(sha3_ce_transform)
 	frame_push 4
 	mov x19, x0
@@ -218,7 +218,7 @@ ENTRY(sha3_ce_transform)
 	st1 {v24.1d}, [x19]
 	frame_pop
 	ret
-ENDPROC(sha3_ce_transform)
+SYM_FUNC_END(sha3_ce_transform)
 	.section ".rodata", "a"
 	.align 8
@@ -106,7 +106,7 @@
  * int blocks)
  */
 	.text
-ENTRY(sha512_ce_transform)
+SYM_FUNC_START(sha512_ce_transform)
 	frame_push 3
 	mov x19, x0
@@ -216,4 +216,4 @@ CPU_LE( rev64 v19.16b, v19.16b )
 3:	st1 {v8.2d-v11.2d}, [x19]
 	frame_pop
 	ret
-ENDPROC(sha512_ce_transform)
+SYM_FUNC_END(sha512_ce_transform)
@@ -73,7 +73,7 @@
  * int blocks)
  */
 	.text
-ENTRY(sm3_ce_transform)
+SYM_FUNC_START(sm3_ce_transform)
 	/* load state */
 	ld1 {v8.4s-v9.4s}, [x0]
 	rev64 v8.4s, v8.4s
@@ -131,7 +131,7 @@ CPU_LE( rev32 v3.16b, v3.16b )
 	ext v9.16b, v9.16b, v9.16b, #8
 	st1 {v8.4s-v9.4s}, [x0]
 	ret
-ENDPROC(sm3_ce_transform)
+SYM_FUNC_END(sm3_ce_transform)
 	.section ".rodata", "a"
 	.align 3
@@ -15,7 +15,7 @@
  * void sm4_ce_do_crypt(const u32 *rk, u32 *out, const u32 *in);
  */
 	.text
-ENTRY(sm4_ce_do_crypt)
+SYM_FUNC_START(sm4_ce_do_crypt)
 	ld1 {v8.4s}, [x2]
 	ld1 {v0.4s-v3.4s}, [x0], #64
 CPU_LE( rev32 v8.16b, v8.16b )
@@ -33,4 +33,4 @@ CPU_LE( rev32 v8.16b, v8.16b )
 CPU_LE( rev32 v8.16b, v8.16b )
 	st1 {v8.4s}, [x1]
 	ret
-ENDPROC(sm4_ce_do_crypt)
+SYM_FUNC_END(sm4_ce_do_crypt)