Commit aec4d0e3 authored by Josh Poimboeuf, committed by Ingo Molnar

x86/asm/crypto: Simplify stack usage in sha-mb functions

sha1_mb_mgr_flush_avx2() and sha1_mb_mgr_submit_avx2() both allocate a
lot of stack space which is never used.  Also, many of the registers
being saved aren't being clobbered, so there's no need to save them.
Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Bernd Petrovitsch <bernd@petrovitsch.priv.at>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Chris J Arges <chris.j.arges@canonical.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michal Marek <mmarek@suse.cz>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Pedro Alves <palves@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: live-patching@vger.kernel.org
Link: http://lkml.kernel.org/r/9402e4d87580d6b2376ed95f67b84bdcce3c830e.1453405861.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f66f6191
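
For reference, a minimal sketch (illustrative only, not taken from the patch) of the two prologue/epilogue styles the diff below switches between. It assumes a hypothetical function that clobbers only %rbx and whose body leaves %r10 untouched; old_style and new_style are made-up labels. The old style reserves an aligned spill area sized for XMM and GPR saves that are never used; the new style simply pushes and pops the callee-saved registers that are actually clobbered (%rbx in the flush function, %rbx and %rbp in the submit function).

    # Illustrative sketch only -- not part of the patch.
    .text

    STACK_SPACE = 8*8 + 16*10 + 8        # GPR slots + XMM slots + alignment pad

    # Old style: build an aligned frame even though most of it is unused.
    old_style:
            mov     %rsp, %r10           # remember the original %rsp
            sub     $STACK_SPACE, %rsp   # reserve the (mostly unused) save area
            and     $~31, %rsp           # 32-byte align the frame
            mov     %rbx, (%rsp)         # save the one register we clobber
            # ... body clobbers only %rbx and leaves %r10 alone ...
            mov     (%rsp), %rbx         # restore it
            mov     %r10, %rsp           # restore the original %rsp
            ret

    # New style: push/pop only what is actually clobbered.
    new_style:
            push    %rbx
            # ... body clobbers only %rbx ...
            pop     %rbx
            ret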
sha1_mb_mgr_flush_avx2.S

@@ -86,16 +86,6 @@
 #define extra_blocks %arg2
 #define p            %arg2
 
-
-# STACK_SPACE needs to be an odd multiple of 8
-_XMM_SAVE_SIZE  = 10*16
-_GPR_SAVE_SIZE  = 8*8
-_ALIGN_SIZE     = 8
-
-_XMM_SAVE       = 0
-_GPR_SAVE       = _XMM_SAVE + _XMM_SAVE_SIZE
-STACK_SPACE     = _GPR_SAVE + _GPR_SAVE_SIZE + _ALIGN_SIZE
-
 .macro LABEL prefix n
 \prefix\n\():
 .endm
@@ -113,16 +103,7 @@ offset = \_offset
 # JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state)
 # arg 1 : rcx : state
 ENTRY(sha1_mb_mgr_flush_avx2)
-        mov %rsp, %r10
-        sub $STACK_SPACE, %rsp
-        and $~31, %rsp
-        mov %rbx, _GPR_SAVE(%rsp)
-        mov %r10, _GPR_SAVE+8*1(%rsp) #save rsp
-        mov %rbp, _GPR_SAVE+8*3(%rsp)
-        mov %r12, _GPR_SAVE+8*4(%rsp)
-        mov %r13, _GPR_SAVE+8*5(%rsp)
-        mov %r14, _GPR_SAVE+8*6(%rsp)
-        mov %r15, _GPR_SAVE+8*7(%rsp)
+        push %rbx
 
         # If bit (32+3) is set, then all lanes are empty
         mov _unused_lanes(state), unused_lanes
@@ -230,16 +211,7 @@ len_is_0:
         mov tmp2_w, offset(job_rax)
 
 return:
-
-        mov _GPR_SAVE(%rsp), %rbx
-        mov _GPR_SAVE+8*1(%rsp), %r10 #saved rsp
-        mov _GPR_SAVE+8*3(%rsp), %rbp
-        mov _GPR_SAVE+8*4(%rsp), %r12
-        mov _GPR_SAVE+8*5(%rsp), %r13
-        mov _GPR_SAVE+8*6(%rsp), %r14
-        mov _GPR_SAVE+8*7(%rsp), %r15
-        mov %r10, %rsp
-
+        pop %rbx
         ret
 
 return_null:
sha1_mb_mgr_submit_avx2.S

@@ -94,25 +94,12 @@ DWORD_tmp = %r9d
 lane_data = %r10
 
-# STACK_SPACE needs to be an odd multiple of 8
-STACK_SPACE = 8*8 + 16*10 + 8
-
 # JOB* submit_mb_mgr_submit_avx2(MB_MGR *state, job_sha1 *job)
 # arg 1 : rcx : state
 # arg 2 : rdx : job
 ENTRY(sha1_mb_mgr_submit_avx2)
-        mov %rsp, %r10
-        sub $STACK_SPACE, %rsp
-        and $~31, %rsp
-
-        mov %rbx, (%rsp)
-        mov %r10, 8*2(%rsp) #save old rsp
-        mov %rbp, 8*3(%rsp)
-        mov %r12, 8*4(%rsp)
-        mov %r13, 8*5(%rsp)
-        mov %r14, 8*6(%rsp)
-        mov %r15, 8*7(%rsp)
+        push %rbx
+        push %rbp
 
         mov _unused_lanes(state), unused_lanes
         mov unused_lanes, lane
@@ -203,16 +190,8 @@ len_is_0:
         movl DWORD_tmp, _result_digest+1*16(job_rax)
 
 return:
-
-        mov (%rsp), %rbx
-        mov 8*2(%rsp), %r10 #save old rsp
-        mov 8*3(%rsp), %rbp
-        mov 8*4(%rsp), %r12
-        mov 8*5(%rsp), %r13
-        mov 8*6(%rsp), %r14
-        mov 8*7(%rsp), %r15
-        mov %r10, %rsp
-
+        pop %rbp
+        pop %rbx
         ret
 
 return_null: