Commit ff467594 authored by Andy Lutomirski, committed by Ingo Molnar

x86/asm/entry/64: Save all regs on interrupt entry

To prepare for the big rewrite of the error and interrupt exit
paths, we will need pt_regs completely filled in.

It's already completely filled in when error_exit runs, so rearrange
interrupt handling to match it.  This will slow down interrupt
handling very slightly (eight instructions), but the
simplification it enables will be more than worth it.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Denys Vlasenko <vda.linux@googlemail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/d8a766a7f558b30e6e01352854628a2d9943460c.1435952415.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 29ea1b25
...@@ -135,9 +135,6 @@ For 32-bit we have the following conventions - kernel is built with ...@@ -135,9 +135,6 @@ For 32-bit we have the following conventions - kernel is built with
movq %rbp, 4*8+\offset(%rsp) movq %rbp, 4*8+\offset(%rsp)
movq %rbx, 5*8+\offset(%rsp) movq %rbx, 5*8+\offset(%rsp)
.endm .endm
.macro SAVE_EXTRA_REGS_RBP offset=0
movq %rbp, 4*8+\offset(%rsp)
.endm
.macro RESTORE_EXTRA_REGS offset=0 .macro RESTORE_EXTRA_REGS offset=0
movq 0*8+\offset(%rsp), %r15 movq 0*8+\offset(%rsp), %r15
......
...@@ -502,21 +502,13 @@ END(irq_entries_start) ...@@ -502,21 +502,13 @@ END(irq_entries_start)
/* 0(%rsp): ~(interrupt number) */ /* 0(%rsp): ~(interrupt number) */
.macro interrupt func .macro interrupt func
cld cld
/* ALLOC_PT_GPREGS_ON_STACK
* Since nothing in interrupt handling code touches r12...r15 members SAVE_C_REGS
* of "struct pt_regs", and since interrupts can nest, we can save SAVE_EXTRA_REGS
* four stack slots and simultaneously provide
* an unwind-friendly stack layout by saving "truncated" pt_regs
* exactly up to rbp slot, without these members.
*/
ALLOC_PT_GPREGS_ON_STACK -RBP
SAVE_C_REGS -RBP
/* this goes to 0(%rsp) for unwinder, not for saving the value: */
SAVE_EXTRA_REGS_RBP -RBP
leaq -RBP(%rsp), %rdi /* arg1 for \func (pointer to pt_regs) */ movq %rsp,%rdi /* arg1 for \func (pointer to pt_regs) */
testb $3, CS-RBP(%rsp) testb $3, CS(%rsp)
jz 1f jz 1f
SWAPGS SWAPGS
1: 1:
...@@ -553,9 +545,7 @@ ret_from_intr: ...@@ -553,9 +545,7 @@ ret_from_intr:
decl PER_CPU_VAR(irq_count) decl PER_CPU_VAR(irq_count)
/* Restore saved previous stack */ /* Restore saved previous stack */
popq %rsi popq %rsp
/* return code expects complete pt_regs - adjust rsp accordingly: */
leaq -RBP(%rsi), %rsp
testb $3, CS(%rsp) testb $3, CS(%rsp)
jz retint_kernel jz retint_kernel
...@@ -580,7 +570,7 @@ retint_swapgs: /* return to user-space */ ...@@ -580,7 +570,7 @@ retint_swapgs: /* return to user-space */
TRACE_IRQS_IRETQ TRACE_IRQS_IRETQ
SWAPGS SWAPGS
jmp restore_c_regs_and_iret jmp restore_regs_and_iret
/* Returning to kernel space */ /* Returning to kernel space */
retint_kernel: retint_kernel:
...@@ -604,6 +594,8 @@ retint_kernel: ...@@ -604,6 +594,8 @@ retint_kernel:
* At this label, code paths which return to kernel and to user, * At this label, code paths which return to kernel and to user,
* which come from interrupts/exception and from syscalls, merge. * which come from interrupts/exception and from syscalls, merge.
*/ */
restore_regs_and_iret:
RESTORE_EXTRA_REGS
restore_c_regs_and_iret: restore_c_regs_and_iret:
RESTORE_C_REGS RESTORE_C_REGS
REMOVE_PT_GPREGS_FROM_STACK 8 REMOVE_PT_GPREGS_FROM_STACK 8
...@@ -674,12 +666,10 @@ retint_signal: ...@@ -674,12 +666,10 @@ retint_signal:
jz retint_swapgs jz retint_swapgs
TRACE_IRQS_ON TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE) ENABLE_INTERRUPTS(CLBR_NONE)
SAVE_EXTRA_REGS
movq $-1, ORIG_RAX(%rsp) movq $-1, ORIG_RAX(%rsp)
xorl %esi, %esi /* oldset */ xorl %esi, %esi /* oldset */
movq %rsp, %rdi /* &pt_regs */ movq %rsp, %rdi /* &pt_regs */
call do_notify_resume call do_notify_resume
RESTORE_EXTRA_REGS
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF TRACE_IRQS_OFF
GET_THREAD_INFO(%rcx) GET_THREAD_INFO(%rcx)
...@@ -1160,7 +1150,6 @@ END(error_entry) ...@@ -1160,7 +1150,6 @@ END(error_entry)
*/ */
ENTRY(error_exit) ENTRY(error_exit)
movl %ebx, %eax movl %ebx, %eax
RESTORE_EXTRA_REGS
DISABLE_INTERRUPTS(CLBR_NONE) DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF TRACE_IRQS_OFF
testl %eax, %eax testl %eax, %eax
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment