Commit 3a4ec59d authored by Linus Torvalds, committed by Jeff Garzik

Fix a rather theoretical race if an NMI happens when a debug fault

happens exactly on the sysenter entry path before the kernel stacks
have been switched to the proper ones.
parent 0e6cb0bc
...@@ -73,10 +73,10 @@ NT_MASK = 0x00004000 ...@@ -73,10 +73,10 @@ NT_MASK = 0x00004000
VM_MASK = 0x00020000 VM_MASK = 0x00020000
/* /*
* ESP0 is at offset 4. 0x100 is the size of the TSS, and * ESP0 is at offset 4. 0x200 is the size of the TSS, and
* also thus the top-of-stack pointer offset of SYSENTER_ESP * also thus the top-of-stack pointer offset of SYSENTER_ESP
*/ */
TSS_ESP0_OFFSET = (4 - 0x100) TSS_ESP0_OFFSET = (4 - 0x200)
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
#define preempt_stop cli #define preempt_stop cli
...@@ -479,23 +479,32 @@ device_not_available_emulate: ...@@ -479,23 +479,32 @@ device_not_available_emulate:
* by hand onto the new stack - while updating the return eip past * by hand onto the new stack - while updating the return eip past
* the instruction that would have done it for sysenter. * the instruction that would have done it for sysenter.
*/ */
#define CHECK_SYSENTER_EIP \ #define FIX_STACK(offset, ok, label) \
cmpl $sysenter_entry,(%esp); \ cmpw $__KERNEL_CS,4(%esp); \
jne 1f; \ jne ok; \
movl TSS_ESP0_OFFSET+12(%esp),%esp; \ label: \
movl TSS_ESP0_OFFSET+offset(%esp),%esp; \
pushfl; \ pushfl; \
pushl $__KERNEL_CS; \ pushl $__KERNEL_CS; \
pushl $sysenter_past_esp; \ pushl $sysenter_past_esp
1:
ENTRY(debug) ENTRY(debug)
CHECK_SYSENTER_EIP cmpl $sysenter_entry,(%esp)
jne debug_stack_correct
FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
pushl $0 pushl $0
pushl $do_debug pushl $do_debug
jmp error_code jmp error_code
ENTRY(nmi) ENTRY(nmi)
CHECK_SYSENTER_EIP cmpl $sysenter_entry,(%esp)
je nmi_stack_fixup
cmpl $debug - 1,(%esp)
jle nmi_stack_correct
cmpl $debug_esp_fix_insn,(%esp)
jle nmi_debug_stack_fixup
nmi_stack_correct:
pushl %eax pushl %eax
SAVE_ALL SAVE_ALL
movl %esp, %edx movl %esp, %edx
...@@ -505,6 +514,13 @@ ENTRY(nmi) ...@@ -505,6 +514,13 @@ ENTRY(nmi)
addl $8, %esp addl $8, %esp
RESTORE_ALL RESTORE_ALL
nmi_stack_fixup:
FIX_STACK(12,nmi_stack_correct, 1)
jmp nmi_stack_correct
nmi_debug_stack_fixup:
FIX_STACK(24,nmi_stack_correct, 1)
jmp nmi_stack_correct
ENTRY(int3) ENTRY(int3)
pushl $0 pushl $0
pushl $do_int3 pushl $do_int3
......
...@@ -371,6 +371,10 @@ struct tss_struct { ...@@ -371,6 +371,10 @@ struct tss_struct {
* pads the TSS to be cacheline-aligned (size is 0x100) * pads the TSS to be cacheline-aligned (size is 0x100)
*/ */
unsigned long __cacheline_filler[5]; unsigned long __cacheline_filler[5];
/*
* .. and then another 0x100 bytes for emergency kernel stack
*/
unsigned long stack[64];
}; };
struct thread_struct { struct thread_struct {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment