Commit 4c5023a3 authored by H. Peter Anvin, committed by H. Peter Anvin

x86-32: Handle exception table entries during early boot

If we get an exception during early boot, walk the exception table to
see if we should intercept it.  The main use case for this is to allow
rdmsr_safe()/wrmsr_safe() during CPU initialization.

Since the exception table is currently sorted at runtime, and fairly
late in startup, this code walks the exception table linearly.  We
obviously don't need to worry about modules, however: none have been
loaded at this point.

This patch changes the early IDT setup to look a lot more like x86-64:
we now install handlers for all 32 exception vectors.  The output of
the early exception handler has changed somewhat as it directly
reflects the stack frame of the exception handler, and the stack frame
has been somewhat restructured.

Finally, centralize the code that can and should be run only once.

[ v2: Use early_fixup_exception() instead of linear search ]
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Link: http://lkml.kernel.org/r/1334794610-5546-6-git-send-email-hpa@zytor.com
parent 9900aa2f
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <asm/msr-index.h> #include <asm/msr-index.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/percpu.h> #include <asm/percpu.h>
#include <asm/nops.h>
/* Physical address */ /* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET) #define pa(X) ((X) - __PAGE_OFFSET)
...@@ -363,20 +364,15 @@ default_entry: ...@@ -363,20 +364,15 @@ default_entry:
pushl $0 pushl $0
popfl popfl
#ifdef CONFIG_SMP
cmpb $0, ready
jnz checkCPUtype
#endif /* CONFIG_SMP */
/* /*
* start system 32-bit setup. We need to re-do some of the things done * start system 32-bit setup. We need to re-do some of the things done
* in 16-bit mode for the "real" operations. * in 16-bit mode for the "real" operations.
*/ */
call setup_idt movl setup_once_ref,%eax
andl %eax,%eax
checkCPUtype: jz 1f # Did we do this already?
call *%eax
movl $-1,X86_CPUID # -1 for no CPUID initially 1:
/* check if it is 486 or 386. */ /* check if it is 486 or 386. */
/* /*
...@@ -384,7 +380,7 @@ checkCPUtype: ...@@ -384,7 +380,7 @@ checkCPUtype:
* apply at our cpl of 0 and the stack ought to be aligned already, and * apply at our cpl of 0 and the stack ought to be aligned already, and
* we don't need to preserve eflags. * we don't need to preserve eflags.
*/ */
movl $-1,X86_CPUID # -1 for no CPUID initially
movb $3,X86 # at least 386 movb $3,X86 # at least 386
pushfl # push EFLAGS pushfl # push EFLAGS
popl %eax # get EFLAGS popl %eax # get EFLAGS
...@@ -450,21 +446,6 @@ is386: movl $2,%ecx # set MP ...@@ -450,21 +446,6 @@ is386: movl $2,%ecx # set MP
movl $(__KERNEL_PERCPU), %eax movl $(__KERNEL_PERCPU), %eax
movl %eax,%fs # set this cpu's percpu movl %eax,%fs # set this cpu's percpu
#ifdef CONFIG_CC_STACKPROTECTOR
/*
* The linker can't handle this by relocation. Manually set
* base address in stack canary segment descriptor.
*/
cmpb $0,ready
jne 1f
movl $gdt_page,%eax
movl $stack_canary,%ecx
movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
shrl $16, %ecx
movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
1:
#endif
movl $(__KERNEL_STACK_CANARY),%eax movl $(__KERNEL_STACK_CANARY),%eax
movl %eax,%gs movl %eax,%gs
...@@ -473,7 +454,6 @@ is386: movl $2,%ecx # set MP ...@@ -473,7 +454,6 @@ is386: movl $2,%ecx # set MP
cld # gcc2 wants the direction flag cleared at all times cld # gcc2 wants the direction flag cleared at all times
pushl $0 # fake return address for unwinder pushl $0 # fake return address for unwinder
movb $1, ready
jmp *(initial_code) jmp *(initial_code)
/* /*
...@@ -495,81 +475,122 @@ check_x87: ...@@ -495,81 +475,122 @@ check_x87:
.byte 0xDB,0xE4 /* fsetpm for 287, ignored by 387 */ .byte 0xDB,0xE4 /* fsetpm for 287, ignored by 387 */
ret ret
#include "verify_cpu.S"
/* /*
* setup_idt * setup_once
* *
* sets up a idt with 256 entries pointing to * The setup work we only want to run on the BSP.
* ignore_int, interrupt gates. It doesn't actually load
* idt - that can be done only after paging has been enabled
* and the kernel moved to PAGE_OFFSET. Interrupts
* are enabled elsewhere, when we can be relatively
* sure everything is ok.
* *
* Warning: %esi is live across this function. * Warning: %esi is live across this function.
*/ */
setup_idt: __INIT
lea ignore_int,%edx setup_once:
movl $(__KERNEL_CS << 16),%eax /*
movw %dx,%ax /* selector = 0x0010 = cs */ * Set up a idt with 256 entries pointing to ignore_int,
movw $0x8E00,%dx /* interrupt gate - dpl=0, present */ * interrupt gates. It doesn't actually load idt - that needs
* to be done on each CPU. Interrupts are enabled elsewhere,
* when we can be relatively sure everything is ok.
*/
lea idt_table,%edi movl $idt_table,%edi
mov $256,%ecx movl $early_idt_handlers,%eax
rp_sidt: movl $NUM_EXCEPTION_VECTORS,%ecx
1:
movl %eax,(%edi) movl %eax,(%edi)
movl %edx,4(%edi) movl %eax,4(%edi)
/* interrupt gate, dpl=0, present */
movl $(0x8E000000 + __KERNEL_CS),2(%edi)
addl $9,%eax
addl $8,%edi addl $8,%edi
dec %ecx loop 1b
jne rp_sidt
.macro set_early_handler handler,trapno movl $256 - NUM_EXCEPTION_VECTORS,%ecx
lea \handler,%edx movl $ignore_int,%edx
movl $(__KERNEL_CS << 16),%eax movl $(__KERNEL_CS << 16),%eax
movw %dx,%ax movw %dx,%ax /* selector = 0x0010 = cs */
movw $0x8E00,%dx /* interrupt gate - dpl=0, present */ movw $0x8E00,%dx /* interrupt gate - dpl=0, present */
lea idt_table,%edi 2:
movl %eax,8*\trapno(%edi) movl %eax,(%edi)
movl %edx,8*\trapno+4(%edi) movl %edx,4(%edi)
.endm addl $8,%edi
loop 2b
set_early_handler handler=early_divide_err,trapno=0 #ifdef CONFIG_CC_STACKPROTECTOR
set_early_handler handler=early_illegal_opcode,trapno=6 /*
set_early_handler handler=early_protection_fault,trapno=13 * Configure the stack canary. The linker can't handle this by
set_early_handler handler=early_page_fault,trapno=14 * relocation. Manually set base address in stack canary
* segment descriptor.
*/
movl $gdt_page,%eax
movl $stack_canary,%ecx
movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
shrl $16, %ecx
movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
#endif
andl $0,setup_once_ref /* Once is enough, thanks */
ret ret
early_divide_err: ENTRY(early_idt_handlers)
xor %edx,%edx # 36(%esp) %eflags
pushl $0 /* fake errcode */ # 32(%esp) %cs
jmp early_fault # 28(%esp) %eip
                                        # 24(%esp) error code
i = 0
.rept NUM_EXCEPTION_VECTORS
.if (EXCEPTION_ERRCODE_MASK >> i) & 1
ASM_NOP2
.else
pushl $0 # Dummy error code, to make stack frame uniform
.endif
pushl $i # 20(%esp) Vector number
jmp early_idt_handler
i = i + 1
.endr
ENDPROC(early_idt_handlers)
/* This is global to keep gas from relaxing the jumps */
ENTRY(early_idt_handler)
cld
cmpl $2,%ss:early_recursion_flag
je hlt_loop
incl %ss:early_recursion_flag
early_illegal_opcode: push %eax # 16(%esp)
movl $6,%edx push %ecx # 12(%esp)
pushl $0 /* fake errcode */ push %edx # 8(%esp)
jmp early_fault push %ds # 4(%esp)
push %es # 0(%esp)
movl $(__KERNEL_DS),%eax
movl %eax,%ds
movl %eax,%es
early_protection_fault: cmpl $(__KERNEL_CS),32(%esp)
movl $13,%edx jne 10f
jmp early_fault
early_page_fault: leal 28(%esp),%eax # Pointer to %eip
movl $14,%edx call early_fixup_exception
jmp early_fault andl %eax,%eax
jnz ex_entry /* found an exception entry */
early_fault: 10:
cld
#ifdef CONFIG_PRINTK #ifdef CONFIG_PRINTK
pusha xorl %eax,%eax
movl $(__KERNEL_DS),%eax movw %ax,2(%esp) /* clean up the segment values on some cpus */
movl %eax,%ds movw %ax,6(%esp)
movl %eax,%es movw %ax,34(%esp)
cmpl $2,early_recursion_flag leal 40(%esp),%eax
je hlt_loop pushl %eax /* %esp before the exception */
incl early_recursion_flag pushl %ebx
pushl %ebp
pushl %esi
pushl %edi
movl %cr2,%eax movl %cr2,%eax
pushl %eax pushl %eax
pushl %edx /* trapno */ pushl (20+6*4)(%esp) /* trapno */
pushl $fault_msg pushl $fault_msg
call printk call printk
#endif #endif
...@@ -578,6 +599,17 @@ hlt_loop: ...@@ -578,6 +599,17 @@ hlt_loop:
hlt hlt
jmp hlt_loop jmp hlt_loop
ex_entry:
pop %es
pop %ds
pop %edx
pop %ecx
pop %eax
addl $8,%esp /* drop vector number and error code */
decl %ss:early_recursion_flag
iret
ENDPROC(early_idt_handler)
/* This is the default interrupt "handler" :-) */ /* This is the default interrupt "handler" :-) */
ALIGN ALIGN
ignore_int: ignore_int:
...@@ -611,13 +643,18 @@ ignore_int: ...@@ -611,13 +643,18 @@ ignore_int:
popl %eax popl %eax
#endif #endif
iret iret
ENDPROC(ignore_int)
__INITDATA
.align 4
early_recursion_flag:
.long 0
#include "verify_cpu.S" __REFDATA
.align 4
__REFDATA
.align 4
ENTRY(initial_code) ENTRY(initial_code)
.long i386_start_kernel .long i386_start_kernel
ENTRY(setup_once_ref)
.long setup_once
/* /*
* BSS section * BSS section
...@@ -670,22 +707,19 @@ ENTRY(initial_page_table) ...@@ -670,22 +707,19 @@ ENTRY(initial_page_table)
ENTRY(stack_start) ENTRY(stack_start)
.long init_thread_union+THREAD_SIZE .long init_thread_union+THREAD_SIZE
early_recursion_flag: __INITRODATA
.long 0
ready: .byte 0
int_msg: int_msg:
.asciz "Unknown interrupt or fault at: %p %p %p\n" .asciz "Unknown interrupt or fault at: %p %p %p\n"
fault_msg: fault_msg:
/* fault info: */ /* fault info: */
.ascii "BUG: Int %d: CR2 %p\n" .ascii "BUG: Int %d: CR2 %p\n"
/* pusha regs: */ /* regs pushed in early_idt_handler: */
.ascii " EDI %p ESI %p EBP %p ESP %p\n" .ascii " EDI %p ESI %p EBP %p EBX %p\n"
.ascii " EBX %p EDX %p ECX %p EAX %p\n" .ascii " ESP %p ES %p DS %p\n"
.ascii " EDX %p ECX %p EAX %p\n"
/* fault frame: */ /* fault frame: */
.ascii " err %p EIP %p CS %p flg %p\n" .ascii " vec %p err %p EIP %p CS %p flg %p\n"
.ascii "Stack: %p %p %p %p %p %p %p %p\n" .ascii "Stack: %p %p %p %p %p %p %p %p\n"
.ascii " %p %p %p %p %p %p %p %p\n" .ascii " %p %p %p %p %p %p %p %p\n"
.asciz " %p %p %p %p %p %p %p %p\n" .asciz " %p %p %p %p %p %p %p %p\n"
...@@ -699,6 +733,7 @@ fault_msg: ...@@ -699,6 +733,7 @@ fault_msg:
* segment size, and 32-bit linear address value: * segment size, and 32-bit linear address value:
*/ */
.data
.globl boot_gdt_descr .globl boot_gdt_descr
.globl idt_descr .globl idt_descr
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment