Commit cf89ec92 authored by Harvey Harrison, committed by Ingo Molnar

x86: reduce ifdef sections in fault.c

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 6118f76f
@@ -508,6 +508,10 @@ static int vmalloc_fault(unsigned long address)
 	pmd_t *pmd, *pmd_ref;
 	pte_t *pte, *pte_ref;
 
+	/* Make sure we are in vmalloc area */
+	if (!(address >= VMALLOC_START && address < VMALLOC_END))
+		return -1;
+
 	/* Copy kernel mappings over when needed. This can also
 	   happen within a race in page table update. In the later
 	   case just flush. */
@@ -603,6 +607,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 */
 #ifdef CONFIG_X86_32
 	if (unlikely(address >= TASK_SIZE)) {
+#else
+	if (unlikely(address >= TASK_SIZE64)) {
+#endif
 		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
 		    vmalloc_fault(address) >= 0)
 			return;
@@ -618,6 +625,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 			goto bad_area_nosemaphore;
 	}
 
+
+#ifdef CONFIG_X86_32
 	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
 	   fault has been handled. */
 	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
@@ -630,28 +639,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 #else /* CONFIG_X86_64 */
-	if (unlikely(address >= TASK_SIZE64)) {
-		/*
-		 * Don't check for the module range here: its PML4
-		 * is always initialized because it's shared with the main
-		 * kernel text. Only vmalloc may need PML4 syncups.
-		 */
-		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
-		      ((address >= VMALLOC_START && address < VMALLOC_END))) {
-			if (vmalloc_fault(address) >= 0)
-				return;
-		}
-
-		/* Can handle a stale RO->RW TLB */
-		if (spurious_fault(address, error_code))
-			return;
-
-		/*
-		 * Don't take the mm semaphore here. If we fixup a prefetch
-		 * fault we could otherwise deadlock.
-		 */
-		goto bad_area_nosemaphore;
-	}
-
 	if (likely(regs->flags & X86_EFLAGS_IF))
 		local_irq_enable();
...
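
The net effect of the patch may be easier to see consolidated than as hunks. vmalloc_fault() now rejects addresses outside the vmalloc range itself, so do_page_fault() keeps a single kernel-address fast path for both architectures, and the remaining #ifdef shrinks to selecting the address limit (TASK_SIZE vs. TASK_SIZE64). The sketch below is illustrative only: declarations are simplified, the __kprobes annotation and most of the handler are elided, and it is not the verbatim fault.c code.

/* Illustrative sketch of the post-patch shape, not the actual fault.c. */

static int vmalloc_fault(unsigned long address)
{
	/* The range check now lives here, so callers no longer
	 * pre-filter addresses before calling vmalloc_fault(). */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/* ... copy kernel mappings over / flush, as before ... */
	return 0;
}

void do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = read_cr2();	/* faulting address */

	/* One shared fast path; only the address limit is per-arch. */
#ifdef CONFIG_X86_32
	if (unlikely(address >= TASK_SIZE)) {
#else
	if (unlikely(address >= TASK_SIZE64)) {
#endif
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    vmalloc_fault(address) >= 0)
			return;
		/* spurious-fault check and bad_area_nosemaphore follow */
	}

	/* ... rest of the handler: user addresses, mmap_sem, etc. ... */
}

Folding the range check into vmalloc_fault() is what lets the 64-bit-only block at the old line 630 disappear: both 32-bit and 64-bit can now call vmalloc_fault() unconditionally for kernel addresses, with the function itself bailing out early when the address is not in the vmalloc area.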