Commit 608566b4 authored by Harvey Harrison, committed by Ingo Molnar

x86: do_page_fault small unification

Copy the prefetch of mmap_sem from X86_64 and move the
notify_page_fault() check (soon to be kprobe_handle_fault())
out of the unlikely if() statement.

This brings the X86_32|64 page-fault handlers closer to each
other.
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent f2857ce9
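The patch leaves both handlers with the same prologue. A minimal sketch of that shared shape, assuming the surrounding declarations and helpers from the x86 fault handlers (an illustration, not the verbatim source):

void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long address;
	int si_code;

	tsk = current;
	mm = tsk->mm;
	/* Write-intent prefetch: mmap_sem is almost always taken, and
	 * therefore written, later in the fault path. */
	prefetchw(&mm->mmap_sem);

	/* The faulting address is saved by the CPU in CR2. */
	address = read_cr2();

	si_code = SEGV_MAPERR;

	/* Give a registered kprobes fault handler the chance to claim
	 * the fault before any further processing. */
	if (notify_page_fault(regs))
		return;

	/* ... vmalloc fault handling, then the normal user-space path ... */
}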
arch/x86/mm/fault_32.c

@@ -295,13 +295,18 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 */
 	trace_hardirqs_fixup();
 
+	tsk = current;
+	mm = tsk->mm;
+	prefetchw(&mm->mmap_sem);
+
 	/* get the address */
 	address = read_cr2();
 
-	tsk = current;
-
 	si_code = SEGV_MAPERR;
 
+	if (notify_page_fault(regs))
+		return;
+
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
 	 * 'reference' page table is init_mm.pgd.
@@ -319,8 +324,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
 		    vmalloc_fault(address) >= 0)
 			return;
-		if (notify_page_fault(regs))
-			return;
 		/*
 		 * Don't take the mm semaphore here. If we fixup a prefetch
 		 * fault we could otherwise deadlock.
@@ -328,16 +331,11 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		goto bad_area_nosemaphore;
 	}
 
-	if (notify_page_fault(regs))
-		return;
-
 	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
 	   fault has been handled. */
 	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
 		local_irq_enable();
 
-	mm = tsk->mm;
-
 	/*
 	 * If we're in an interrupt, have no user context or are running in an
 	 * atomic region then we must not take the fault.
...
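The prefetchw() copied into the 32-bit handler above is the kernel's write-intent prefetch hint from <linux/prefetch.h>. It may expand to a real prefetch instruction or to nothing at all, so only performance, never correctness, may depend on it. A sketch of the pattern it enables (the function name and everything besides the semaphore calls are illustrative):

#include <linux/prefetch.h>
#include <linux/rwsem.h>
#include <linux/sched.h>

/* Illustrative only: start pulling mmap_sem's cacheline in early,
 * so unrelated setup work hides the memory latency. */
static void fault_path_sketch(struct mm_struct *mm)
{
	prefetchw(&mm->mmap_sem);	/* hint only; may be a no-op */

	/* ... decode the faulting address, run notifier checks ... */

	down_read(&mm->mmap_sem);	/* the semaphore word is written here */
	/* ... find_vma(), handle_mm_fault() ... */
	up_read(&mm->mmap_sem);
}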
arch/x86/mm/fault_64.c

@@ -355,6 +355,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 
 	si_code = SEGV_MAPERR;
 
+	if (notify_page_fault(regs))
+		return;
 
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
@@ -380,8 +382,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 			if (vmalloc_fault(address) >= 0)
 				return;
 		}
-		if (notify_page_fault(regs))
-			return;
 		/*
 		 * Don't take the mm semaphore here. If we fixup a prefetch
 		 * fault we could otherwise deadlock.
@@ -389,9 +389,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		goto bad_area_nosemaphore;
 	}
 
-	if (notify_page_fault(regs))
-		return;
-
 	if (likely(regs->flags & X86_EFLAGS_IF))
 		local_irq_enable();
 
...
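For reference, the notify_page_fault() helper that both handlers now call up front had roughly this shape in kernels of this vintage (a sketch assuming the CONFIG_KPROBES implementation of the time; vector 14 is the x86 page-fault exception):

static inline int notify_page_fault(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	int ret = 0;

	/* kprobe_running() uses smp_processor_id(), so preemption
	 * must be disabled around the check. */
	if (!user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
#else
	return 0;
#endif
}

After this patch the check runs once, before the vmalloc fixup, instead of being duplicated on both the kernel-address and user-address paths.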