Commit 1067f030 authored by Ricardo Neri, committed by Thomas Gleixner

x86/mm: Relocate page fault error codes to traps.h

Up to this point, only fault.c used the definitions of the page fault error
codes. Thus, it made sense to keep them within that file. Other parts of the
code might be interested in those definitions too. For instance, the User-
Mode Instruction Prevention emulation code will use these definitions to
emulate a page fault when it is unable to successfully copy the results
of the emulated instructions to user space.

While relocating the error code enumeration, the prefix X86_ is added to
make it consistent with the rest of the definitions in traps.h. Of course,
code using the enumeration had to be updated as well. No functional changes
were performed.
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: ricardo.neri@intel.com
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: "Ravi V. Shankar" <ravi.v.shankar@intel.com>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Chen Yucong <slaoub@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: https://lkml.kernel.org/r/1509135945-13762-2-git-send-email-ricardo.neri-calderon@linux.intel.com
parent 287683d0
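
For readers unfamiliar with the bit layout, here is a minimal user-space sketch of how the relocated X86_PF_* bits compose and decode a page fault error code. The enum mirrors the one added to traps.h in the diff below; the decode_pf() helper and the example in main() are illustrative only and are not part of the kernel.

#include <stdio.h>

enum x86_pf_error_code {
	X86_PF_PROT	= 1 << 0,	/* 0: no page found, 1: protection fault */
	X86_PF_WRITE	= 1 << 1,	/* 0: read access,   1: write access */
	X86_PF_USER	= 1 << 2,	/* 0: kernel mode,   1: user mode */
	X86_PF_RSVD	= 1 << 3,	/* 1: reserved bit set in a paging entry */
	X86_PF_INSTR	= 1 << 4,	/* 1: fault was an instruction fetch */
	X86_PF_PK	= 1 << 5,	/* 1: protection keys blocked the access */
};

/* Hypothetical helper: pretty-print the bits of one error code. */
static void decode_pf(unsigned long error_code)
{
	printf("%#lx: %s-mode %s, %s%s%s%s\n", error_code,
	       (error_code & X86_PF_USER)  ? "user"  : "kernel",
	       (error_code & X86_PF_WRITE) ? "write" : "read",
	       (error_code & X86_PF_PROT)  ? "protection fault" : "page not present",
	       (error_code & X86_PF_INSTR) ? ", instruction fetch" : "",
	       (error_code & X86_PF_RSVD)  ? ", reserved bit set" : "",
	       (error_code & X86_PF_PK)    ? ", protection key" : "");
}

int main(void)
{
	/* e.g. a user-mode write to a present but read-only page: */
	decode_pf(X86_PF_USER | X86_PF_WRITE | X86_PF_PROT);
	return 0;
}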
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -144,4 +144,22 @@ enum {
 	X86_TRAP_IRET = 32,	/* 32, IRET Exception */
 };
 
+/*
+ * Page fault error code bits:
+ *
+ *   bit 0 ==	 0: no page found	1: protection fault
+ *   bit 1 ==	 0: read access		1: write access
+ *   bit 2 ==	 0: kernel-mode access	1: user-mode access
+ *   bit 3 ==				1: use of reserved bit detected
+ *   bit 4 ==				1: fault was an instruction fetch
+ *   bit 5 ==				1: protection keys block access
+ */
+enum x86_pf_error_code {
+	X86_PF_PROT	=	1 << 0,
+	X86_PF_WRITE	=	1 << 1,
+	X86_PF_USER	=	1 << 2,
+	X86_PF_RSVD	=	1 << 3,
+	X86_PF_INSTR	=	1 << 4,
+	X86_PF_PK	=	1 << 5,
+};
 #endif /* _ASM_X86_TRAPS_H */
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -28,26 +28,6 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
 
-/*
- * Page fault error code bits:
- *
- *   bit 0 ==	 0: no page found	1: protection fault
- *   bit 1 ==	 0: read access		1: write access
- *   bit 2 ==	 0: kernel-mode access	1: user-mode access
- *   bit 3 ==				1: use of reserved bit detected
- *   bit 4 ==				1: fault was an instruction fetch
- *   bit 5 ==				1: protection keys block access
- */
-enum x86_pf_error_code {
-
-	PF_PROT		=	1 << 0,
-	PF_WRITE	=	1 << 1,
-	PF_USER		=	1 << 2,
-	PF_RSVD		=	1 << 3,
-	PF_INSTR	=	1 << 4,
-	PF_PK		=	1 << 5,
-};
-
 /*
  * Returns 0 if mmiotrace is disabled, or if the fault is not
  * handled by mmiotrace:
@@ -149,7 +129,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
	 * If it was a exec (instruction fetch) fault on NX page, then
	 * do not ignore the fault:
	 */
-	if (error_code & PF_INSTR)
+	if (error_code & X86_PF_INSTR)
		return 0;
 
	instr = (void *)convert_ip_to_linear(current, regs);
@@ -179,7 +159,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
	 * siginfo so userspace can discover which protection key was set
	 * on the PTE.
	 *
-	 * If we get here, we know that the hardware signaled a PF_PK
+	 * If we get here, we know that the hardware signaled a X86_PF_PK
	 * fault and that there was a VMA once we got in the fault
	 * handler. It does *not* guarantee that the VMA we find here
	 * was the one that we faulted on.
@@ -204,7 +184,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
	/*
	 * force_sig_info_fault() is called from a number of
	 * contexts, some of which have a VMA and some of which
-	 * do not. The PF_PK handing happens after we have a
+	 * do not. The X86_PF_PK handing happens after we have a
	 * valid VMA, so we should never reach this without a
	 * valid VMA.
	 */
@@ -697,7 +677,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
	if (!oops_may_print())
		return;
 
-	if (error_code & PF_INSTR) {
+	if (error_code & X86_PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;
@@ -779,7 +759,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
	 */
	if (current->thread.sig_on_uaccess_err && signal) {
		tsk->thread.trap_nr = X86_TRAP_PF;
-		tsk->thread.error_code = error_code | PF_USER;
+		tsk->thread.error_code = error_code | X86_PF_USER;
		tsk->thread.cr2 = address;
 
		/* XXX: hwpoison faults will set the wrong code. */
@@ -897,7 +877,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
	struct task_struct *tsk = current;
 
	/* User mode accesses just cause a SIGSEGV */
-	if (error_code & PF_USER) {
+	if (error_code & X86_PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
@@ -918,7 +898,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
-		if (unlikely((error_code & PF_INSTR) &&
+		if (unlikely((error_code & X86_PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_ADDR))) {
			if (emulate_vsyscall(regs, address))
				return;
@@ -931,7 +911,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		 * are always protection faults.
		 */
		if (address >= TASK_SIZE_MAX)
-			error_code |= PF_PROT;
+			error_code |= X86_PF_PROT;
 
		if (likely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);
@@ -992,11 +972,11 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return false;
 
-	if (error_code & PF_PK)
+	if (error_code & X86_PF_PK)
		return true;
	/* this checks permission keys on the VMA: */
-	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
-				(error_code & PF_INSTR), foreign))
+	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+				       (error_code & X86_PF_INSTR), foreign))
		return true;
	return false;
 }
@@ -1024,7 +1004,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	int code = BUS_ADRERR;
 
	/* Kernel mode? Handle exceptions or die: */
-	if (!(error_code & PF_USER)) {
+	if (!(error_code & X86_PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}
@@ -1052,14 +1032,14 @@ static noinline void
 mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, u32 *pkey, unsigned int fault)
 {
-	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+	if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
		no_context(regs, error_code, address, 0, 0);
		return;
	}
 
	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
-		if (!(error_code & PF_USER)) {
+		if (!(error_code & X86_PF_USER)) {
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return;
@@ -1084,16 +1064,16 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 
 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
 {
-	if ((error_code & PF_WRITE) && !pte_write(*pte))
+	if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
		return 0;
 
-	if ((error_code & PF_INSTR) && !pte_exec(*pte))
+	if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
		return 0;
	/*
	 * Note: We do not do lazy flushing on protection key
-	 * changes, so no spurious fault will ever set PF_PK.
+	 * changes, so no spurious fault will ever set X86_PF_PK.
	 */
-	if ((error_code & PF_PK))
+	if ((error_code & X86_PF_PK))
		return 1;
 
	return 1;
@@ -1139,8 +1119,8 @@ spurious_fault(unsigned long error_code, unsigned long address)
	 * change, so user accesses are not expected to cause spurious
	 * faults.
	 */
-	if (error_code != (PF_WRITE | PF_PROT)
-	    && error_code != (PF_INSTR | PF_PROT))
+	if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
+	    error_code != (X86_PF_INSTR | X86_PF_PROT))
		return 0;
 
	pgd = init_mm.pgd + pgd_index(address);
@@ -1200,19 +1180,19 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
	 * always an unconditional error and can never result in
	 * a follow-up action to resolve the fault, like a COW.
	 */
-	if (error_code & PF_PK)
+	if (error_code & X86_PF_PK)
		return 1;
 
	/*
	 * Make sure to check the VMA so that we do not perform
-	 * faults just to hit a PF_PK as soon as we fill in a
+	 * faults just to hit a X86_PF_PK as soon as we fill in a
	 * page.
	 */
-	if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
-				(error_code & PF_INSTR), foreign))
+	if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+				       (error_code & X86_PF_INSTR), foreign))
		return 1;
 
-	if (error_code & PF_WRITE) {
+	if (error_code & X86_PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
@@ -1220,7 +1200,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
	}
 
	/* read, present: */
-	if (unlikely(error_code & PF_PROT))
+	if (unlikely(error_code & X86_PF_PROT))
		return 1;
 
	/* read, not present: */
@@ -1243,7 +1223,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
	if (!static_cpu_has(X86_FEATURE_SMAP))
		return false;
 
-	if (error_code & PF_USER)
+	if (error_code & X86_PF_USER)
		return false;
 
	if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
@@ -1296,7 +1276,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
-		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
+		if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;
 
@@ -1324,7 +1304,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
	if (unlikely(kprobes_fault(regs)))
		return;
 
-	if (unlikely(error_code & PF_RSVD))
+	if (unlikely(error_code & X86_PF_RSVD))
		pgtable_bad(regs, error_code, address);
 
	if (unlikely(smap_violation(error_code, regs))) {
@@ -1350,7 +1330,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
	 */
	if (user_mode(regs)) {
		local_irq_enable();
-		error_code |= PF_USER;
+		error_code |= X86_PF_USER;
		flags |= FAULT_FLAG_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
@@ -1359,9 +1339,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	if (error_code & PF_WRITE)
+	if (error_code & X86_PF_WRITE)
		flags |= FAULT_FLAG_WRITE;
-	if (error_code & PF_INSTR)
+	if (error_code & X86_PF_INSTR)
		flags |= FAULT_FLAG_INSTRUCTION;
 
	/*
@@ -1381,7 +1361,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
-		if ((error_code & PF_USER) == 0 &&
+		if (!(error_code & X86_PF_USER) &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address, NULL);
			return;
@@ -1408,7 +1388,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
		bad_area(regs, error_code, address);
		return;
	}
-	if (error_code & PF_USER) {
+	if (error_code & X86_PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
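
As context for the use case named in the commit message, here is a hedged user-space sketch of the pattern the UMIP emulation code can follow now that the definitions are shared: when the results of an emulated instruction cannot be copied to user space, fabricate the error code a real hardware fault would have carried. copy_results_to_user() and emulate_and_report() are hypothetical stand-ins for illustration; this is not the UMIP patch itself, which landed separately.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the X86_PF_* values relocated to traps.h. */
#define X86_PF_PROT	(1 << 0)
#define X86_PF_WRITE	(1 << 1)
#define X86_PF_USER	(1 << 2)

/* Hypothetical stand-in for a copy to user space that fails. */
static bool copy_results_to_user(void)
{
	return false;
}

/*
 * On a failed copy, report a user-mode write to a non-present page
 * (X86_PF_PROT clear), i.e. what the hardware would have signaled had
 * the faulting instruction executed natively.
 */
static unsigned long emulate_and_report(void)
{
	if (!copy_results_to_user())
		return X86_PF_USER | X86_PF_WRITE;
	return 0;
}

int main(void)
{
	printf("synthetic error_code = %#lx\n", emulate_and_report());
	return 0;
}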