Commit 33692f27 authored by Linus Torvalds

vm: add VM_FAULT_SIGSEGV handling support

The core VM already knows about VM_FAULT_SIGBUS, but cannot return a
"you should SIGSEGV" error, because the SIGSEGV case was generally
handled by the caller - usually the architecture fault handler.

That results in lots of duplication - all the architecture fault
handlers end up doing very similar "look up vma, check permissions, do
retries etc" - but it generally works.  However, there are cases where
the VM actually wants to SIGSEGV, and applications _expect_ SIGSEGV.

In particular, when accessing the stack guard page, libsigsegv expects a
SIGSEGV.  And it usually got one, because the stack growth is handled by
that duplicated architecture fault handler.

However, when the generic VM layer started propagating the error return
from the stack expansion in commit fee7e49d ("mm: propagate error
from stack expansion even for guard page"), that now exposed the
existing VM_FAULT_SIGBUS result to user space.  And user space really
expected SIGSEGV, not SIGBUS.
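
For illustration only (this program is not part of the patch): a minimal
userspace sketch, in the style of what libsigsegv does, that overflows the
stack and reports which signal the kernel delivers when the guard page is
hit.  Under the regression described above it reports SIGBUS; with this
patch it reports SIGSEGV again.

#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static char altstack[64 * 1024];

static void handler(int sig)
{
	/* async-signal-safe output only */
	if (sig == SIGSEGV)
		write(STDOUT_FILENO, "SIGSEGV\n", 8);
	else
		write(STDOUT_FILENO, "SIGBUS\n", 7);
	_exit(0);
}

static int recurse(int depth)
{
	volatile char buf[4096];	/* burn roughly a page of stack per call */

	buf[0] = (char)depth;
	return recurse(depth + 1) + buf[0];	/* not a tail call, so frames pile up */
}

int main(void)
{
	stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack) };
	struct sigaction sa = { .sa_handler = handler, .sa_flags = SA_ONSTACK };

	sigaltstack(&ss, NULL);		/* the handler cannot run on the overflowed stack */
	sigaction(SIGSEGV, &sa, NULL);
	sigaction(SIGBUS, &sa, NULL);

	return recurse(0);		/* recurse until we hit the guard page */
}
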

To fix that case, we need to add a VM_FAULT_SIGSEGV, and teach all those
duplicate architecture fault handlers about it.  They all already have
the code to handle SIGSEGV, so it's about just tying that new return
value to the existing code, but it's all a bit annoying.

This is the mindless minimal patch to do this.  A more extensive patch
would be to try to gather up the mostly shared fault handling logic into
one generic helper routine, and long-term we really should do that
cleanup.
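
As a purely hypothetical sketch of that cleanup (the helper and the
fault_outcome enum below are invented names for illustration, not anything
this patch adds), the shared logic might look roughly like this, with
mmap_sem locking, the retry loop and the actual signal delivery still left
to each architecture:

#include <linux/mm.h>

enum fault_outcome {
	FAULT_HANDLED,		/* page is in place, return to userspace */
	FAULT_RETRY,		/* mmap_sem was dropped, caller should retry */
	FAULT_OOM,		/* caller invokes the OOM path */
	FAULT_SIGBUS,
	FAULT_SIGSEGV,
};

static enum fault_outcome generic_vm_fault(struct mm_struct *mm,
					   unsigned long address,
					   unsigned int flags, bool write)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, address);
	if (!vma)
		return FAULT_SIGSEGV;			/* nothing mapped here */
	if (vma->vm_start > address) {
		if (!(vma->vm_flags & VM_GROWSDOWN) ||
		    expand_stack(vma, address))
			return FAULT_SIGSEGV;		/* not a growable stack */
	}
	if (write && !(vma->vm_flags & VM_WRITE))
		return FAULT_SIGSEGV;			/* permission failure */

	fault = handle_mm_fault(mm, vma, address, flags);
	if (fault & VM_FAULT_RETRY)
		return FAULT_RETRY;
	if (fault & VM_FAULT_OOM)
		return FAULT_OOM;
	if (fault & VM_FAULT_SIGSEGV)			/* the bit this patch adds */
		return FAULT_SIGSEGV;
	if (fault & VM_FAULT_ERROR)			/* SIGBUS, HWPOISON, ... */
		return FAULT_SIGBUS;
	return FAULT_HANDLED;
}
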

Just from this patch, you can generally see that most architectures just
copied (directly or indirectly) the old x86 way of doing things, but in
the meantime that original x86 model has been improved to hold the VM
semaphore for shorter times etc and to handle VM_FAULT_RETRY and other
"newer" things, so it would be a good idea to bring all those
improvements to the generic case and teach other architectures about
them too.
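
For reference, the x86-style VM_FAULT_RETRY handling alluded to here looks
roughly like the following (a simplified sketch, not the exact x86 code;
the fatal-signal and error checks are omitted, and mm/vma/address are
assumed to come from the usual vma lookup):

	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
retry:
	fault = handle_mm_fault(mm, vma, address, flags);

	if (unlikely(fault & VM_FAULT_RETRY)) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/* handle_mm_fault() already released mmap_sem */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			/* the real code re-takes mmap_sem and redoes find_vma() here */
			goto retry;
		}
	}
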
Reported-and-tested-by: Takashi Iwai <tiwai@suse.de>
Tested-by: Jan Engelhardt <jengelh@inai.de>
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com> # "s390 still compiles and boots"
Cc: linux-arch@vger.kernel.org
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c59c961c
@@ -156,6 +156,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -161,6 +161,8 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
...
@@ -142,6 +142,8 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -176,6 +176,8 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -172,6 +172,8 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 */
 	if (fault & VM_FAULT_OOM) {
 		goto out_of_memory;
+	} else if (fault & VM_FAULT_SIGSEGV) {
+		goto bad_area;
 	} else if (fault & VM_FAULT_SIGBUS) {
 		signal = SIGBUS;
 		goto bad_area;
...
@@ -200,6 +200,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -145,6 +145,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto map_err;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto bus_err;
 		BUG();
...
@@ -141,6 +141,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -224,6 +224,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -158,6 +158,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -262,6 +262,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -135,6 +135,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -171,6 +171,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -256,6 +256,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 	 */
 	if (fault & VM_FAULT_OOM)
 		goto out_of_memory;
+	else if (fault & VM_FAULT_SIGSEGV)
+		goto bad_area;
 	else if (fault & VM_FAULT_SIGBUS)
 		goto bad_area;
 	BUG();
...
@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	if (*flt & VM_FAULT_OOM) {
 		ret = -ENOMEM;
 		goto out_unlock;
-	} else if (*flt & VM_FAULT_SIGBUS) {
+	} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
 		ret = -EFAULT;
 		goto out_unlock;
 	}
...
@@ -437,6 +437,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	 */
 	fault = handle_mm_fault(mm, vma, address, flags);
 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+		if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		rc = mm_fault_error(regs, address, fault);
 		if (rc >= MM_FAULT_RETURN)
 			goto bail;
...
@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
 			do_no_context(regs);
 		else
 			pagefault_out_of_memory();
+	} else if (fault & VM_FAULT_SIGSEGV) {
+		/* Kernel mode? Handle exceptions or die */
+		if (!user_mode(regs))
+			do_no_context(regs);
+		else
+			do_sigsegv(regs, SEGV_MAPERR);
 	} else if (fault & VM_FAULT_SIGBUS) {
 		/* Kernel mode? Handle exceptions or die */
 		if (!user_mode(regs))
...
@@ -114,6 +114,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	} else {
 		if (fault & VM_FAULT_SIGBUS)
 			do_sigbus(regs, error_code, address);
+		else if (fault & VM_FAULT_SIGSEGV)
+			bad_area(regs, error_code, address);
 		else
 			BUG();
 	}
...
@@ -249,6 +249,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -446,6 +446,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -442,6 +442,8 @@ static int handle_page_fault(struct pt_regs *regs,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -80,6 +80,8 @@ int handle_page_fault(unsigned long address, unsigned long ip,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM) {
 			goto out_of_memory;
+		} else if (fault & VM_FAULT_SIGSEGV) {
+			goto out;
 		} else if (fault & VM_FAULT_SIGBUS) {
 			err = -EACCES;
 			goto out;
...
@@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
 			     VM_FAULT_HWPOISON_LARGE))
 			do_sigbus(regs, error_code, address, fault);
+		else if (fault & VM_FAULT_SIGSEGV)
+			bad_area_nosemaphore(regs, error_code, address);
 		else
 			BUG();
 	}
...
@@ -117,6 +117,8 @@ void do_page_fault(struct pt_regs *regs)
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
...
@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 		return 0;
 	}
-	if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
+	if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
 		CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
 		return -EFAULT;
 	}
...
@@ -1070,6 +1070,7 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
 #define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
 #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
+#define VM_FAULT_SIGSEGV 0x0040
 #define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
@@ -1078,8 +1079,9 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
-#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
-			 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
+#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
+			 VM_FAULT_FALLBACK)
 /* Encode hstate index for a hwpoisoned large page */
 #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
...
@@ -296,7 +296,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 			return -ENOMEM;
 		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
 			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
-		if (ret & VM_FAULT_SIGBUS)
+		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
 			return -EFAULT;
 		BUG();
 	}
@@ -571,7 +571,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			return -ENOMEM;
 		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
 			return -EHWPOISON;
-		if (ret & VM_FAULT_SIGBUS)
+		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
 			return -EFAULT;
 		BUG();
 	}
...
@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
 		else
 			ret = VM_FAULT_WRITE;
 		put_page(page);
-	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
+	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
 	/*
 	 * We must loop because handle_mm_fault() may back out if there's
 	 * any difficulty e.g. if pte accessed bit gets updated concurrently.