Commit eebb5181 authored by Kefeng Wang, committed by Andrew Morton

arm64: mm: drop VM_FAULT_BADMAP/VM_FAULT_BADACCESS

Patch series "mm: remove arch's private VM_FAULT_BADMAP/BADACCESS", v2.

Directly set SEGV_MAPERR or SEGV_ACCERR for arm/arm64 to remove the last
two arch-private vm_fault reasons.


This patch (of 2):

On a bad map or bad access, directly set si_code to SEGV_MAPERR or
SEGV_ACCERR, set fault to 0, and go to the error handling path; this lets
us drop the arch's private vm_fault reasons.
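
To make the resulting control flow concrete, here is a minimal, self-contained sketch of the shape the handler takes after this change. It is not the kernel code: fake_page_fault and FAKE_VM_FAULT_ERROR are hypothetical names used only for illustration. The idea is that the fault handler records the POSIX si_code at the point where the failure is classified and jumps to a shared bad_area label, instead of encoding the reason in private vm_fault_t bits.

#include <signal.h>	/* SEGV_MAPERR, SEGV_ACCERR */
#include <stdio.h>

#define FAKE_VM_FAULT_ERROR	0x1	/* stand-in for VM_FAULT_ERROR */

/* Hypothetical stand-in for do_page_fault(); only the error-path shape matters. */
static int fake_page_fault(int vma_found, int access_ok, unsigned long fault)
{
	int si_code;

	if (!vma_found) {
		si_code = SEGV_MAPERR;	/* no mapping at this address */
		goto bad_area;
	}

	if (!access_ok) {
		si_code = SEGV_ACCERR;	/* mapping exists, permissions do not */
		goto bad_area;
	}

	/* Handle the "normal" (no error) case first. */
	if (!(fault & FAKE_VM_FAULT_ERROR))
		return 0;

	si_code = SEGV_MAPERR;		/* default for the remaining error cases */
bad_area:
	/* The real handler delivers SIGSEGV with the recorded si_code here. */
	fprintf(stderr, "would deliver SIGSEGV, si_code=%d\n", si_code);
	return -1;
}

int main(void)
{
	fake_page_fault(0, 0, 0);		/* no VMA     -> SEGV_MAPERR */
	fake_page_fault(1, 0, 0);		/* bad access -> SEGV_ACCERR */
	return fake_page_fault(1, 1, 0);	/* normal case -> 0 */
}

The design point of the series is visible in this sketch: the only thing VM_FAULT_BADMAP/VM_FAULT_BADACCESS ever encoded, which si_code to use for the SIGSEGV, is now stored directly, so the arch-private bits can go away.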

Link: https://lkml.kernel.org/r/20240411130925.73281-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20240411130925.73281-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Aishwarya TCV <aishwarya.tcv@arm.com>
Cc: Cristian Marussi <cristian.marussi@arm.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 65867060
arch/arm64/mm/fault.c

@@ -500,9 +500,6 @@ static bool is_write_abort(unsigned long esr)
 	return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
 }
 
-#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
-#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
-
 static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 				   struct pt_regs *regs)
 {
@@ -513,6 +510,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 	unsigned long addr = untagged_addr(far);
 	struct vm_area_struct *vma;
+	int si_code;
 
 	if (kprobe_page_fault(regs, esr))
 		return 0;
@@ -572,9 +570,10 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 
 	if (!(vma->vm_flags & vm_flags)) {
 		vma_end_read(vma);
-		fault = VM_FAULT_BADACCESS;
+		fault = 0;
+		si_code = SEGV_ACCERR;
 		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
-		goto done;
+		goto bad_area;
 	}
 	fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
 	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
@@ -599,15 +598,19 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 retry:
 	vma = lock_mm_and_find_vma(mm, addr, regs);
 	if (unlikely(!vma)) {
-		fault = VM_FAULT_BADMAP;
-		goto done;
+		fault = 0;
+		si_code = SEGV_MAPERR;
+		goto bad_area;
 	}
 
-	if (!(vma->vm_flags & vm_flags))
-		fault = VM_FAULT_BADACCESS;
-	else
-		fault = handle_mm_fault(vma, addr, mm_flags, regs);
+	if (!(vma->vm_flags & vm_flags)) {
+		mmap_read_unlock(mm);
+		fault = 0;
+		si_code = SEGV_ACCERR;
+		goto bad_area;
+	}
+	fault = handle_mm_fault(vma, addr, mm_flags, regs);
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
@@ -626,13 +629,12 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 		mmap_read_unlock(mm);
 
 done:
-	/*
-	 * Handle the "normal" (no error) case first.
-	 */
-	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
-			      VM_FAULT_BADACCESS))))
+	/* Handle the "normal" (no error) case first. */
+	if (likely(!(fault & VM_FAULT_ERROR)))
 		return 0;
 
+	si_code = SEGV_MAPERR;
+bad_area:
 	/*
 	 * If we are in kernel mode at this point, we have no context to
 	 * handle this fault with.
@@ -667,13 +669,8 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 
 		arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name);
 	} else {
-		/*
-		 * Something tried to access memory that isn't in our memory
-		 * map.
-		 */
-		arm64_force_sig_fault(SIGSEGV,
-				      fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
-				      far, inf->name);
+		/* Something tried to access memory that out of memory map */
+		arm64_force_sig_fault(SIGSEGV, si_code, far, inf->name);
 	}
 
 	return 0;