Commit a51271d9 authored by Pekka Enberg, committed by Palmer Dabbelt

riscv/mm/fault: Move bad area handling to bad_area()

This patch moves the bad area handling in do_page_fault() into a new
bad_area() function and converts the gotos into calls to it.
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
parent cac4d1dc
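
As context for the diff below, here is a minimal standalone sketch of the
refactoring pattern the patch applies. It is not part of the commit: the
names (ctx, handle_fault) and types are hypothetical and simplified. The
idea is that error handling which lived behind a single goto label is
hoisted into a helper function, so each failure site becomes a call
followed by an early return.

/* Minimal sketch of the goto-label-to-helper refactoring (hypothetical
 * names, plain C). Before: every failure site did "goto bad;" and a
 * single label at the bottom of the function unlocked and reported.
 * After: the label body is a function, callable from any failure site. */
#include <stdio.h>

struct ctx { int user_mode; };

/* This plays the role of bad_area() in the patch: it performs the shared
 * cleanup (unlock), then branches on user vs. kernel mode. */
static void bad_area_sketch(struct ctx *c, unsigned long addr)
{
	printf("unlock\n");
	if (c->user_mode) {
		printf("SIGSEGV at %#lx\n", addr);	/* user fault */
		return;
	}
	printf("kernel fault at %#lx\n", addr);		/* kernel path */
}

static void handle_fault(struct ctx *c, unsigned long addr, int vma_ok)
{
	printf("lock\n");
	if (!vma_ok) {
		bad_area_sketch(c, addr);	/* was: goto bad_area; */
		return;
	}
	printf("fault handled\n");
	printf("unlock\n");
}

int main(void)
{
	struct ctx c = { .user_mode = 1 };
	handle_fault(&c, 0x1000, 0);
	return 0;
}

The trade-off mirrors the kernel change: the helper must now be passed
everything the label body used to pick up from surrounding scope (in the
patch, regs, mm, code, and addr), but each failure path reads as a
self-contained statement instead of a jump to shared cleanup at the bottom
of the function.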
@@ -37,6 +37,22 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
 	do_exit(SIGKILL);
 }
 
+static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+{
+	/*
+	 * Something tried to access memory that isn't in our memory map.
+	 * Fix it, but check if it's kernel or user first.
+	 */
+	mmap_read_unlock(mm);
+	/* User mode accesses just cause a SIGSEGV */
+	if (user_mode(regs)) {
+		do_trap(regs, SIGSEGV, code, addr);
+		return;
+	}
+
+	no_context(regs, addr);
+}
+
 /*
  * This routine handles page faults. It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
@@ -90,14 +106,20 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 retry:
 	mmap_read_lock(mm);
 	vma = find_vma(mm, addr);
-	if (unlikely(!vma))
-		goto bad_area;
+	if (unlikely(!vma)) {
+		bad_area(regs, mm, code, addr);
+		return;
+	}
 	if (likely(vma->vm_start <= addr))
 		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
-		goto bad_area;
-	if (unlikely(expand_stack(vma, addr)))
-		goto bad_area;
+	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
+		bad_area(regs, mm, code, addr);
+		return;
+	}
+	if (unlikely(expand_stack(vma, addr))) {
+		bad_area(regs, mm, code, addr);
+		return;
+	}
 
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
@@ -108,16 +130,22 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	switch (cause) {
 	case EXC_INST_PAGE_FAULT:
-		if (!(vma->vm_flags & VM_EXEC))
-			goto bad_area;
+		if (!(vma->vm_flags & VM_EXEC)) {
+			bad_area(regs, mm, code, addr);
+			return;
+		}
 		break;
 	case EXC_LOAD_PAGE_FAULT:
-		if (!(vma->vm_flags & VM_READ))
-			goto bad_area;
+		if (!(vma->vm_flags & VM_READ)) {
+			bad_area(regs, mm, code, addr);
+			return;
+		}
 		break;
 	case EXC_STORE_PAGE_FAULT:
-		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
+		if (!(vma->vm_flags & VM_WRITE)) {
+			bad_area(regs, mm, code, addr);
+			return;
+		}
 		flags |= FAULT_FLAG_WRITE;
 		break;
 	default:
@@ -161,21 +189,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	mmap_read_unlock(mm);
 	return;
 
-	/*
-	 * Something tried to access memory that isn't in our memory map.
-	 * Fix it, but check if it's kernel or user first.
-	 */
-bad_area:
-	mmap_read_unlock(mm);
-	/* User mode accesses just cause a SIGSEGV */
-	if (user_mode(regs)) {
-		do_trap(regs, SIGSEGV, code, addr);
-		return;
-	}
-	no_context(regs, addr);
-	return;
-
 	/*
 	 * We ran out of memory, call the OOM killer, and return the userspace
 	 * (which will retry the fault, or kill us if we got oom-killed).
...