Commit f107701f authored by Kautuk Consul, committed by Linus Torvalds

xtensa/mm/fault.c: port OOM changes to do_page_fault

Commits d065bd81 ("mm: retry page fault when blocking on disk
transfer") and 37b23e05 ("x86,mm: make pagefault killable") changed
the x86 page-fault handler to make it retryable as well as killable.

These changes reduce the mmap_sem hold time, which is crucial during OOM
killer invocation.

Port these changes to xtensa.
Signed-off-by: Kautuk Consul <consul.kautuk@gmail.com>
Acked-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 54b50199
@@ -44,6 +44,7 @@ void do_page_fault(struct pt_regs *regs)
 	int is_write, is_exec;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

 	info.si_code = SEGV_MAPERR;
@@ -71,6 +72,7 @@ void do_page_fault(struct pt_regs *regs)
 	       address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
 #endif

+retry:
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
@@ -93,6 +95,7 @@ void do_page_fault(struct pt_regs *regs)
 	if (is_write) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
 	} else if (is_exec) {
 		if (!(vma->vm_flags & VM_EXEC))
 			goto bad_area;
@@ -104,7 +107,11 @@ void do_page_fault(struct pt_regs *regs)
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -112,10 +119,22 @@ void do_page_fault(struct pt_regs *regs)
 			goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
 	return;
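
Taken together, the hunks above give xtensa's do_page_fault() the same retry/killable structure that the x86 handler gained in the commits cited in the message. The following is a condensed sketch of the resulting control flow, not the verbatim file: the VMA and access checks, the bad_area/out_of_memory/do_sigbus paths, and most of the local-variable setup are elided for brevity.

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int address = regs->excvaddr;	/* faulting address */
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	/* ... VMA lookup and access checks; a write fault also sets
	 * FAULT_FLAG_WRITE before calling into the core fault code ... */

	fault = handle_mm_fault(mm, vma, address, flags);

	/* Killable: if a fatal signal arrived while the fault slept,
	 * __lock_page_or_retry() has already dropped mmap_sem, so the
	 * handler can simply return and let the task die. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/* ... out_of_memory / do_sigbus / BUG() handling as before ... */
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/* Retry once without ALLOW_RETRY; mmap_sem was
			 * already released in __lock_page_or_retry(). */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}

Because the second attempt runs without FAULT_FLAG_ALLOW_RETRY, __lock_page_or_retry() then blocks with mmap_sem held instead of returning VM_FAULT_RETRY again, so the handler cannot loop indefinitely, while the first attempt no longer pins mmap_sem across page I/O.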