Commit 474183b1 authored by Linus Torvalds

Merge tag 'please-pull-misc-3.6' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux

Pull misc Itanium fixes from Tony Luck.

* tag 'please-pull-misc-3.6' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  debug: Do not permit CONFIG_DEBUG_STACK_USAGE=y on IA64 or PARISC
  [IA64] Port OOM changes to ia64_do_page_fault
parents 47b170af e9c31b32
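
Note: the core of the ia64_do_page_fault() change below is the mmap_sem retry protocol already used by other architectures: the first attempt passes FAULT_FLAG_ALLOW_RETRY, and when handle_mm_fault() returns VM_FAULT_RETRY it has already dropped mmap_sem (in __lock_page_or_retry), so the handler retakes the lock and retries exactly once with the retry flag cleared. The standalone C sketch below models only that control flow; the flag values and handle_mm_fault_stub() are invented for illustration and are not the kernel's definitions.

/* Userspace sketch of the fault-retry pattern, assuming invented flag
 * values and a stub fault handler (not kernel code). */
#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY	0x1
#define FAULT_FLAG_KILLABLE	0x2
#define VM_FAULT_RETRY		0x4

/* Stub: pretend the first attempt dropped the lock and asked to retry. */
static int handle_mm_fault_stub(unsigned int flags)
{
	static int first_try = 1;

	if (first_try && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		first_try = 0;
		return VM_FAULT_RETRY;
	}
	return 0;	/* success */
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault;

retry:
	/* (the real handler takes mmap_sem for reading here) */
	fault = handle_mm_fault_stub(flags);
	if (fault & VM_FAULT_RETRY) {
		/* Clear the flag so the second attempt cannot ask to
		 * retry again; the lock was already dropped for us. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}
	printf("fault resolved, final flags=%#x\n", flags);
	return 0;
}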
arch/ia64/mm/fault.c
@@ -72,6 +72,10 @@ mapped_kernel_page_is_present (unsigned long address)
 	return pte_present(pte);
 }
 
+#	define VM_READ_BIT	0
+#	define VM_WRITE_BIT	1
+#	define VM_EXEC_BIT	2
+
 void __kprobes
 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
 {
@@ -81,6 +85,12 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	struct siginfo si;
 	unsigned long mask;
 	int fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+	flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 
 	/* mmap_sem is performance critical.... */
 	prefetchw(&mm->mmap_sem);
@@ -109,6 +119,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (notify_page_fault(regs, TRAP_BRKPT))
 		return;
 
+retry:
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma_prev(mm, address, &prev_vma);
@@ -130,10 +141,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */
 
-#	define VM_READ_BIT	0
-#	define VM_WRITE_BIT	1
-#	define VM_EXEC_BIT	2
-
 #	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
 	    || (1 << VM_EXEC_BIT) != VM_EXEC)
 #		error File is out of sync with <linux/mm.h>.  Please update.
 #	endif
@@ -142,9 +149,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
 		goto bad_area;
 
-	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
-		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
-
 	if ((vma->vm_flags & mask) != mask)
 		goto bad_area;
 
@@ -153,7 +157,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened
@@ -168,10 +176,24 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 		}
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR)
-		current->maj_flt++;
-	else
-		current->min_flt++;
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
 	return;
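As a worked example of the mask/flags computation the patch hoists to the top of the function: each access type reported in the ISR (execute, write) is shifted down to bit 0 and back up into the matching VM_* bit position, and a write access additionally sets FAULT_FLAG_WRITE so retried faults stay write faults. In the sketch below the VM_*_BIT values come from the diff above, but the IA64_ISR_* bit positions and the FAULT_FLAG_WRITE value are illustrative placeholders, and a 64-bit unsigned long is assumed.

/* Userspace sketch of the ISR-to-vm_flags mask derivation; ISR bit
 * positions and the flag value are placeholders, not the real headers'. */
#include <stdio.h>

#define VM_READ_BIT		0
#define VM_WRITE_BIT		1
#define VM_EXEC_BIT		2
#define VM_WRITE		(1UL << VM_WRITE_BIT)

#define IA64_ISR_X_BIT		32	/* placeholder position */
#define IA64_ISR_W_BIT		33	/* placeholder position */

#define FAULT_FLAG_WRITE	0x1	/* placeholder value */

int main(void)
{
	/* Pretend the ISR reports a write access: W bit set, X bit clear. */
	unsigned long isr = 1UL << IA64_ISR_W_BIT;
	unsigned int flags = 0;
	unsigned long mask;

	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);

	/* Prints mask=0x2 flags=0x1: a write fault, flagged as such. */
	printf("mask=%#lx flags=%#x\n", mask, flags);
	return 0;
}
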
lib/Kconfig.debug
@@ -714,7 +714,7 @@ config STACKTRACE
 
 config DEBUG_STACK_USAGE
 	bool "Stack utilization instrumentation"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && !IA64 && !PARISC
 	help
 	  Enables the display of the minimum amount of free stack which each
 	  task has ever had available in the sysrq-T and sysrq-P debug output.
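The Kconfig change matches how the instrumentation works: DEBUG_STACK_USAGE poisons the task stack and computes a low-water mark by scanning upward from the low address for untouched sentinel words, which only measures anything if the stack grows downward; that assumption reportedly breaks on PARISC (upward-growing stack) and IA64 (the register backing store grows upward), hence the new dependency. A minimal userspace sketch of the sentinel-scan idea, with an invented region size and sentinel value:

/* Userspace sketch of a stack low-water-mark scan; region size and
 * sentinel are invented, and downward stack growth is simulated. */
#include <stdio.h>
#include <string.h>

#define STACK_WORDS	64
#define SENTINEL	0xdeadbeefUL

int main(void)
{
	unsigned long stack[STACK_WORDS];
	size_t i, unused = 0;

	/* Poison the whole region, then pretend a downward-growing stack
	 * consumed the upper half of it. */
	for (i = 0; i < STACK_WORDS; i++)
		stack[i] = SENTINEL;
	memset(&stack[STACK_WORDS / 2], 0,
	       (STACK_WORDS / 2) * sizeof(stack[0]));

	/* Scan upward from the low end: each untouched sentinel word is
	 * stack that was never used. An upward-growing stack would leave
	 * its *low* words clobbered, making this count meaningless. */
	for (i = 0; i < STACK_WORDS && stack[i] == SENTINEL; i++)
		unused++;

	printf("minimum free stack: %zu words\n", unused);
	return 0;
}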