Commit 842915f5 authored by Mel Gorman's avatar Mel Gorman Committed by Linus Torvalds

ppc64: add paranoid warnings for unexpected DSISR_PROTFAULT

ppc64 should not be depending on DSISR_PROTFAULT, and it is unexpected if
such faults are triggered.  This patch adds warnings just in case they are
being accidentally depended upon.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Jones <davej@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8a0516ed
...@@ -64,10 +64,14 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, ...@@ -64,10 +64,14 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
if (!(vma->vm_flags & VM_WRITE)) if (!(vma->vm_flags & VM_WRITE))
goto out_unlock; goto out_unlock;
} else { } else {
if (dsisr & DSISR_PROTFAULT)
goto out_unlock;
if (!(vma->vm_flags & (VM_READ | VM_EXEC))) if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto out_unlock; goto out_unlock;
/*
* protfault should only happen due to us
* mapping a region readonly temporarily. PROT_NONE
* is also covered by the VMA check above.
*/
WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
} }
ret = 0; ret = 0;
......
...@@ -389,17 +389,6 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -389,17 +389,6 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
#endif /* CONFIG_8xx */ #endif /* CONFIG_8xx */
if (is_exec) { if (is_exec) {
#ifdef CONFIG_PPC_STD_MMU
/* Protection fault on exec go straight to failure on
* Hash based MMUs as they either don't support per-page
* execute permission, or if they do, it's handled already
* at the hash level. This test would probably have to
* be removed if we change the way this works to make hash
* processors use the same I/D cache coherency mechanism
* as embedded.
*/
#endif /* CONFIG_PPC_STD_MMU */
/* /*
* Allow execution from readable areas if the MMU does not * Allow execution from readable areas if the MMU does not
* provide separate controls over reading and executing. * provide separate controls over reading and executing.
...@@ -414,6 +403,14 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -414,6 +403,14 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
(cpu_has_feature(CPU_FTR_NOEXECUTE) || (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
!(vma->vm_flags & (VM_READ | VM_WRITE)))) !(vma->vm_flags & (VM_READ | VM_WRITE))))
goto bad_area; goto bad_area;
#ifdef CONFIG_PPC_STD_MMU
/*
* protfault should only happen due to us
* mapping a region readonly temporarily. PROT_NONE
* is also covered by the VMA check above.
*/
WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
#endif /* CONFIG_PPC_STD_MMU */
/* a write */ /* a write */
} else if (is_write) { } else if (is_write) {
if (!(vma->vm_flags & VM_WRITE)) if (!(vma->vm_flags & VM_WRITE))
...@@ -423,6 +420,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, ...@@ -423,6 +420,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
} else { } else {
if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area; goto bad_area;
WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment