Commit bab70a4a authored by Eugene Surovegin, committed by Paul Mackerras

[PATCH] lock PTE before updating it in 440/BookE page fault handler

Fix the 44x and BookE page fault handlers to correctly lock the PTE
before trying to pte_update() it; otherwise the PTE might be swapped
out after the pte_present() check but before the pte_update() call,
resulting in a corrupted PTE. This can happen with preemption enabled
under low-memory conditions.
Signed-off-by: Eugene Surovegin <ebs@ebshome.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent bac30d1a
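The race this patch closes, condensed into a standalone sketch. All names
(get_pteptr(), pte_present(), pte_update(), pte_lockptr(), pte_unmap_unlock(),
_tlbie()) are exactly those used in the hunks below; the surrounding
fault-handler code and the dcache/icache flush are elided:

/* Before the patch: the PTE is checked and updated with no lock held.
 * With CONFIG_PREEMPT and low memory, the task can be preempted between
 * pte_present() and pte_update(); if the page is swapped out in that
 * window, pte_update() overwrites a swap entry and corrupts the PTE.
 */
if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
	/* ... dcache/icache flush ... */
	pte_update(ptep, 0, _PAGE_HWEXEC);	/* PTE may be gone by now */
	_tlbie(address);
	pte_unmap(ptep);
}

/* After the patch: take the PTE lock first and re-test pte_present()
 * under it, so the PTE cannot change between the check and the update.
 */
if (get_pteptr(mm, address, &ptep, &pmdp)) {
	spinlock_t *ptl = pte_lockptr(mm, pmdp);
	spin_lock(ptl);
	if (pte_present(*ptep)) {
		/* ... dcache/icache flush ... */
		pte_update(ptep, 0, _PAGE_HWEXEC);
		_tlbie(address);
	}
	pte_unmap_unlock(ptep, ptl);
}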
@@ -267,25 +267,29 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 #endif
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 		pte_t *ptep;
+		pmd_t *pmdp;
 
 		/* Since 4xx/Book-E supports per-page execute permission,
 		 * we lazily flush dcache to icache. */
 		ptep = NULL;
-		if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
-			struct page *page = pte_page(*ptep);
-
-			if (! test_bit(PG_arch_1, &page->flags)) {
-				flush_dcache_icache_page(page);
-				set_bit(PG_arch_1, &page->flags);
+		if (get_pteptr(mm, address, &ptep, &pmdp)) {
+			spinlock_t *ptl = pte_lockptr(mm, pmdp);
+			spin_lock(ptl);
+			if (pte_present(*ptep)) {
+				struct page *page = pte_page(*ptep);
+
+				if (!test_bit(PG_arch_1, &page->flags)) {
+					flush_dcache_icache_page(page);
+					set_bit(PG_arch_1, &page->flags);
+				}
+				pte_update(ptep, 0, _PAGE_HWEXEC);
+				_tlbie(address);
+				pte_unmap_unlock(ptep, ptl);
+				up_read(&mm->mmap_sem);
+				return 0;
 			}
-			pte_update(ptep, 0, _PAGE_HWEXEC);
-			_tlbie(address);
-			pte_unmap(ptep);
-			up_read(&mm->mmap_sem);
-			return 0;
+			pte_unmap_unlock(ptep, ptl);
 		}
-		if (ptep != NULL)
-			pte_unmap(ptep);
 #endif
 	/* a write */
 	} else if (is_write) {
@@ -372,7 +372,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
  * the PTE pointer is unmodified if PTE is not found.
  */
 int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
@@ -387,6 +387,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
 		if (pte) {
 			retval = 1;
 			*ptep = pte;
+			if (pmdp)
+				*pmdp = pmd;
 			/* XXX caller needs to do pte_unmap, yuck */
 		}
 	}
@@ -424,7 +426,7 @@ unsigned long iopa(unsigned long addr)
 		mm = &init_mm;
 
 	pa = 0;
-	if (get_pteptr(mm, addr, &pte)) {
+	if (get_pteptr(mm, addr, &pte, NULL)) {
 		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
 		pte_unmap(pte);
 	}
@@ -202,6 +202,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	/* an exec - 4xx/Book-E allows for per-page execute permission */
 	} else if (TRAP(regs) == 0x400) {
 		pte_t *ptep;
+		pmd_t *pmdp;
 
 #if 0
 		/* It would be nice to actually enforce the VM execute
@@ -215,21 +216,24 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 		/* Since 4xx/Book-E supports per-page execute permission,
 		 * we lazily flush dcache to icache. */
 		ptep = NULL;
-		if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
-			struct page *page = pte_page(*ptep);
-
-			if (! test_bit(PG_arch_1, &page->flags)) {
-				flush_dcache_icache_page(page);
-				set_bit(PG_arch_1, &page->flags);
+		if (get_pteptr(mm, address, &ptep, &pmdp)) {
+			spinlock_t *ptl = pte_lockptr(mm, pmdp);
+			spin_lock(ptl);
+			if (pte_present(*ptep)) {
+				struct page *page = pte_page(*ptep);
+
+				if (!test_bit(PG_arch_1, &page->flags)) {
+					flush_dcache_icache_page(page);
+					set_bit(PG_arch_1, &page->flags);
+				}
+				pte_update(ptep, 0, _PAGE_HWEXEC);
+				_tlbie(address);
+				pte_unmap_unlock(ptep, ptl);
+				up_read(&mm->mmap_sem);
+				return 0;
 			}
-			pte_update(ptep, 0, _PAGE_HWEXEC);
-			_tlbie(address);
-			pte_unmap(ptep);
-			up_read(&mm->mmap_sem);
-			return 0;
+			pte_unmap_unlock(ptep, ptl);
 		}
-		if (ptep != NULL)
-			pte_unmap(ptep);
 #endif
 		/* a read */
 	} else {
@@ -368,7 +368,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
  * the PTE pointer is unmodified if PTE is not found.
  */
 int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
@@ -383,6 +383,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
 		if (pte) {
 			retval = 1;
 			*ptep = pte;
+			if (pmdp)
+				*pmdp = pmd;
 			/* XXX caller needs to do pte_unmap, yuck */
 		}
 	}
@@ -420,7 +422,7 @@ unsigned long iopa(unsigned long addr)
 		mm = &init_mm;
 
 	pa = 0;
-	if (get_pteptr(mm, addr, &pte)) {
+	if (get_pteptr(mm, addr, &pte, NULL)) {
 		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
 		pte_unmap(pte);
 	}
@@ -837,7 +837,8 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
  */
 #define pgtable_cache_init()	do { } while (0)
 
-extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep);
+extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
+		      pmd_t **pmdp);
 
 #include <asm-generic/pgtable.h>
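For reference, the widened prototype also admits lookup-only callers:
passing NULL for pmdp skips the pmd hand-back, as iopa() does in the hunks
above. A minimal sketch of that pattern (declarations added for
completeness; mm and addr are assumed to come from the caller):

pte_t *pte;
unsigned long pa = 0;

/* Read-only walk: no PTE update intended, so no lock is taken and
 * the new pmdp argument is simply NULL.
 */
if (get_pteptr(mm, addr, &pte, NULL)) {
	pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
	pte_unmap(pte);
}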