Commit bd51d9ad authored by Russell King's avatar Russell King

[ARM] Fix flush_dcache_page()

Make flush_dcache_page() handle user space mappings correctly; with a
VIVT cache, we need to make sure that any user space cache lines are
coherent with the kernel's view of the same page of memory.
parent 2ca93395
...@@ -131,21 +131,22 @@ do_PrefetchAbort(unsigned long addr, struct pt_regs *regs) ...@@ -131,21 +131,22 @@ do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
* We take the easy way out of this problem - we make the * We take the easy way out of this problem - we make the
* PTE uncacheable. However, we leave the write buffer on. * PTE uncacheable. However, we leave the write buffer on.
*/ */
static void adjust_pte(struct vm_area_struct *vma, unsigned long address) static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{ {
pgd_t *pgd; pgd_t *pgd;
pmd_t *pmd; pmd_t *pmd;
pte_t *pte, entry; pte_t *pte, entry;
int ret = 0;
pgd = pgd_offset(vma->vm_mm, address); pgd = pgd_offset(vma->vm_mm, address);
if (pgd_none(*pgd)) if (pgd_none(*pgd))
return; goto no_pgd;
if (pgd_bad(*pgd)) if (pgd_bad(*pgd))
goto bad_pgd; goto bad_pgd;
pmd = pmd_offset(pgd, address); pmd = pmd_offset(pgd, address);
if (pmd_none(*pmd)) if (pmd_none(*pmd))
return; goto no_pmd;
if (pmd_bad(*pmd)) if (pmd_bad(*pmd))
goto bad_pmd; goto bad_pmd;
...@@ -161,23 +162,64 @@ static void adjust_pte(struct vm_area_struct *vma, unsigned long address) ...@@ -161,23 +162,64 @@ static void adjust_pte(struct vm_area_struct *vma, unsigned long address)
pte_val(entry) &= ~L_PTE_CACHEABLE; pte_val(entry) &= ~L_PTE_CACHEABLE;
set_pte(pte, entry); set_pte(pte, entry);
flush_tlb_page(vma, address); flush_tlb_page(vma, address);
ret = 1;
} }
pte_unmap(pte); pte_unmap(pte);
return; return ret;
bad_pgd: bad_pgd:
pgd_ERROR(*pgd); pgd_ERROR(*pgd);
pgd_clear(pgd); pgd_clear(pgd);
return; no_pgd:
return 0;
bad_pmd: bad_pmd:
pmd_ERROR(*pmd); pmd_ERROR(*pmd);
pmd_clear(pmd); pmd_clear(pmd);
return; no_pmd:
return 0;
}
void __flush_dcache_page(struct page *page)
{
struct mm_struct *mm = current->active_mm;
struct list_head *l;
unsigned long kaddr = (unsigned long)page_address(page);
cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
if (!page->mapping)
return;
/*
* With a VIVT cache, we need to also write back
* and invalidate any user data.
*/
list_for_each(l, &page->mapping->i_mmap_shared) {
struct vm_area_struct *mpnt;
unsigned long off;
mpnt = list_entry(l, struct vm_area_struct, shared);
/*
* If this VMA is not in our MM, we can ignore it.
*/
if (mpnt->vm_mm != mm)
continue;
if (page->index < mpnt->vm_pgoff)
continue;
off = page->index - mpnt->vm_pgoff;
if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT)
continue;
flush_cache_page(mpnt, off);
}
} }
static void static void
make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page) make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
{ {
struct list_head *l; struct list_head *l;
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
...@@ -213,14 +255,17 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page) ...@@ -213,14 +255,17 @@ make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page)
if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT) if (off >= (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT)
continue; continue;
off = mpnt->vm_start + (off << PAGE_SHIFT);
/* /*
* Ok, it is within mpnt. Fix it up. * Ok, it is within mpnt. Fix it up.
*/ */
adjust_pte(mpnt, mpnt->vm_start + (off << PAGE_SHIFT)); aliases += adjust_pte(mpnt, off);
aliases ++;
} }
if (aliases) if (aliases)
adjust_pte(vma, addr); adjust_pte(vma, addr);
else
flush_cache_page(vma, addr);
} }
/* /*
...@@ -245,9 +290,12 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) ...@@ -245,9 +290,12 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
return; return;
page = pfn_to_page(pfn); page = pfn_to_page(pfn);
if (page->mapping) { if (page->mapping) {
if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
__flush_dcache_page(page); unsigned long kaddr = (unsigned long)page_address(page);
if (dirty)
cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
make_coherent(vma, addr, page); make_coherent(vma, addr, page, dirty);
} }
} }
/* /*
* linux/arch/arm/mm/proc-syms.c * linux/arch/arm/mm/proc-syms.c
* *
* Copyright (C) 2000 Russell King * Copyright (C) 2000-2002 Russell King
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
...@@ -15,6 +15,8 @@ ...@@ -15,6 +15,8 @@
#include <asm/proc-fns.h> #include <asm/proc-fns.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
EXPORT_SYMBOL(__flush_dcache_page);
#ifndef MULTI_CPU #ifndef MULTI_CPU
EXPORT_SYMBOL(cpu_cache_clean_invalidate_all); EXPORT_SYMBOL(cpu_cache_clean_invalidate_all);
EXPORT_SYMBOL(cpu_cache_clean_invalidate_range); EXPORT_SYMBOL(cpu_cache_clean_invalidate_range);
......
...@@ -93,11 +93,7 @@ ...@@ -93,11 +93,7 @@
#define mapping_mapped(map) (!list_empty(&(map)->i_mmap) || \ #define mapping_mapped(map) (!list_empty(&(map)->i_mmap) || \
!list_empty(&(map)->i_mmap_shared)) !list_empty(&(map)->i_mmap_shared))
static inline void __flush_dcache_page(struct page *page) extern void __flush_dcache_page(struct page *);
{
unsigned long virt = (unsigned long)page_address(page);
cpu_cache_clean_invalidate_range(virt, virt + PAGE_SIZE, 0);
}
static inline void flush_dcache_page(struct page *page) static inline void flush_dcache_page(struct page *page)
{ {
...@@ -116,8 +112,6 @@ static inline void flush_dcache_page(struct page *page) ...@@ -116,8 +112,6 @@ static inline void flush_dcache_page(struct page *page)
*/ */
#define flush_icache_page(vma,page) do { } while (0) #define flush_icache_page(vma,page) do { } while (0)
#define clean_dcache_entry(_s) cpu_dcache_clean_entry((unsigned long)(_s))
/* /*
* I cache coherency stuff. * I cache coherency stuff.
* *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment