Commit 86afe9ae authored by Russell King

[ARM] Fix/Optimise flush_dcache_page() for VIPT aliasing caches.

Fix flush_dcache_page() for ARMv6 VIPT aliasing caches, and make it a
no-op on non-aliasing caches, where no extra flushing is required.
parent 2e510b77
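
Read together, the hunks below turn flush_dcache_page() into roughly the sketch that follows. This is a simplified reconstruction for orientation, not the verbatim file: the final else branch is collapsed in the last hunk, so the __flush_dcache_page() call shown there is an assumption based on the helper defined earlier in the same file.

/* Same includes as the patched file (see the second hunk). */
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/system.h>

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/* VIPT non-aliasing: kernel and user views hit the same cache lines. */
	if (cache_is_vipt_nonaliasing())
		return;

	if (mapping && !mapping_mapped(mapping)) {
		/* Page-cache page with no user mappings yet: defer the flush. */
		set_bit(PG_dcache_dirty, &page->flags);
	} else {
		/*
		 * Assume aliasing user mappings exist; flush the kernel
		 * mapping and one (aliasing VIPT) or all (VIVT) user
		 * aliases now.  (Assumed call - this branch is collapsed
		 * in the last hunk below.)
		 */
		__flush_dcache_page(mapping, page);
	}
}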
@@ -142,9 +142,20 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 	if (page_mapping(page)) {
 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-		if (dirty)
+		if (dirty) {
+			/*
+			 * This is our first userspace mapping of this page.
+			 * Ensure that the physical page is coherent with
+			 * the kernel mapping.
+			 *
+			 * FIXME: only need to do this on VIVT and aliasing
+			 * VIPT cache architectures.  We can do that
+			 * by choosing whether to set this bit...
+			 */
 			__cpuc_flush_dcache_page(page_address(page));
+		}
+
+		if (cache_is_vivt())
 			make_coherent(vma, addr, page, dirty);
 	}
 }
@@ -12,6 +12,7 @@
 #include <linux/pagemap.h>
 
 #include <asm/cacheflush.h>
+#include <asm/system.h>
 
 static void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
@@ -20,14 +21,26 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	struct prio_tree_iter iter;
 	pgoff_t pgoff;
 
+	/*
+	 * Writeback any data associated with the kernel mapping of this
+	 * page.  This ensures that data in the physical page is mutually
+	 * coherent with the kernels mapping.
+	 */
 	__cpuc_flush_dcache_page(page_address(page));
 
+	/*
+	 * If there's no mapping pointer here, then this page isn't
+	 * visible to userspace yet, so there are no cache lines
+	 * associated with any other aliases.
+	 */
 	if (!mapping)
 		return;
 
 	/*
-	 * With a VIVT cache, we need to also write back
-	 * and invalidate any user data.
+	 * There are possible user space mappings of this page:
+	 * - VIVT cache: we need to also write back and invalidate all user
+	 *   data in the current VM view associated with this page.
+	 * - aliasing VIPT: we only need to find one mapping of this page.
 	 */
 	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -44,14 +57,35 @@ static void __flush_dcache_page(struct address_space *mapping, struct page *page)
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
 		flush_cache_page(mpnt, mpnt->vm_start + offset);
+		if (cache_is_vipt())
+			break;
 	}
 	flush_dcache_mmap_unlock(mapping);
 }
 
+/*
+ * Ensure cache coherency between kernel mapping and userspace mapping
+ * of this page.
+ *
+ * We have three cases to consider:
+ *  - VIPT non-aliasing cache: fully coherent so nothing required.
+ *  - VIVT: fully aliasing, so we need to handle every alias in our
+ *    current VM view.
+ *  - VIPT aliasing: need to handle one alias in our current VM view.
+ *
+ * If we need to handle aliasing:
+ *  If the page only exists in the page cache and there are no user
+ *  space mappings, we can be lazy and remember that we may have dirty
+ *  kernel cache lines for later.  Otherwise, we assume we have
+ *  aliasing mappings.
+ */
 void flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
+	if (cache_is_vipt_nonaliasing())
+		return;
+
 	if (mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
 	else
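
The lazy path relies on the existing kernel rule that code writing to a page cache page through the kernel mapping calls flush_dcache_page() afterwards. A hypothetical caller (fill_page_from_disk() below is illustrative only, not part of this patch) would interact with the two patched functions roughly like this:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical helper: a filesystem fills a lowmem page cache page via
 * the kernel mapping, so page_address() gives a valid kernel address.
 */
static void fill_page_from_disk(struct page *page, const void *src, size_t len)
{
	memcpy(page_address(page), src, len);	/* dirties kernel-mapping cache lines */

	/*
	 * With this patch: if no process has the file mapped yet, this only
	 * sets PG_dcache_dirty rather than flushing immediately.  When the
	 * page is later mapped into userspace, update_mmu_cache() (first
	 * hunk) sees the bit, writes back the kernel mapping, and on VIVT
	 * caches also calls make_coherent() for the other user aliases.
	 */
	flush_dcache_page(page);
}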