Commit c0177800 authored by Catalin Marinas, committed by Russell King

ARM: 6379/1: Assume new page cache pages have dirty D-cache

There are places in Linux where writes to newly allocated page cache
pages happen without a subsequent call to flush_dcache_page() (several
PIO drivers including USB HCD). This patch changes the meaning of
PG_arch_1 to PG_dcache_clean and makes update_mmu_cache() always flush
the D-cache for a newly mapped page.
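
The reason the inverted polarity is safe by default: a newly allocated page
has all of its page flags cleared, so under PG_dcache_clean a fresh page is
assumed to have a dirty D-cache and is flushed on its first mapping, whereas
under PG_dcache_dirty that same all-clear state was silently treated as
"already clean". Below is a minimal, standalone userspace model of the new
check, not kernel code; test_and_set_bit_model() and map_page() are
hypothetical stand-ins for the kernel's atomic test_and_set_bit() and the
update_mmu_cache() path:

#include <stdio.h>

#define PG_arch_1	(1UL << 0)
#define PG_dcache_clean	PG_arch_1

/* Stand-in for the kernel's atomic test_and_set_bit(): sets the bit
 * and returns its previous value. */
static int test_and_set_bit_model(unsigned long bit, unsigned long *flags)
{
	int was_set = (*flags & bit) != 0;

	*flags |= bit;
	return was_set;
}

/* Mirrors the check update_mmu_cache() performs after this patch:
 * flush the D-cache unless the page is already known to be clean. */
static void map_page(unsigned long *flags)
{
	if (!test_and_set_bit_model(PG_dcache_clean, flags))
		printf("flush D-cache (page not known clean)\n");
	else
		printf("skip flush (page already clean)\n");
}

int main(void)
{
	unsigned long flags = 0;	/* fresh page: all flags clear */

	map_page(&flags);		/* first mapping: flushes */
	map_page(&flags);		/* later mappings: no flush */
	return 0;
}

This default-dirty assumption is what closes the PIO hole: a driver that
writes to a new page cache page through the kernel mapping and never calls
flush_dcache_page() no longer leaves stale cache lines behind.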

The patch also sets the PG_arch_1 bit in the DMA cache maintenance
function to avoid additional cache flushing in update_mmu_cache().
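
Note on the guard used in the DMA path: DMA_TO_DEVICE maintenance only
cleans lines out to RAM and proves nothing about data the device wrote, and
a partial-page operation leaves the rest of the page's cache lines
untouched, so only a full-page, device-to-CPU operation can justify marking
the page clean. A hedged sketch of that condition follows, with
may_mark_dcache_clean() as a hypothetical helper rather than a kernel
function:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

/* Same values as the kernel's enum dma_data_direction. */
enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

/* Mirrors the test added to ___dma_page_dev_to_cpu(): mark the page's
 * D-cache clean only when the maintenance covered the whole page and
 * the transfer could have brought data in from the device. */
static bool may_mark_dcache_clean(enum dma_data_direction dir,
				  unsigned long off, size_t size)
{
	return dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", may_mark_dcache_clean(DMA_FROM_DEVICE, 0, 4096));   /* 1 */
	printf("%d\n", may_mark_dcache_clean(DMA_TO_DEVICE, 0, 4096));     /* 0 */
	printf("%d\n", may_mark_dcache_clean(DMA_FROM_DEVICE, 128, 2048)); /* 0 */
	return 0;
}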
Tested-by: Rabin Vincent <rabin.vincent@stericsson.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 0fc73099

--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -137,10 +137,10 @@
 #endif
 
 /*
- * This flag is used to indicate that the page pointed to by a pte
- * is dirty and requires cleaning before returning it to the user.
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
  */
-#define PG_dcache_dirty PG_arch_1
+#define PG_dcache_clean PG_arch_1
 
 /*
  * MM Cache Management
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -560,7 +560,7 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 #endif
 
 /*
- * if PG_dcache_dirty is set for the page, we need to ensure that any
+ * If PG_dcache_clean is not set for the page, we need to ensure that any
  * cache entries for the kernels virtual memory range are written
  * back to the page.
  */
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -73,7 +73,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -79,7 +79,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long kfrom, kto;
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	/* FIXME: not highmem safe */
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -95,7 +95,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -523,6 +523,12 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 		outer_inv_range(paddr, paddr + size);
 
 	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+
+	/*
+	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 */
+	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+		set_bit(PG_dcache_clean, &page->flags);
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -141,7 +141,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
  * a page table, or changing an existing PTE.  Basically, there are two
  * things that we need to take care of:
  *
- *  1. If PG_dcache_dirty is set for the page, we need to ensure
+ *  1. If PG_dcache_clean is not set for the page, we need to ensure
  *     that any cache entries for the kernels virtual memory
  *     range are written back to the page.
  *  2. If we have multiple shared mappings of the same space in
@@ -169,7 +169,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 	page = pfn_to_page(pfn);
 	mapping = page_mapping(page);
 #ifndef CONFIG_SMP
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
 #endif
 	if (mapping) {
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -248,7 +248,7 @@ void flush_dcache_page(struct page *page)
 
 #ifndef CONFIG_SMP
 	if (mapping && !mapping_mapped(mapping))
-		set_bit(PG_dcache_dirty, &page->flags);
+		clear_bit(PG_dcache_clean, &page->flags);
 	else
 #endif
 	{
@@ -257,6 +257,7 @@ void flush_dcache_page(struct page *page)
 			__flush_dcache_aliases(mapping, page);
 		else if (mapping)
 			__flush_icache_all();
+		set_bit(PG_dcache_clean, &page->flags);
 	}
 }
 EXPORT_SYMBOL(flush_dcache_page);