Commit d936a7e7 authored by Guo Ren

csky: Enable defer flush_dcache_page for abiv2 cpus (807/810/860)

Instead of flushing the cache every time update_mmu_cache() is called, we
use flush_dcache_page() to reduce the frequency of cache flushing.

As abiv2 cpus are all PIPT for icache & dcache, we needn't handle the
dcache aliasing problem. But their icache can't snoop the dcache, so we
still need to sync the icache with the dcache in update_mmu_cache().
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
parent a1176734
arch/csky/abiv2/cacheflush.c
@@ -9,20 +9,22 @@
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *pte)
 {
-	unsigned long addr, pfn;
+	unsigned long addr;
 	struct page *page;
 
-	pfn = pte_pfn(*pte);
-	if (unlikely(!pfn_valid(pfn)))
+	page = pfn_to_page(pte_pfn(*pte));
+	if (page == ZERO_PAGE(0))
 		return;
 
-	page = pfn_to_page(pfn);
-	if (page == ZERO_PAGE(0))
+	if (test_and_set_bit(PG_dcache_clean, &page->flags))
 		return;
 
 	addr = (unsigned long) kmap_atomic(page);
 
-	cache_wbinv_range(addr, addr + PAGE_SIZE);
+	dcache_wb_range(addr, addr + PAGE_SIZE);
+
+	if (vma->vm_flags & VM_EXEC)
+		icache_inv_range(addr, addr + PAGE_SIZE);
 
 	kunmap_atomic((void *) addr);
 }
arch/csky/abiv2/inc/abi/cacheflush.h
@@ -15,8 +15,16 @@
 #define flush_cache_dup_mm(mm)			do { } while (0)
 #define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do { } while (0)
+
+#define PG_dcache_clean			PG_arch_1
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
+static inline void flush_dcache_page(struct page *page)
+{
+	if (test_bit(PG_dcache_clean, &page->flags))
+		clear_bit(PG_dcache_clean, &page->flags);
+}
+
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_page(vma, page)		do { } while (0)
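
For reference, below is a minimal user-space sketch of the PG_dcache_clean handshake the two hunks implement. struct page, the flag value and the flush helpers here are simplified stand-ins rather than the kernel definitions; only the mark-dirty / flush-once protocol mirrors the patch.

/*
 * User-space model only: struct page, PG_dcache_clean and the flush
 * stubs below are stand-ins, not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define PG_dcache_clean	(1UL << 0)	/* stands in for PG_arch_1 */

struct page {
	unsigned long flags;
	bool exec_mapping;	/* stands in for vma->vm_flags & VM_EXEC */
};

/* Stubs standing in for dcache_wb_range() / icache_inv_range(). */
static void dcache_wb(void)  { printf("  dcache writeback\n"); }
static void icache_inv(void) { printf("  icache invalidate\n"); }

/* Kernel modified the page: just mark it dirty, defer the real flush. */
static void flush_dcache_page(struct page *page)
{
	page->flags &= ~PG_dcache_clean;
}

/* Page gets mapped into user space: flush only if it is not already clean. */
static void update_mmu_cache(struct page *page)
{
	if (page->flags & PG_dcache_clean)
		return;		/* test_and_set_bit() saw the bit already set */
	page->flags |= PG_dcache_clean;

	dcache_wb();
	if (page->exec_mapping)
		icache_inv();	/* icache cannot snoop the dcache */
}

int main(void)
{
	struct page page = { .flags = 0, .exec_mapping = true };

	printf("first fault:\n");
	update_mmu_cache(&page);	/* writes back and invalidates once */
	printf("second fault, page still clean:\n");
	update_mmu_cache(&page);	/* returns early, no flush */

	flush_dcache_page(&page);	/* kernel dirtied the page again */
	printf("fault after flush_dcache_page():\n");
	update_mmu_cache(&page);	/* flushes again */
	return 0;
}

Run as written, the sketch flushes only on the first update_mmu_cache() after the page has been dirtied; later calls return early until flush_dcache_page() clears the bit again, which is the reduction in flush frequency described in the commit message.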