Commit a1176734 authored by Guo Ren

csky: Remove unnecessary flush_icache_* implementation

The abiv2 CPUs all use a PIPT cache, so there is no need to implement
the flush_icache_page function.

The function flush_icache_user_range is not used anywhere, so just
remove it.

The function flush_cache_range is not necessary for a PIPT cache when
the TLB mapping changes, so reduce it to a no-op.
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
parent 761b4f69
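For context, a condensed sketch of what the abiv2 cacheflush header's entry points reduce to after this change, assembled from the hunks below (not quoted verbatim from any single file): with a physically indexed, physically tagged (PIPT) cache the per-page and per-range MM hooks can be empty, and only the explicit icache range flush still needs to write back and invalidate via cache_wbinv_range().

/*
 * Sketch of the abiv2 cache-flush hooks after this change,
 * taken from the hunks below.
 */
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_icache_range(start, end)		cache_wbinv_range(start, end)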
@@ -49,9 +49,6 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u
 #define flush_icache_page(vma, page)		do {} while (0);
 #define flush_icache_range(start, end)		cache_wbinv_range(start, end)
 
-#define flush_icache_user_range(vma,page,addr,len) \
-	flush_dcache_page(page)
-
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 do { \
 	memcpy(dst, src, len); \
@@ -6,29 +6,6 @@
 #include <linux/mm.h>
 #include <asm/cache.h>
 
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-	unsigned long start;
-
-	start = (unsigned long) kmap_atomic(page);
-
-	cache_wbinv_range(start, start + PAGE_SIZE);
-
-	kunmap_atomic((void *)start);
-}
-
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
-			     unsigned long vaddr, int len)
-{
-	unsigned long kaddr;
-
-	kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);
-
-	cache_wbinv_range(kaddr, kaddr + len);
-
-	kunmap_atomic((void *)kaddr);
-}
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	pte_t *pte)
 {
@@ -13,25 +13,16 @@
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_dup_mm(mm)			do { } while (0)
-
-#define flush_cache_range(vma, start, end) \
-	do { \
-		if (vma->vm_flags & VM_EXEC) \
-			icache_inv_all(); \
-	} while (0)
-
+#define flush_cache_range(vma, start, end)	do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
 #define flush_dcache_page(page)		do { } while (0)
 #define flush_dcache_mmap_lock(mapping)	do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
+#define flush_icache_page(vma, page)		do { } while (0)
 #define flush_icache_range(start, end)		cache_wbinv_range(start, end)
 
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
-			     unsigned long vaddr, int len);
-
 #define flush_cache_vmap(start, end)		do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)