Commit 5019f760 authored by Christoph Hellwig, committed by Linus Torvalds

powerpc: use asm-generic/cacheflush.h

Power needs almost no cache flushing routines of its own.  Rely on
asm-generic/cacheflush.h for the defaults.

Also remove the pointless __KERNEL__ ifdef while we're at it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Link: http://lkml.kernel.org/r/20200515143646.3857579-17-hch@lst.de
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e0509451
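
For context: asm-generic/cacheflush.h provides empty default implementations of the cache-maintenance hooks and backs off whenever the architecture has already defined a macro of the same name, which is why the diff below adds lines such as #define flush_icache_range flush_icache_range next to the prototypes powerpc keeps. A minimal sketch of that convention (assuming the 5.8-era generic header, not quoted verbatim):

/*
 * Illustrative only: the generic header supplies a no-op stub
 * unless the architecture defined a macro of the same name.
 */
#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	/* default: nothing to do when a new mapping is created */
}
#endif

#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	/* default: no instruction-cache maintenance required */
}
#endif

An architecture that needs real behaviour, as powerpc does for flush_icache_range and (on Book3s 64) flush_cache_vmap, declares its own version and defines the macro so the stubs are skipped.
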
arch/powerpc/include/asm/cacheflush.h
@@ -4,23 +4,9 @@
 #ifndef _ASM_POWERPC_CACHEFLUSH_H
 #define _ASM_POWERPC_CACHEFLUSH_H
-#ifdef __KERNEL__
 
 #include <linux/mm.h>
 #include <asm/cputable.h>
 
-/*
- * No cache flushing is required when address mappings are changed,
- * because the caches on PowerPCs are physically addressed.
- */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define flush_icache_page(vma, page) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
 #ifdef CONFIG_PPC_BOOK3S_64
 /*
  * Book3s has no ptesync after setting a pte, so without this ptesync it's
@@ -33,20 +19,20 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 {
 	asm volatile("ptesync" ::: "memory");
 }
-#else
-static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
-#endif
+#define flush_cache_vmap flush_cache_vmap
+#endif /* CONFIG_PPC_BOOK3S_64 */
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *page);
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
 
 void flush_icache_range(unsigned long start, unsigned long stop);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-		struct page *page, unsigned long addr,
-		int len);
-extern void flush_dcache_icache_page(struct page *page);
+#define flush_icache_range flush_icache_range
+
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+		unsigned long addr, int len);
+#define flush_icache_user_range flush_icache_user_range
+
+void flush_dcache_icache_page(struct page *page);
 void __flush_dcache_icache(void *page);
 
 /**
@@ -111,14 +97,6 @@ static inline void invalidate_dcache_range(unsigned long start,
 	mb();	/* sync */
 }
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do { \
-		memcpy(dst, src, len); \
-		flush_icache_user_range(vma, page, vaddr, len); \
-	} while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
-
-#endif /* __KERNEL__ */
+#include <asm-generic/cacheflush.h>
+
 #endif /* _ASM_POWERPC_CACHEFLUSH_H */
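
The copy_to_user_page()/copy_from_user_page() definitions removed in the last hunk can go because asm-generic/cacheflush.h falls back to equivalent generic versions when the architecture does not define them. Roughly (a sketch of the generic fallbacks, assuming the same 5.8-era header):

#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		flush_icache_user_range(vma, page, vaddr, len);	\
	} while (0)
#endif

#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)
#endif

Since these match what powerpc defined locally, dropping the local copies does not change behaviour.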