Commit 203b7b6a authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: rationalise flush_icache_pages() and flush_icache_page()

Move the default (no-op) implementation of flush_icache_pages() from
<asm-generic/cacheflush.h> to <linux/cacheflush.h>.  Remove the
flush_icache_page() wrapper from each architecture and provide a single
generic definition in <linux/cacheflush.h>.

Link: https://lkml.kernel.org/r/20230802151406.3735276-32-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 29269ad9
...@@ -53,10 +53,6 @@ extern void flush_icache_user_page(struct vm_area_struct *vma, ...@@ -53,10 +53,6 @@ extern void flush_icache_user_page(struct vm_area_struct *vma,
#define flush_icache_user_page flush_icache_user_page #define flush_icache_user_page flush_icache_user_page
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
/* This is used only in __do_fault and do_swap_page. */
#define flush_icache_page(vma, page) \
flush_icache_user_page((vma), (page), 0, 0)
/* /*
* Both implementations of flush_icache_user_page flush the entire * Both implementations of flush_icache_user_page flush the entire
* address space, so one call, no matter how many pages. * address space, so one call, no matter how many pages.
...@@ -66,6 +62,7 @@ static inline void flush_icache_pages(struct vm_area_struct *vma, ...@@ -66,6 +62,7 @@ static inline void flush_icache_pages(struct vm_area_struct *vma,
{ {
flush_icache_user_page(vma, page, 0, 0); flush_icache_user_page(vma, page, 0, 0);
} }
#define flush_icache_pages flush_icache_pages
#include <asm-generic/cacheflush.h> #include <asm-generic/cacheflush.h>
......
...@@ -18,15 +18,6 @@ ...@@ -18,15 +18,6 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/shmparam.h> #include <asm/shmparam.h>
/*
* Semantically we need this because icache doesn't snoop dcache/dma.
* However ARC Cache flush requires paddr as well as vaddr, latter not available
* in the flush_icache_page() API. So we no-op it but do the equivalent work
* in update_mmu_cache()
*/
#define flush_icache_page(vma, page)
#define flush_icache_pages(vma, page, nr)
void flush_cache_all(void); void flush_cache_all(void);
void flush_icache_range(unsigned long kstart, unsigned long kend); void flush_icache_range(unsigned long kstart, unsigned long kend);
......
...@@ -321,13 +321,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma, ...@@ -321,13 +321,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages) #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages) #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/*
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
*/
#define flush_icache_page(vma,page) do { } while (0)
#define flush_icache_pages(vma, page, nr) do { } while (0)
/* /*
* flush_cache_vmap() is used when creating mappings (eg, via vmap, * flush_cache_vmap() is used when creating mappings (eg, via vmap,
* vmalloc, ioremap etc) in kernel space for pages. On non-VIPT * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
......
...@@ -45,7 +45,6 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u ...@@ -45,7 +45,6 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u
#define flush_cache_vmap(start, end) cache_wbinv_all() #define flush_cache_vmap(start, end) cache_wbinv_all()
#define flush_cache_vunmap(start, end) cache_wbinv_all() #define flush_cache_vunmap(start, end) cache_wbinv_all()
#define flush_icache_page(vma, page) do {} while (0);
#define flush_icache_range(start, end) cache_wbinv_range(start, end) #define flush_icache_range(start, end) cache_wbinv_range(start, end)
#define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end) #define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end)
#define flush_icache_deferred(mm) do {} while (0); #define flush_icache_deferred(mm) do {} while (0);
......
...@@ -33,7 +33,6 @@ static inline void flush_dcache_page(struct page *page) ...@@ -33,7 +33,6 @@ static inline void flush_dcache_page(struct page *page)
#define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, page) do { } while (0)
#define flush_icache_range(start, end) cache_wbinv_range(start, end) #define flush_icache_range(start, end) cache_wbinv_range(start, end)
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
* - flush_cache_range(vma, start, end) flushes a range of pages * - flush_cache_range(vma, start, end) flushes a range of pages
* - flush_icache_range(start, end) flush a range of instructions * - flush_icache_range(start, end) flush a range of instructions
* - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
* - flush_icache_page(vma, pg) flushes(invalidates) a page for icache * - flush_icache_pages(vma, pg, nr) flushes(invalidates) nr pages for icache
* *
* Need to doublecheck which one is really needed for ptrace stuff to work. * Need to doublecheck which one is really needed for ptrace stuff to work.
*/ */
......
...@@ -46,8 +46,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end); ...@@ -46,8 +46,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end);
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0)
#define flush_icache_page(vma, page) do { } while (0)
#define flush_icache_pages(vma, page) do { } while (0)
#define flush_icache_user_page(vma, page, addr, len) do { } while (0) #define flush_icache_user_page(vma, page, addr, len) do { } while (0)
#define flush_dcache_page(page) do { } while (0) #define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0)
......
...@@ -261,7 +261,6 @@ static inline void __flush_pages_to_ram(void *vaddr, unsigned int nr) ...@@ -261,7 +261,6 @@ static inline void __flush_pages_to_ram(void *vaddr, unsigned int nr)
#define flush_dcache_mmap_unlock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_pages(vma, page, nr) \ #define flush_icache_pages(vma, page, nr) \
__flush_pages_to_ram(page_address(page), nr) __flush_pages_to_ram(page_address(page), nr)
#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long addr, int len); unsigned long addr, int len);
......
...@@ -82,12 +82,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma, ...@@ -82,12 +82,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
__flush_anon_page(page, vmaddr); __flush_anon_page(page, vmaddr);
} }
static inline void flush_icache_pages(struct vm_area_struct *vma,
struct page *page, unsigned int nr)
{
}
#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
extern void (*flush_icache_range)(unsigned long start, unsigned long end); extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end); extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*__flush_icache_user_range)(unsigned long start, extern void (*__flush_icache_user_range)(unsigned long start,
......
...@@ -35,7 +35,7 @@ void flush_dcache_folio(struct folio *folio); ...@@ -35,7 +35,7 @@ void flush_dcache_folio(struct folio *folio);
extern void flush_icache_range(unsigned long start, unsigned long end); extern void flush_icache_range(unsigned long start, unsigned long end);
void flush_icache_pages(struct vm_area_struct *vma, struct page *page, void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
unsigned int nr); unsigned int nr);
#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1); #define flush_icache_pages flush_icache_pages
#define flush_cache_vmap(start, end) flush_dcache_range(start, end) #define flush_cache_vmap(start, end) flush_dcache_range(start, end)
#define flush_cache_vunmap(start, end) flush_dcache_range(start, end) #define flush_cache_vunmap(start, end) flush_dcache_range(start, end)
......
...@@ -60,7 +60,7 @@ static inline void flush_dcache_page(struct page *page) ...@@ -60,7 +60,7 @@ static inline void flush_dcache_page(struct page *page)
void flush_icache_pages(struct vm_area_struct *vma, struct page *page, void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
unsigned int nr); unsigned int nr);
#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1) #define flush_icache_pages flush_icache_pages
#define flush_icache_range(s,e) do { \ #define flush_icache_range(s,e) do { \
flush_kernel_dcache_range_asm(s,e); \ flush_kernel_dcache_range_asm(s,e); \
......
...@@ -53,7 +53,7 @@ extern void flush_icache_range(unsigned long start, unsigned long end); ...@@ -53,7 +53,7 @@ extern void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_user_range flush_icache_range #define flush_icache_user_range flush_icache_range
void flush_icache_pages(struct vm_area_struct *vma, struct page *page, void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
unsigned int nr); unsigned int nr);
#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1) #define flush_icache_pages flush_icache_pages
extern void flush_cache_sigtramp(unsigned long address); extern void flush_cache_sigtramp(unsigned long address);
struct flusher_data { struct flusher_data {
......
...@@ -16,8 +16,6 @@ ...@@ -16,8 +16,6 @@
#define flush_cache_page(vma,addr,pfn) \ #define flush_cache_page(vma,addr,pfn) \
sparc32_cachetlb_ops->cache_page(vma, addr) sparc32_cachetlb_ops->cache_page(vma, addr)
#define flush_icache_range(start, end) do { } while (0) #define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_pages(vma, pg, nr) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \ do { \
......
...@@ -53,9 +53,6 @@ static inline void flush_dcache_page(struct page *page) ...@@ -53,9 +53,6 @@ static inline void flush_dcache_page(struct page *page)
flush_dcache_folio(page_folio(page)); flush_dcache_folio(page_folio(page));
} }
#define flush_icache_page(vma, pg) do { } while(0)
#define flush_icache_pages(vma, pg, nr) do { } while(0)
void flush_ptrace_access(struct vm_area_struct *, struct page *, void flush_ptrace_access(struct vm_area_struct *, struct page *,
unsigned long uaddr, void *kaddr, unsigned long uaddr, void *kaddr,
unsigned long len, int write); unsigned long len, int write);
......
...@@ -160,10 +160,6 @@ void local_flush_cache_page(struct vm_area_struct *vma, ...@@ -160,10 +160,6 @@ void local_flush_cache_page(struct vm_area_struct *vma,
__invalidate_icache_range(start,(end) - (start)); \ __invalidate_icache_range(start,(end) - (start)); \
} while (0) } while (0)
/* This is not required, see Documentation/core-api/cachetlb.rst */
#define flush_icache_page(vma,page) do { } while (0)
#define flush_icache_pages(vma, page, nr) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0)
......
...@@ -77,18 +77,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end) ...@@ -77,18 +77,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
#define flush_icache_user_range flush_icache_range #define flush_icache_user_range flush_icache_range
#endif #endif
#ifndef flush_icache_page
static inline void flush_icache_pages(struct vm_area_struct *vma,
struct page *page, unsigned int nr)
{
}
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
}
#endif
#ifndef flush_icache_user_page #ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma, static inline void flush_icache_user_page(struct vm_area_struct *vma,
struct page *page, struct page *page,
......
...@@ -17,4 +17,13 @@ static inline void flush_dcache_folio(struct folio *folio) ...@@ -17,4 +17,13 @@ static inline void flush_dcache_folio(struct folio *folio)
#define flush_dcache_folio flush_dcache_folio #define flush_dcache_folio flush_dcache_folio
#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */ #endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
#ifndef flush_icache_pages
static inline void flush_icache_pages(struct vm_area_struct *vma,
struct page *page, unsigned int nr)
{
}
#endif
#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
#endif /* _LINUX_CACHEFLUSH_H */ #endif /* _LINUX_CACHEFLUSH_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment