Commit ec8c0446 authored by Ralf Baechle, committed by Linus Torvalds

[PATCH] Optimize D-cache alias handling on fork

Virtually indexed, physically tagged cache architectures can get away
without cache flushing when forking.  This patch adds a new cache
flushing function, flush_cache_dup_mm(struct mm_struct *), which for the
moment I've implemented to do the same thing as flush_cache_mm on all
architectures except MIPS, where it's a no-op.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent bcd02280
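
For context, a minimal sketch of the two definitions an architecture can
choose between in its asm/cacheflush.h. This is not part of the patch, and
the CONFIG_CPU_NO_DCACHE_ALIASES symbol is hypothetical, used here only to
show both cases side by side:

/* Sketch only -- not from the patch. */
#ifdef CONFIG_CPU_NO_DCACHE_ALIASES	/* hypothetical config symbol */
/*
 * Physically tagged caches hold no stale data for the child: fork()
 * only duplicates page tables, so no flush is needed.  The (void)
 * cast still evaluates 'mm', keeping callers warning-free.
 */
#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
#else
/* Conservative default: behave exactly as fork() did before this patch. */
#define flush_cache_dup_mm(mm)	flush_cache_mm(mm)
#endif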
@@ -179,10 +179,21 @@ Here are the routines, one by one:
 	lines associated with 'mm'.
 
 	This interface is used to handle whole address space
-	page table operations such as what happens during
-	fork, exit, and exec.
+	page table operations such as what happens during exit and exec.
+
+2) void flush_cache_dup_mm(struct mm_struct *mm)
+
+	This interface flushes an entire user address space from
+	the caches.  That is, after running, there will be no cache
+	lines associated with 'mm'.
+
+	This interface is used to handle whole address space
+	page table operations such as what happens during fork.
+
+	This option is separate from flush_cache_mm to allow some
+	optimizations for VIPT caches.
 
-2) void flush_cache_range(struct vm_area_struct *vma,
+3) void flush_cache_range(struct vm_area_struct *vma,
 			  unsigned long start, unsigned long end)
 
 	Here we are flushing a specific range of (user) virtual
@@ -199,7 +210,7 @@ Here are the routines, one by one:
 	call flush_cache_page (see below) for each entry which may be
 	modified.
 
-3) void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
+4) void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 
 	This time we need to remove a PAGE_SIZE sized range
 	from the cache.  The 'vma' is the backing structure used by
@@ -220,7 +231,7 @@ Here are the routines, one by one:
 	This is used primarily during fault processing.
 
-4) void flush_cache_kmaps(void)
+5) void flush_cache_kmaps(void)
 
 	This routine need only be implemented if the platform utilizes
 	highmem.  It will be called right before all of the kmaps
@@ -232,7 +243,7 @@ Here are the routines, one by one:
 	This routine should be implemented in asm/highmem.h
 
-5) void flush_cache_vmap(unsigned long start, unsigned long end)
+6) void flush_cache_vmap(unsigned long start, unsigned long end)
    void flush_cache_vunmap(unsigned long start, unsigned long end)
 
 	Here in these two interfaces we are flushing a specific range
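
The caller side is the dup_mmap() change at the bottom of this patch;
simplified (locking kept, VMA copying and error handling elided), it looks
roughly like:

static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);	/* was flush_cache_mm(oldmm) */
	/* ... duplicate each vm_area_struct and its page tables ... */
	up_write(&oldmm->mmap_sem);
	return 0;
}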
@@ -6,6 +6,7 @@
 /* Caches aren't brain-dead on the Alpha. */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
@@ -319,6 +319,8 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	unsigned long len, int write);
 #endif
 
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
 /*
  * flush_cache_user_range is used when we want to ensure that the
  * Harvard caches are synchronised for the user space address range.
@@ -22,6 +22,7 @@
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma,start,end) do { } while (0)
 #define flush_cache_page(vma,vmaddr,pfn) do { } while (0)
 #define flush_cache_vmap(start, end) do { } while (0)
@@ -87,6 +87,7 @@ void invalidate_icache_region(void *start, size_t len);
  */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_cache_vmap(start, end) do { } while (0)
@@ -9,6 +9,7 @@
  */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
@@ -20,6 +20,7 @@
  */
 #define flush_cache_all() do {} while(0)
 #define flush_cache_mm(mm) do {} while(0)
+#define flush_cache_dup_mm(mm) do {} while(0)
 #define flush_cache_range(mm, start, end) do {} while(0)
 #define flush_cache_page(vma, vmaddr, pfn) do {} while(0)
 #define flush_cache_vmap(start, end) do {} while(0)
@@ -12,6 +12,7 @@
 #define flush_cache_all()
 #define flush_cache_mm(mm)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma,a,b)
 #define flush_cache_page(vma,p,pfn)
 #define flush_dcache_page(page)
@@ -7,6 +7,7 @@
 /* Caches aren't brain-dead on the intel. */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
@@ -18,6 +18,7 @@
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_icache_page(vma,page) do { } while (0)
@@ -9,6 +9,7 @@ extern void _flush_cache_copyback_all(void);
 #if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP) || defined(CONFIG_CHIP_M32104)
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
@@ -29,6 +30,7 @@ extern void smp_flush_cache_all(void);
 #elif defined(CONFIG_CHIP_M32102)
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
@@ -41,6 +43,7 @@ extern void smp_flush_cache_all(void);
 #else
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
@@ -89,6 +89,8 @@ static inline void flush_cache_mm(struct mm_struct *mm)
 		__flush_cache_030();
 }
 
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
 /* flush_cache_range/flush_cache_page must be macros to avoid
    a dependency on linux/mm.h, which includes this file... */
 static inline void flush_cache_range(struct vm_area_struct *vma,
@@ -8,6 +8,7 @@
 #define flush_cache_all() __flush_cache_all()
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) __flush_cache_all()
 #define flush_cache_page(vma, vmaddr) do { } while (0)
 #define flush_dcache_range(start,len) __flush_cache_all()
@@ -17,6 +17,7 @@
  *
  *  - flush_cache_all() flushes entire cache
  *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ *  - flush_cache_dup_mm(mm) handles cache flushing when forking
  *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  *  - flush_cache_range(vma, start, end) flushes a range of pages
  *  - flush_icache_range(start, end) flush a range of instructions
@@ -31,6 +32,7 @@
 extern void (*flush_cache_all)(void);
 extern void (*__flush_cache_all)(void);
 extern void (*flush_cache_mm)(struct mm_struct *mm);
+#define flush_cache_dup_mm(mm) do { (void) (mm); } while (0)
 extern void (*flush_cache_range)(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end);
 extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
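
A note on the MIPS no-op above: the do { (void) (mm); } while (0) form is
the usual C idiom for an empty statement macro. It still evaluates its
argument once, so callers get no unused-variable warnings, and it behaves
as a single statement after an if. Illustrative use (need_flush is a
hypothetical condition):

	if (need_flush)
		flush_cache_dup_mm(mm);	/* safe: the macro is one statement */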
@@ -15,6 +15,8 @@
 #define flush_cache_mm(mm) flush_cache_all_local()
 #endif
 
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
 #define flush_kernel_dcache_range(start,size) \
 	flush_kernel_dcache_range_asm((start), (start)+(size));
@@ -18,6 +18,7 @@
  */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_icache_page(vma, page) do { } while (0)
@@ -7,6 +7,7 @@
 /* Caches aren't brain-dead on the s390. */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
@@ -15,6 +15,7 @@
  *
  *  - flush_cache_all() flushes entire cache
  *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ *  - flush_cache_dup_mm(mm) handles cache flushing when forking
  *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  *  - flush_cache_range(vma, start, end) flushes a range of pages
  *
@@ -27,6 +28,7 @@
  */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
@@ -15,6 +15,7 @@
  *
  *  - flush_cache_all() flushes entire cache
  *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ *  - flush_cache_dup_mm(mm) handles cache flushing when forking
  *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
  *  - flush_cache_range(vma, start, end) flushes a range of pages
  *
@@ -39,6 +40,7 @@
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 		       unsigned long end);
 void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
@@ -48,6 +50,7 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #else
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
@@ -18,6 +18,7 @@
  */
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 		       unsigned long end);
 void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
@@ -21,6 +21,8 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
 				    struct page *page, unsigned long addr,
 				    int len);
 
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
 #define flush_dcache_mmap_lock(mapping) do { } while (0)
 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
@@ -48,6 +48,7 @@ BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
 #define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
 #define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
+#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
 #define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
 #define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
 #define flush_icache_range(start, end) do { } while (0)
@@ -12,6 +12,7 @@
 /* These are the same regardless of whether this is an SMP kernel or not. */
 #define flush_cache_mm(__mm) \
 	do { if ((__mm) == current->mm) flushw_user(); } while(0)
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
 #define flush_cache_range(vma, start, end) \
 	flush_cache_mm((vma)->vm_mm)
 #define flush_cache_page(vma, page, pfn) \
@@ -24,6 +24,7 @@
    systems with MMUs, so we don't need them.  */
 #define flush_cache_all() ((void)0)
 #define flush_cache_mm(mm) ((void)0)
+#define flush_cache_dup_mm(mm) ((void)0)
 #define flush_cache_range(vma, start, end) ((void)0)
 #define flush_cache_page(vma, vmaddr, pfn) ((void)0)
 #define flush_dcache_page(page) ((void)0)
@@ -7,6 +7,7 @@
 /* Caches aren't brain-dead on the intel. */
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
@@ -75,6 +75,7 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
 #define flush_cache_all() __flush_invalidate_cache_all();
 #define flush_cache_mm(mm) __flush_invalidate_cache_all();
+#define flush_cache_dup_mm(mm) __flush_invalidate_cache_all();
 #define flush_cache_vmap(start,end) __flush_invalidate_cache_all();
 #define flush_cache_vunmap(start,end) __flush_invalidate_cache_all();
@@ -88,6 +89,7 @@ extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
 #define flush_cache_vmap(start,end) do { } while (0)
 #define flush_cache_vunmap(start,end) do { } while (0)
@@ -203,7 +203,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	struct mempolicy *pol;
 
 	down_write(&oldmm->mmap_sem);
-	flush_cache_mm(oldmm);
+	flush_cache_dup_mm(oldmm);
 	/*
 	 * Not linked in yet - no deadlock potential:
 	 */