Commit 4a72e942 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] rmap 23 empty flush_dcache_mmap_lock

From: Hugh Dickins <hugh@veritas.com>

Most architectures (i386, for example) either do nothing in flush_dcache_page or do not scan i_mmap there, so they need flush_dcache_mmap_lock to do nothing at all: define it and flush_dcache_mmap_unlock away as no-ops.  While at it, arm26, cris and h8300 were noticed still defining flush_page_to_ram: delete it again.
parent 16ceff2d
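For context, the two patterns involved are sketched below.  This is a minimal illustration, not lifted from any particular architecture's header; the config symbol and the i_mmap_lock field are hypothetical placeholders, standing in for whatever lock protects a mapping's i_mmap lists on an architecture whose flush_dcache_page() really does walk them.

#include <linux/spinlock.h>

/*
 * Minimal sketch only -- not from any real header.
 */
#ifdef CONFIG_HYPOTHETICAL_DCACHE_ALIASING
/*
 * An architecture whose flush_dcache_page() walks the vmas on
 * mapping->i_mmap must keep those lists stable during the walk, so the
 * hooks take whatever lock the core VM uses to protect them
 * (the i_mmap_lock name here is illustrative).
 */
#define flush_dcache_mmap_lock(mapping)		spin_lock(&(mapping)->i_mmap_lock)
#define flush_dcache_mmap_unlock(mapping)	spin_unlock(&(mapping)->i_mmap_lock)
#else
/*
 * The common case this patch addresses: flush_dcache_page() never looks
 * at i_mmap, so the hooks compile away to nothing.
 */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#endif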
@@ -10,6 +10,8 @@
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define flush_cache_vmap(start, end) do { } while (0)
 #define flush_cache_vunmap(start, end) do { } while (0)
...
@@ -24,7 +24,6 @@
 #define flush_cache_mm(mm) do { } while (0)
 #define flush_cache_range(vma,start,end) do { } while (0)
 #define flush_cache_page(vma,vmaddr) do { } while (0)
-#define flush_page_to_ram(page) do { } while (0)
 #define flush_cache_vmap(start, end) do { } while (0)
 #define flush_cache_vunmap(start, end) do { } while (0)
@@ -32,6 +31,8 @@
 #define clean_dcache_range(start,end) do { } while (0)
 #define flush_dcache_range(start,end) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define clean_dcache_entry(_s) do { } while (0)
 #define clean_cache_entry(_start) do { } while (0)
...
@@ -11,8 +11,9 @@
 #define flush_cache_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr) do { } while (0)
-#define flush_page_to_ram(page) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define flush_icache_range(start, end) do { } while (0)
 #define flush_icache_page(vma,pg) do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
...
@@ -14,8 +14,9 @@
 #define flush_cache_mm(mm)
 #define flush_cache_range(vma,a,b)
 #define flush_cache_page(vma,p)
-#define flush_page_to_ram(page)
 #define flush_dcache_page(page)
+#define flush_dcache_mmap_lock(mapping)
+#define flush_dcache_mmap_unlock(mapping)
 #define flush_icache()
 #define flush_icache_page(vma,page)
 #define flush_icache_range(start,len)
...
@@ -29,6 +29,9 @@ do { \
 	clear_bit(PG_arch_1, &(page)->flags); \
 } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 extern void flush_icache_range (unsigned long start, unsigned long end);
 #define flush_icache_user_range(vma, page, user_addr, len) \
...
@@ -128,6 +128,8 @@ static inline void __flush_page_to_ram(void *vaddr)
 }
 #define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
...
@@ -12,6 +12,8 @@
 #define flush_cache_page(vma, vmaddr) do { } while (0)
 #define flush_dcache_range(start,len) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define flush_icache_range(start,len) __flush_cache_all()
 #define flush_icache_page(vma,pg) do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
...
@@ -45,6 +45,9 @@ static inline void flush_dcache_page(struct page *page)
 }
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 extern void (*flush_icache_page)(struct vm_area_struct *vma,
 	struct page *page);
 extern void (*flush_icache_range)(unsigned long start, unsigned long end);
...
@@ -28,6 +28,9 @@
 #define flush_cache_vunmap(start, end) do { } while (0)
 extern void flush_dcache_page(struct page *page);
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 extern void flush_icache_range(unsigned long, unsigned long);
 extern void flush_icache_user_range(struct vm_area_struct *vma,
 	struct page *page, unsigned long addr, int len);
...
@@ -18,6 +18,9 @@
 #define flush_cache_vunmap(start, end) do { } while (0)
 extern void flush_dcache_page(struct page *page);
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 extern void __flush_icache_range(unsigned long, unsigned long);
 extern void flush_icache_user_range(struct vm_area_struct *vma,
 	struct page *page, unsigned long addr,
...
@@ -10,6 +10,8 @@
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define flush_icache_range(start, end) do { } while (0)
 #define flush_icache_page(vma,pg) do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
...
@@ -30,6 +30,8 @@
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define flush_icache_range(start, end) do { } while (0)
 #define flush_icache_page(vma,pg) do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
...
@@ -30,6 +30,8 @@
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define flush_icache_range(start, end) do { } while (0)
 #define flush_icache_page(vma,pg) do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
...
@@ -30,6 +30,10 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
 extern void flush_dcache_page(struct page *pg);
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void flush_cache_sigtramp(unsigned long addr);
 extern void flush_icache_user_range(struct vm_area_struct *vma,
...
@@ -70,6 +70,8 @@ BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
 extern void sparc_flush_page_to_ram(struct page *page);
 #define flush_dcache_page(page) sparc_flush_page_to_ram(page)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define flush_cache_vmap(start, end) flush_cache_all()
 #define flush_cache_vunmap(start, end) flush_cache_all()
...
@@ -42,6 +42,8 @@ extern void __flush_dcache_range(unsigned long start, unsigned long end);
 	memcpy(dst, src, len)
 extern void flush_dcache_page(struct page *page);
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define flush_cache_vmap(start, end) do { } while (0)
 #define flush_cache_vunmap(start, end) do { } while (0)
...
@@ -27,6 +27,8 @@
 #define flush_cache_range(vma, start, end) ((void)0)
 #define flush_cache_page(vma, vmaddr) ((void)0)
 #define flush_dcache_page(page) ((void)0)
+#define flush_dcache_mmap_lock(mapping) ((void)0)
+#define flush_dcache_mmap_unlock(mapping) ((void)0)
 #define flush_cache_vmap(start, end) ((void)0)
 #define flush_cache_vunmap(start, end) ((void)0)
...
@@ -10,6 +10,8 @@
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define flush_icache_range(start, end) do { } while (0)
 #define flush_icache_page(vma,pg) do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
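For completeness, a hedged sketch of the caller side: core VM code that modifies a file's i_mmap lists brackets the update with these hooks, so an aliasing architecture's flush_dcache_page() can walk the lists safely while everyone else pays nothing.  The helper below is not quoted from this patch; the field names (vma->shared, i_mmap, i_mmap_shared) follow the 2.6.6-era structures and are illustrative only.

#include <linux/list.h>
#include <linux/mm.h>

/*
 * Hypothetical helper: link a vma into a file's reverse-mapping list,
 * bracketed by the flush_dcache_mmap hooks introduced in this series.
 */
static void vma_link_file_sketch(struct address_space *mapping,
				 struct vm_area_struct *vma)
{
	flush_dcache_mmap_lock(mapping);	/* no-op on most architectures */
	if (vma->vm_flags & VM_SHARED)
		list_add_tail(&vma->shared, &mapping->i_mmap_shared);
	else
		list_add_tail(&vma->shared, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}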