Commit 7f0424f9 authored by Russell King, committed by Linus Torvalds

[PATCH] fix problematic flush_cache_page in kernel/ptrace.c

At present, flush_cache_page() is used to handle the case where we
unmap a page or alter the page permissions on the target page, with
one exception: access_process_vm().  Based upon the former uses, the
question an architecture answers when implementing this function is:

        do we need to flush the cache when we unmap or change
        the mapping permissions?

However, access_process_vm() in kernel/ptrace.c folds a second question into it:

        or do we need to ensure cache coherency between the kernel
        and user space mappings of this page?

I argue that the use of flush_cache_page() here in the generic
code is wrong, and that if an architecture wishes to use it for this
purpose, it should do so within its architecture-private
implementations of copy_to_user_page() and copy_from_user_page().

So this patch removes the flush_cache_page() call from kernel/ptrace.c
and adds it to the arch-specific copy_{to,from}_user_page()
implementations wherever flush_cache_page() is non-empty.
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 59f9f96e
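
The division of responsibility this patch establishes: access_process_vm()
just kmap()s the page and calls the copy helpers, and each architecture
decides inside copy_{to,from}_user_page() what coherency work the copy
needs. A minimal sketch of the resulting contract, assuming a virtually
indexed cache that needs both steps (illustrative only, not any particular
architecture's implementation):

	/*
	 * Sketch only: what an architecture-private implementation
	 * looks like under the new rule.  vma/page/vaddr name the
	 * user mapping; dst/src/len describe the kernel-side copy,
	 * with dst/src pointing into the kmap()'d kernel alias.
	 */
	#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {								\
		/* flush the user-space alias before writing */		\
		flush_cache_page(vma, vaddr);				\
		/* the copy itself goes through the kernel mapping */	\
		memcpy(dst, src, len);					\
		/* make the I-cache coherent, for CPUs whose stores	\
		   are not snooped by it */				\
		flush_icache_user_range(vma, page, vaddr, len);		\
	} while (0)

copy_from_user_page() takes the same shape without the I-cache step, as
the hunks below show.
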
@@ -354,10 +354,6 @@ maps this page at its virtual address.
 	of arbitrary user pages (f.e. for ptrace()) it will use
 	these two routines.
 
-	The page has been kmap()'d, and flush_cache_page() has
-	just been called for the user mapping of this page (if
-	necessary).
-
 	Any necessary cache flushing or other coherency operations
 	that need to occur should happen here.  If the processor's
 	instruction cache does not snoop cpu stores, it is very

@@ -237,11 +237,17 @@ extern void dmac_flush_range(unsigned long, unsigned long);
  * space" model to handle this.
  */
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do { memcpy(dst, src, len); \
-	     flush_icache_user_range(vma, page, vaddr, len); \
-	} while (0)
+	do {					\
+		flush_cache_page(vma, vaddr);	\
+		memcpy(dst, src, len);		\
+		flush_dcache_page(page);	\
+	} while (0)
+
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+	do {					\
+		flush_cache_page(vma, vaddr);	\
+		memcpy(dst, src, len);		\
+	} while (0)
 
 /*
  * Convert calls to our calling convention.

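A note on the design choice above: ARM replaces the old
flush_icache_user_range() call with flush_dcache_page() after the copy.
On ARM's virtually indexed caches the memcpy() dirties the kernel-mapped
alias of the page, and flush_dcache_page() is what pushes that alias out
so the user mapping, including any executable mapping, sees the new
contents. This reading is inferred from the hunk, not stated in the
commit message.
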
@@ -132,10 +132,18 @@ static inline void __flush_page_to_ram(void *vaddr)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+	do {					\
+		flush_cache_page(vma, vaddr);	\
+		memcpy(dst, src, len);		\
+	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+	do {					\
+		flush_cache_page(vma, vaddr);	\
+		memcpy(dst, src, len);		\
+	} while (0)
 
 extern void flush_icache_range(unsigned long address, unsigned long endaddr);

@@ -77,11 +77,17 @@ extern void flush_dcache_page(struct page *page);
 #define flush_icache_range(s,e)	do { flush_kernel_dcache_range_asm(s,e); flush_kernel_icache_range_asm(s,e); } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do { memcpy(dst, src, len); \
+	do { \
+		flush_cache_page(vma, vaddr); \
+		memcpy(dst, src, len); \
 		flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
 	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+	do { \
+		flush_cache_page(vma, vaddr); \
+		memcpy(dst, src, len); \
+	} while (0)
 
 static inline void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)

@@ -14,10 +14,16 @@ extern void __flush_invalidate_region(void *start, int size);
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do { memcpy(dst, src, len); \
+	do { \
+		flush_cache_page(vma, vaddr); \
+		memcpy(dst, src, len); \
 		flush_icache_user_range(vma, page, vaddr, len); \
 	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+	do { \
+		flush_cache_page(vma, vaddr); \
+		memcpy(dst, src, len); \
+	} while (0)
 
 #endif /* __ASM_SH_CACHEFLUSH_H */

@@ -30,13 +30,17 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
 #define flush_icache_page(vma, page)	do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do { memcpy(dst, src, len); \
+	do { \
+		flush_cache_page(vma, vaddr); \
+		memcpy(dst, src, len); \
 		flush_icache_user_range(vma, page, vaddr, len); \
 	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+	do { \
+		flush_cache_page(vma, vaddr); \
+		memcpy(dst, src, len); \
+	} while (0)
 
 #endif /* __ASSEMBLY__ */

@@ -57,9 +57,15 @@ BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+	do { \
+		flush_cache_page(vma, vaddr); \
+		memcpy(dst, src, len); \
+	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+	do { \
+		flush_cache_page(vma, vaddr); \
+		memcpy(dst, src, len); \
+	} while (0)
 
 BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
 BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)

@@ -38,9 +38,16 @@ extern void __flush_dcache_range(unsigned long start, unsigned long end);
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+	do { \
+		flush_cache_page(vma, vaddr); \
+		memcpy(dst, src, len); \
+	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+	do { \
+		flush_cache_page(vma, vaddr); \
+		memcpy(dst, src, len); \
+	} while (0)
 
 extern void flush_dcache_page(struct page *page);
 #define flush_dcache_mmap_lock(mapping)	do { } while (0)

@@ -202,8 +202,6 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 		if (bytes > PAGE_SIZE-offset)
 			bytes = PAGE_SIZE-offset;
 
-		flush_cache_page(vma, addr);
-
 		maddr = kmap(page);
 		if (write) {
 			copy_to_user_page(vma, page, addr,

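For context, the surrounding loop in access_process_vm() then reads
approximately as follows (reconstructed from 2.6-era sources and
abbreviated, so treat details like set_page_dirty_lock() as era-specific
rather than guaranteed by this patch): the page comes from
get_user_pages(), and all cache flushing now happens inside the copy
helpers.

		ret = get_user_pages(tsk, mm, addr, 1, write, 1, &page, &vma);
		if (ret <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);		/* kernel-side alias */
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);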