Commit e97fde15 authored by Paul Mackerras's avatar Paul Mackerras Committed by Linus Torvalds

[PATCH] flush_icache_user_range (v2.5.4)

The patch below changes access_process_vm to use a new architecture
hook, flush_icache_user_range, instead of flush_icache_page, and adds
a definition of flush_icache_user_range which does the same thing as
flush_icache_page for all architectures except PPC.  (The PPC update
that is in Linus' BK tree already includes a suitable definition of
flush_icache_user_range.)

The reason for doing this is that when flush_icache_page is called
from do_no_page or do_swap_page, I want to be able to do the flush
conditionally, based on the state of the page.  In contrast,
access_process_vm needs to do the flush unconditionally since it has
just modified the page.  In the access_process_vm case it is useful to
have the information about the user address and length that have been
modified since then we can just flush the affected cache lines rather
than the whole page.

This patch should make it easy to improve performance on alpha, since
there (as I understand it) the icache flush is not needed at all in
do_no_page or do_swap_page, but is needed in access_process_vm.  All
that is needed is to make flush_icache_page a noop on alpha.  The
patch below doesn't do this, I'll let the alpha maintainers push that
change if they want.
parent ebae1d3a
--- Documentation/cachetlb.txt
@@ -341,6 +341,17 @@ Here is the new interface:
 	If the icache does not snoop stores then this routine will need
 	to flush it.

+	void flush_icache_user_range(struct vm_area_struct *vma,
+			struct page *page, unsigned long addr, int len)
+	This is called when the kernel stores into addresses that are
+	part of the address space of a user process (which may be some
+	other process than the current process).  The addr argument
+	gives the virtual address in that process's address space,
+	page is the page which is being modified, and len indicates
+	how many bytes have been modified.  The modified region must
+	not cross a page boundary.  Currently this is only called from
+	kernel/ptrace.c.
+
 	void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 	All the functionality of flush_icache_page can be implemented in
 	flush_dcache_page and update_mmu_cache. In 2.5 the hope is to
--- arch/alpha/kernel/smp.c
@@ -1061,7 +1061,8 @@ ipi_flush_icache_page(void *x)
 }

 void
-flush_icache_page(struct vm_area_struct *vma, struct page *page)
+flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			unsigned long addr, int len)
 {
 	struct mm_struct *mm = vma->vm_mm;
--- include/asm-alpha (icache flushing header)
@@ -70,8 +70,7 @@ flush_tlb_other(struct mm_struct *mm)
 }

 /* We need to flush the userspace icache after setting breakpoints in
-   ptrace.  I don't think it's needed in do_swap_page, or do_no_page,
-   but I don't know how to get rid of it either.
+   ptrace.

    Instead of indiscriminately using imb, take advantage of the fact
    that icache entries are tagged with the ASN and load a new mm context.  */

@@ -79,7 +78,8 @@ flush_tlb_other(struct mm_struct *mm)
 #ifndef CONFIG_SMP
 static inline void
-flush_icache_page(struct vm_area_struct *vma, struct page *page)
+flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			unsigned long addr, int len)
 {
 	if (vma->vm_flags & VM_EXEC) {
 		struct mm_struct *mm = vma->vm_mm;

@@ -90,9 +90,13 @@ flush_icache_page(struct vm_area_struct *vma, struct page *page)
 	}
 }
 #else
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void flush_icache_user_range(struct vm_area_struct *vma,
+			struct page *page, unsigned long addr, int len);
 #endif

+/* this is used only in do_no_page and do_swap_page */
+#define flush_icache_page(vma, page)	flush_icache_user_range((vma), (page), 0, 0)
+
 /*
  * Flush just one page in the current TLB set.
  * We need to be very careful about the icache here, there
--- include/asm-cris (cache flushing header)
@@ -125,6 +125,7 @@ extern void paging_init(void);
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

 /*
  * TLB flushing (implemented in arch/cris/mm/tlb.c):
--- include/asm-i386 (cache flushing header)
@@ -33,6 +33,7 @@ extern void paging_init(void);
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

 #define __flush_tlb() \
 	do { \
--- include/asm-m68k (cache flushing header)
@@ -127,6 +127,7 @@ extern inline void __flush_page_to_ram(unsigned long address)
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

 /* Push n pages at kernel virtual address and clear the icache */
 /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
--- include/asm-mips (cache flushing header)
@@ -51,6 +51,8 @@ extern void (*_flush_icache_page)(struct vm_area_struct *vma,
 #define flush_icache_range(start, end)	_flush_icache_range(start,end)
 #define flush_icache_page(vma, page)	_flush_icache_page(vma, page)
+#define flush_icache_user_range(vma, page, addr, len) \
+	_flush_icache_page((vma), (page))

 /*
--- include/asm-mips64 (cache flushing header)
@@ -43,6 +43,8 @@ extern void (*_flush_page_to_ram)(struct page * page);
 #define flush_page_to_ram(page)		_flush_page_to_ram(page)
 #define flush_icache_range(start, end)	_flush_cache_l1()
+#define flush_icache_user_range(vma, page, addr, len) \
+	flush_icache_page((vma), (page))
 #define flush_icache_page(vma, page)					\
 do {									\

@@ -66,6 +68,8 @@ extern void andes_flush_icache_page(unsigned long);
 #define flush_cache_page(vma,page)	do { } while(0)
 #define flush_page_to_ram(page)		do { } while(0)
 #define flush_icache_range(start, end)	_flush_cache_l1()
+#define flush_icache_user_range(vma, page, addr, len) \
+	flush_icache_page((vma), (page))
 #define flush_icache_page(vma, page)					\
 do {									\
 	if ((vma)->vm_flags & VM_EXEC)					\
--- arch header using __flush_icache_range
@@ -106,6 +106,9 @@ extern inline void flush_cache_mm(struct mm_struct *mm) {
 #define flush_icache_range(start, end)		\
 	__flush_icache_range(start, end - start)
+#define flush_icache_user_range(vma, page, addr, len) \
+	flush_icache_page((vma), (page))
 #define flush_icache_page(vma, page)		\
 	__flush_icache_range(page_address(page), PAGE_SIZE)
--- include/asm-s390 (cache flushing header)
@@ -42,6 +42,7 @@ extern void paging_init(void);
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

 /*
  * The S390 doesn't have any external MMU info: the kernel page
--- include/asm-s390x (cache flushing header)
@@ -38,6 +38,7 @@ extern void paging_init(void);
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

 /*
  * The S390 doesn't have any external MMU info: the kernel page
--- include/asm-sh (cache flushing header)
@@ -41,6 +41,7 @@ extern void paging_init(void);
 #define flush_dcache_page(page)			do { } while (0)
 #define flush_icache_range(start, end)		do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 #define flush_cache_sigtramp(vaddr)		do { } while (0)

 #define p3_cache_init()				do { } while (0)

@@ -64,6 +65,7 @@ extern void flush_cache_sigtramp(unsigned long addr);
 #define flush_page_to_ram(page)			do { } while (0)
 #define flush_icache_page(vma,pg)		do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

 /* Initialization of P3 area for copy_user_page */
 extern void p3_cache_init(void);
--- include/asm-sparc (pgtable header)
@@ -348,6 +348,7 @@ BTFIXUPDEF_CALL(pte_t *, pte_offset, pmd_t *, unsigned long)
 extern unsigned int pg_iobits;

 #define flush_icache_page(vma, pg)		do { } while(0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

 /* Certain architectures need to do special things when pte's
  * within a page table are directly modified.  Thus, the following
--- include/asm-sparc64 (pgtable header)
@@ -277,6 +277,7 @@ extern pgd_t swapper_pg_dir[1];
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

 #define flush_icache_page(vma, pg)		do { } while(0)
+#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

 /* Make a non-present pseudo-TTE. */
 extern inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
--- kernel/ptrace.c
@@ -151,7 +151,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 		if (write) {
 			memcpy(maddr + offset, buf, bytes);
 			flush_page_to_ram(page);
-			flush_icache_page(vma, page);
+			flush_icache_user_range(vma, page, addr, bytes);
 		} else {
 			memcpy(buf, maddr + offset, bytes);
 			flush_page_to_ram(page);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment