Commit f00a75c0 authored by Russell King

ARM: Pass VMA to copy_user_highpage() implementations

Our copy_user_highpage() implementations may require cache maintenance.
Ensure that implementations have all necessary details to perform this
maintenance.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 8a0382f6
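
Background note (not part of the patch): with the VMA available, a
copy_user_highpage() implementation can make its cache maintenance
conditional on how userspace maps the page. A minimal sketch, assuming a
hypothetical example_copy_user_highpage() and an illustrative
VM_EXEC-based flush policy; neither is taken from this commit:

#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

static void example_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	/* Map both highmem pages into the kernel; era-correct two-argument
	 * kmap_atomic(), as used elsewhere in this diff. */
	void *kto = kmap_atomic(to, KM_USER0);
	void *kfrom = kmap_atomic(from, KM_USER1);

	copy_page(kto, kfrom);

	/* Hypothetical policy: the VMA says the page will be mapped
	 * executable, so clean the D-cache so the I-cache sees the
	 * freshly written data. */
	if (vma->vm_flags & VM_EXEC)
		__cpuc_flush_dcache_page(kto);

	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}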
@@ -117,11 +117,12 @@
 #endif

 struct page;
+struct vm_area_struct;

 struct cpu_user_fns {
 	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
 	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
-			unsigned long vaddr);
+			unsigned long vaddr, struct vm_area_struct *vma);
 };

 #ifdef MULTI_USER
@@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user;

 extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
 extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
-			unsigned long vaddr);
+			unsigned long vaddr, struct vm_area_struct *vma);
 #endif

 #define clear_user_highpage(page,vaddr)		\
@@ -145,7 +146,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,

 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 #define copy_user_highpage(to,from,vaddr,vma)	\
-	__cpu_copy_user_highpage(to, from, vaddr)
+	__cpu_copy_user_highpage(to, from, vaddr, vma)

 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
......
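
Because each implementation below gains the same extra parameter as the
cpu_user_fns function pointer above, the per-processor registration
blocks need no change. The pattern (as in the 2.6.32-era
copypage-feroceon.c; shown for orientation, not quoted from this diff)
is:

struct cpu_user_fns feroceon_user_fns __initdata = {
	.cpu_clear_user_highpage = feroceon_clear_user_highpage,
	.cpu_copy_user_highpage	= feroceon_copy_user_highpage,
};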
@@ -68,7 +68,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
 }

 void feroceon_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
......
@@ -38,7 +38,7 @@ v3_copy_user_page(void *kto, const void *kfrom)
 }

 void v3_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
......
@@ -69,7 +69,7 @@ mc_copy_user_page(void *from, void *to)
 }

 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto = kmap_atomic(to, KM_USER1);
......
@@ -48,7 +48,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
 }

 void v4wb_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
......
@@ -44,7 +44,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
 }

 void v4wt_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
......
@@ -34,7 +34,7 @@ static DEFINE_SPINLOCK(v6_lock);
  * attack the kernel's existing mapping of these pages.
  */
 static void v6_copy_user_highpage_nonaliasing(struct page *to,
-	struct page *from, unsigned long vaddr)
+	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
@@ -73,7 +73,7 @@ static void discard_old_kernel_data(void *kto)
  * Copy the page, taking account of the cache colour.
  */
 static void v6_copy_user_highpage_aliasing(struct page *to,
-	struct page *from, unsigned long vaddr)
+	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long kfrom, kto;
......
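
Aside on the v6 hunks above: CACHE_COLOUR(vaddr) selects which alias
"colour" the user address falls in, so the aliasing variant can copy
through a kernel mapping congruent with the user mapping. A sketch of
the idea; EXAMPLE_CACHE_COLOUR is an assumed stand-in, not the kernel's
macro verbatim:

#include <asm/page.h>		/* PAGE_SHIFT */
#include <asm/shmparam.h>	/* SHMLBA: span of the aliasing window */

/* The colour is the page-sized slot a virtual address occupies within
 * the SHMLBA window; two mappings with equal colour index the same
 * cache sets and therefore do not alias destructively. */
#define EXAMPLE_CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)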
@@ -71,7 +71,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 }

 void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
......
@@ -91,7 +91,7 @@ mc_copy_user_page(void *from, void *to)
 }

 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto = kmap_atomic(to, KM_USER1);
......
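
For contrast with the ARM override, the generic fallback used when an
architecture does not define __HAVE_ARCH_COPY_USER_HIGHPAGE already
takes all four arguments, which is why every caller can supply the VMA.
Roughly the 2.6.32-era include/linux/highmem.h version (from memory,
treat as a sketch):

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}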