Commit 2e510b77 authored by Russell King

[ARM] Fix copy/clear user page functions for VIPT aliasing caches.

This fixes the copy and clear user page functions for ARMv6 aliasing
caches.  When we copy data into a page destined for user space, we
must make sure that there are no dirty cache lines associated with
the kernel space mapping of this page.
parent 8db06cee
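
For readers unfamiliar with VIPT aliasing: the DCACHE_COLOUR() macro introduced below picks which of the SHMLBA/PAGE_SIZE aliasing slots a virtual address occupies, and the kernel's temporary mapping must land in the same slot as the user mapping so both hit the same cache lines. The following standalone sketch (plain user-space C, not part of the patch; the 16KB SHMLBA value and the sample address are illustrative assumptions) shows the computation:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define SHMLBA     (4 * PAGE_SIZE)	/* assumed 16KB aliasing window */

/* Same computation as the macro in the patch: which aliasing slot
 * (cache colour) a virtual address falls into. */
#define DCACHE_COLOUR(vaddr) (((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long uaddr = 0x40003000UL;	/* hypothetical user address */

	/* Prints "colour of 0x40003000 = 3": the kernel-side temporary
	 * mapping for this page must use slot 3 as well. */
	printf("colour of %#lx = %lu\n", uaddr, DCACHOLOUR_PLACEHOLDER(uaddr));
	return 0;
}

(In the last printf, read DCACHOLOUR_PLACEHOLDER as DCACHE_COLOUR; the macro defined above is the one used.)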
@@ -31,14 +31,46 @@ static spinlock_t v6_lock = SPIN_LOCK_UNLOCKED;
#define DCACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
* Copy the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of these pages.
*/
void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
copy_page(kto, kfrom);
}
/*
* Clear the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of this page.
*/
void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
{
clear_page(kaddr);
}
/*
* Copy the page, taking account of the cache colour.
*/
void v6_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
unsigned int offset = DCACHE_COLOUR(vaddr);
unsigned long from, to;
/*
* Discard data in the kernel mapping for the new page.
* FIXME: needs this MCRR to be supported.
*/
__asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
:
: "r" (kto),
"r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
: "cc");
/*
* Now copy the page using the same cache colour as the
* pages ultimate destination.
*/
spin_lock(&v6_lock);
set_pte(from_pte + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
@@ -55,11 +87,30 @@ void v6_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
spin_unlock(&v6_lock);
}
void v6_clear_user_page(void *kaddr, unsigned long vaddr)
/*
* Clear the user page. We need to deal with the aliasing issues,
* so remap the kernel page into the same cache colour as the user
* page.
*/
void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
{
unsigned int offset = DCACHE_COLOUR(vaddr);
unsigned long to = to_address + (offset << PAGE_SHIFT);
/*
* Discard data in the kernel mapping for the new page
* FIXME: needs this MCRR to be supported.
*/
__asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
:
: "r" (kaddr),
"r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
: "cc");
/*
* Now clear the page using the same cache colour as
* the pages ultimate destination.
*/
spin_lock(&v6_lock);
set_pte(to_pte + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
@@ -70,26 +121,31 @@ void v6_clear_user_page(void *kaddr, unsigned long vaddr)
}
struct cpu_user_fns v6_user_fns __initdata = {
.cpu_clear_user_page = v6_clear_user_page,
.cpu_copy_user_page = v6_copy_user_page,
.cpu_clear_user_page = v6_clear_user_page_nonaliasing,
.cpu_copy_user_page = v6_copy_user_page_nonaliasing,
};
static int __init v6_userpage_init(void)
{
pgd_t *pgd;
pmd_t *pmd;
pgd = pgd_offset_k(from_address);
pmd = pmd_alloc(&init_mm, pgd, from_address);
if (!pmd)
BUG();
from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
if (!from_pte)
BUG();
to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
if (!to_pte)
BUG();
if (cache_is_vipt_aliasing()) {
pgd_t *pgd;
pmd_t *pmd;
pgd = pgd_offset_k(from_address);
pmd = pmd_alloc(&init_mm, pgd, from_address);
if (!pmd)
BUG();
from_pte = pte_alloc_kernel(&init_mm, pmd, from_address);
if (!from_pte)
BUG();
to_pte = pte_alloc_kernel(&init_mm, pmd, to_address);
if (!to_pte)
BUG();
v6_user_fns.cpu_clear_user_page = v6_clear_user_page_aliasing;
v6_user_fns.cpu_copy_user_page = v6_copy_user_page_aliasing;
}
return 0;
}
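
The pattern in v6_userpage_init() above, probing a cache property once at boot and swapping the default function pointers for the colour-aware variants only when the D-cache actually aliases, keeps the common non-aliasing case on the fast copy_page()/clear_page() path. A minimal stand-alone sketch of that selection pattern (illustrative names and a stubbed probe, not the kernel's actual types) looks like this:

/* Standalone sketch, not kernel code: boot-time selection of page
 * helpers based on a cache property, mirroring v6_userpage_init().
 * The probe is stubbed; the real kernel asks cache_is_vipt_aliasing(). */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

struct user_page_ops {
	void (*clear_user_page)(void *kaddr, unsigned long uvaddr);
};

static int cache_aliases(void) { return 1; }	/* pretend capability probe */

static void clear_nonaliasing(void *kaddr, unsigned long uvaddr)
{
	(void)uvaddr;
	memset(kaddr, 0, PAGE_SIZE);	/* plain clear via the kernel mapping */
}

static void clear_aliasing(void *kaddr, unsigned long uvaddr)
{
	/* A real implementation would remap the page at uvaddr's cache
	 * colour and clear through that mapping; here we just clear. */
	(void)uvaddr;
	memset(kaddr, 0, PAGE_SIZE);
}

static struct user_page_ops ops = {
	.clear_user_page = clear_nonaliasing,	/* fast default */
};

static void userpage_init(void)
{
	if (cache_aliases())
		ops.clear_user_page = clear_aliasing;	/* colour-aware path */
}

int main(void)
{
	static char page[PAGE_SIZE];

	userpage_init();
	ops.clear_user_page(page, 0x40003000UL);
	printf("selected %s path\n",
	       ops.clear_user_page == clear_aliasing ? "aliasing" : "non-aliasing");
	return 0;
}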