Commit 67ece144 authored by Russell King

ARM: pgtable: consolidate set_pte_ext(TOP_PTE,...) + tlb flush

A number of places establish a PTE in our top page table and
immediately flush the TLB.  Rather than having this at every callsite,
provide an inline function for this purpose.

This changes some global tlb flushes to be local; each time we setup
one of these mappings, we always do it with preemption disabled which
would prevent us migrating to another CPU.
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 6e78df17
...@@ -74,8 +74,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from, ...@@ -74,8 +74,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
raw_spin_lock(&minicache_lock); raw_spin_lock(&minicache_lock);
set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), mk_pte(from, minicache_pgprot), 0); set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
flush_tlb_kernel_page(COPYPAGE_MINICACHE);
mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
......
...@@ -90,11 +90,8 @@ static void v6_copy_user_highpage_aliasing(struct page *to, ...@@ -90,11 +90,8 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT); kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT); kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);
set_pte_ext(TOP_PTE(kfrom), mk_pte(from, PAGE_KERNEL), 0); set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
set_pte_ext(TOP_PTE(kto), mk_pte(to, PAGE_KERNEL), 0); set_top_pte(kto, mk_pte(to, PAGE_KERNEL));
flush_tlb_kernel_page(kfrom);
flush_tlb_kernel_page(kto);
copy_page((void *)kto, (void *)kfrom); copy_page((void *)kto, (void *)kfrom);
...@@ -119,8 +116,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad ...@@ -119,8 +116,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
*/ */
raw_spin_lock(&v6_lock); raw_spin_lock(&v6_lock);
set_pte_ext(TOP_PTE(to), mk_pte(page, PAGE_KERNEL), 0); set_top_pte(to, mk_pte(page, PAGE_KERNEL));
flush_tlb_kernel_page(to);
clear_page((void *)to); clear_page((void *)to);
raw_spin_unlock(&v6_lock); raw_spin_unlock(&v6_lock);
......
...@@ -94,8 +94,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, ...@@ -94,8 +94,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
raw_spin_lock(&minicache_lock); raw_spin_lock(&minicache_lock);
set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), mk_pte(from, minicache_pgprot), 0); set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));
flush_tlb_kernel_page(COPYPAGE_MINICACHE);
mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
......
...@@ -28,8 +28,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) ...@@ -28,8 +28,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
const int zero = 0; const int zero = 0;
set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0); set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
flush_tlb_kernel_page(to);
asm( "mcrr p15, 0, %1, %0, c14\n" asm( "mcrr p15, 0, %1, %0, c14\n"
" mcr p15, 0, %2, c7, c10, 4" " mcr p15, 0, %2, c7, c10, 4"
...@@ -40,13 +39,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) ...@@ -40,13 +39,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len) static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{ {
unsigned long colour = CACHE_COLOUR(vaddr); unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
unsigned long offset = vaddr & (PAGE_SIZE - 1); unsigned long offset = vaddr & (PAGE_SIZE - 1);
unsigned long to; unsigned long to;
set_pte_ext(TOP_PTE(FLUSH_ALIAS_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0); set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
to = FLUSH_ALIAS_START + (colour << PAGE_SHIFT) + offset; to = va + offset;
flush_tlb_kernel_page(to);
flush_icache_range(to, to + len); flush_icache_range(to, to + len);
} }
......
...@@ -71,13 +71,12 @@ void *__kmap_atomic(struct page *page) ...@@ -71,13 +71,12 @@ void *__kmap_atomic(struct page *page)
*/ */
BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif #endif
set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
/* /*
* When debugging is off, kunmap_atomic leaves the previous mapping * When debugging is off, kunmap_atomic leaves the previous mapping
* in place, so this TLB flush ensures the TLB is updated with the * in place, so the contained TLB flush ensures the TLB is updated
* new mapping. * with the new mapping.
*/ */
local_flush_tlb_kernel_page(vaddr); set_top_pte(vaddr, mk_pte(page, kmap_prot));
return (void *)vaddr; return (void *)vaddr;
} }
...@@ -96,8 +95,7 @@ void __kunmap_atomic(void *kvaddr) ...@@ -96,8 +95,7 @@ void __kunmap_atomic(void *kvaddr)
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); set_top_pte(vaddr, __pte(0));
local_flush_tlb_kernel_page(vaddr);
#else #else
(void) idx; /* to kill a warning */ (void) idx; /* to kill a warning */
#endif #endif
...@@ -123,8 +121,7 @@ void *kmap_atomic_pfn(unsigned long pfn) ...@@ -123,8 +121,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif #endif
set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0); set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
local_flush_tlb_kernel_page(vaddr);
return (void *)vaddr; return (void *)vaddr;
} }
......
...@@ -18,6 +18,12 @@ extern pmd_t *top_pmd; ...@@ -18,6 +18,12 @@ extern pmd_t *top_pmd;
/* PFN alias flushing, for VIPT caches */ /* PFN alias flushing, for VIPT caches */
#define FLUSH_ALIAS_START 0xffff4000 #define FLUSH_ALIAS_START 0xffff4000
/*
 * Establish a kernel mapping at virtual address @va in the top page table
 * (the pmd covering the top of the address space, via TOP_PTE), then flush
 * the old TLB entry for that address so the new mapping takes effect.
 *
 * The flush is local-CPU only: callers set these transient mappings up with
 * preemption disabled (spinlock held or atomic kmap context), so the mapping
 * is never used from another CPU — NOTE(review): this holds for the callers
 * visible in this commit; confirm for any new caller.
 */
static inline void set_top_pte(unsigned long va, pte_t pte)
{
set_pte_ext(TOP_PTE(va), pte, 0);
/* Flush after the PTE write so no stale translation survives. */
local_flush_tlb_kernel_page(va);
}
static inline pmd_t *pmd_off_k(unsigned long virt) static inline pmd_t *pmd_off_k(unsigned long virt)
{ {
return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt); return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment