Commit 39af22a7 authored by Nicolas Pitre, committed by Nicolas Pitre

ARM: get rid of kmap_high_l1_vipt()

Since commit 3e4d3af5 "mm: stack based kmap_atomic()", it is no longer
necessary to carry an ad hoc version of kmap_atomic() added in commit
7e5a69e8 "ARM: 6007/1: fix highmem with VIPT cache and DMA" to cope
with reentrancy.

In fact, it is now actively wrong to rely on fixed kmap type indices
(namely KM_L1_CACHE), as kmap_atomic() ignores them entirely and a
concurrent instance of it may reuse any slot for any purpose.
Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
parent b0c3844d
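
For illustration, the cache maintenance paths in the diff below all collapse to the same generic pattern. This is a minimal sketch, not code from this commit, and the helper name example_flush_vipt_page is hypothetical; the point is that kmap_atomic() now hands out a free per-CPU fixmap slot by itself, so callers no longer pass a KM_* index:

	#include <linux/highmem.h>
	#include <asm/cacheflush.h>

	/* Hypothetical example: clean a possibly-unmapped highmem page on a VIPT cache. */
	static void example_flush_vipt_page(struct page *page)
	{
		void *vaddr = kmap_atomic(page);	/* slot chosen by the generic kmap code */
		__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);
		kunmap_atomic(vaddr);			/* must pair with the kmap_atomic() above */
	}

Reentrancy is handled by the stack-based slot allocation in the generic code, so the per-CPU save/restore scheme removed from highmem.c below is no longer needed.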
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -25,9 +25,6 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
-extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
-
 /*
  * The following functions are already defined by <linux/highmem.h>
  * when CONFIG_HIGHMEM is not set.
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/highmem.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -480,10 +481,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 				op(vaddr, len, dir);
 				kunmap_high(page);
 			} else if (cache_is_vipt()) {
-				pte_t saved_pte;
-				vaddr = kmap_high_l1_vipt(page, &saved_pte);
+				/* unmapped pages might still be cached */
+				vaddr = kmap_atomic(page);
 				op(vaddr + offset, len, dir);
-				kunmap_high_l1_vipt(page, saved_pte);
+				kunmap_atomic(vaddr);
 			}
 		} else {
 			vaddr = page_address(page) + offset;
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -180,10 +181,10 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 			kunmap_high(page);
 		} else if (cache_is_vipt()) {
-			pte_t saved_pte;
-			addr = kmap_high_l1_vipt(page, &saved_pte);
+			/* unmapped pages might still be cached */
+			addr = kmap_atomic(page);
 			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-			kunmap_high_l1_vipt(page, saved_pte);
+			kunmap_atomic(addr);
 		}
 	}
 
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -140,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
- * The VIVT cache of a highmem page is always flushed before the page
- * is unmapped. Hence unmapped highmem pages need no cache maintenance
- * in that case.
- *
- * However unmapped pages may still be cached with a VIPT cache, and
- * it is not possible to perform cache maintenance on them using physical
- * addresses unfortunately. So we have no choice but to set up a temporary
- * virtual mapping for that purpose.
- *
- * Yet this VIPT cache maintenance may be triggered from DMA support
- * functions which are possibly called from interrupt context. As we don't
- * want to keep interrupt disabled all the time when such maintenance is
- * taking place, we therefore allow for some reentrancy by preserving and
- * restoring the previous fixmap entry before the interrupted context is
- * resumed. If the reentrancy depth is 0 then there is no need to restore
- * the previous fixmap, and leaving the current one in place allow it to
- * be reused the next time without a TLB flush (common with DMA).
- */
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
-	unsigned int idx, cpu;
-	int *depth;
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	if (!in_interrupt())
-		preempt_disable();
-
-	cpu = smp_processor_id();
-	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	raw_local_irq_save(flags);
-	(*depth)++;
-	if (pte_val(*ptep) == pte_val(pte)) {
-		*saved_pte = pte;
-	} else {
-		*saved_pte = *ptep;
-		set_pte_ext(ptep, pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
-	unsigned int idx, cpu = smp_processor_id();
-	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	BUG_ON(pte_val(*ptep) != pte_val(pte));
-	BUG_ON(*depth <= 0);
-
-	raw_local_irq_save(flags);
-	(*depth)--;
-	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
-		set_pte_ext(ptep, saved_pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	if (!in_interrupt())
-		preempt_enable();
-}
-
-#endif /* CONFIG_CPU_CACHE_VIPT */