Commit 53d862fa authored by John David Anglin, committed by Helge Deller

parisc: Fix invalidate/flush vmap routines

Cache move-in for virtual accesses is controlled by the TLB.  Thus,
we must generally purge TLB entries before flushing.  The flush routines
must use TLB entries that inhibit cache move-in.

V2: Load physical address prior to flushing TLB.  In flush_cache_page,
flush TLB when flushing and purging.

V3: Don't flush when start equals end.
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
parent 411fadd6
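
Distilled from the hunks below, both vmap routines now walk the range page by page, purging the kernel TLB entry before touching the cache so the TLB cannot initiate new cache move-in. A minimal sketch of that pattern (the helper name vmap_flush_one_page is hypothetical; lpa, purge_tlb_start/purge_tlb_end, pdtlb, SR_KERNEL and flush_dcache_page_asm are the parisc primitives used in the patch itself):

	/*
	 * Sketch only: the per-page pattern from the hunks below.
	 * Purge the TLB entry first (cache move-in is controlled by
	 * the TLB), then flush the line via its physical address.
	 */
	static void vmap_flush_one_page(unsigned long start)	/* hypothetical helper */
	{
		unsigned long flags;
		unsigned long physaddr = lpa(start);	/* translate before the TLB entry goes away */

		purge_tlb_start(flags);			/* TLB lock + irqs off */
		pdtlb(SR_KERNEL, start);		/* purge the kernel data TLB entry */
		purge_tlb_end(flags);
		flush_dcache_page_asm(physaddr, start);	/* writeback + invalidate the page */
	}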
arch/parisc/kernel/cache.c

@@ -611,8 +611,8 @@ void
 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
 {
 	if (pfn_valid(pfn)) {
-		flush_tlb_page(vma, vmaddr);
 		if (likely(vma->vm_mm->context.space_id)) {
+			flush_tlb_page(vma, vmaddr);
 			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 		} else {
 			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
@@ -624,6 +624,7 @@ void flush_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
 	unsigned long end = start + size;
+	unsigned long flags, physaddr;
 
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    (unsigned long)size >= parisc_cache_flush_threshold) {
@@ -632,8 +633,14 @@ void flush_kernel_vmap_range(void *vaddr, int size)
 		return;
 	}
 
-	flush_kernel_dcache_range_asm(start, end);
-	flush_tlb_kernel_range(start, end);
+	while (start < end) {
+		physaddr = lpa(start);
+
+		purge_tlb_start(flags);
+		pdtlb(SR_KERNEL, start);
+		purge_tlb_end(flags);
+		flush_dcache_page_asm(physaddr, start);
+		start += PAGE_SIZE;
+	}
 }
 EXPORT_SYMBOL(flush_kernel_vmap_range);
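
For context on the flush side: a hypothetical caller writes through a vmap alias and must make the data visible at the physical pages before a device reads them (pages, npages, data, len and the DMA step are assumed; vmap/vunmap and flush_kernel_vmap_range are the standard kernel API):

	void *va = vmap(pages, npages, VM_MAP, PAGE_KERNEL);

	memcpy(va, data, len);			/* dirty the vmap alias */
	flush_kernel_vmap_range(va, len);	/* writeback so the device sees the data */
	/* ... start the device read of the underlying physical pages ... */
	vunmap(va);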
@@ -641,6 +648,7 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
 	unsigned long end = start + size;
+	unsigned long flags, physaddr;
 
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    (unsigned long)size >= parisc_cache_flush_threshold) {
@@ -649,7 +657,13 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
 		return;
 	}
 
-	purge_kernel_dcache_range_asm(start, end);
-	flush_tlb_kernel_range(start, end);
+	while (start < end) {
+		physaddr = lpa(start);
+
+		purge_tlb_start(flags);
+		pdtlb(SR_KERNEL, start);
+		purge_tlb_end(flags);
+		purge_dcache_page_asm(physaddr, start);
+		start += PAGE_SIZE;
+	}
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
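
invalidate_kernel_vmap_range is the mirror image: purge_dcache_page_asm discards cached lines without writing them back, which is what a caller wants after a device has written the physical pages. Continuing the hypothetical sketch above (buf is assumed):

	/* ... device write to the physical pages has completed ... */
	invalidate_kernel_vmap_range(va, len);	/* drop stale alias lines, no writeback */
	memcpy(buf, va, len);			/* reads now see the device's data */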