Commit 1fc7db24 authored by John David Anglin, committed by Helge Deller

parisc: Don't enforce DMA completion order in cache flushes

The only place we need to ensure that all outstanding cache coherence
operations have completed is invalidate_kernel_vmap_range(). All parisc
drivers synchronize their DMA operations internally and do not call
invalidate_kernel_vmap_range(); the syncdma barrier is only needed
there, for non-coherent I/O operations.
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
parent 41dc0b53
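
For context, here is a rough, hypothetical sketch (not part of this patch) of
the kind of non-coherent I/O path the commit message refers to: a driver that
lets a device DMA into pages and then reads them through a vmap() alias must
call invalidate_kernel_vmap_range() once the transfer has completed, and with
this change that call is also where parisc waits (via syncdma) for outstanding
DMA. The function name demo_read_via_vmap() and the surrounding driver details
are invented purely for illustration.

    #include <linux/dma-mapping.h>
    #include <linux/highmem.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Hypothetical consumer of invalidate_kernel_vmap_range(); illustration only. */
    static int demo_read_via_vmap(struct device *dev, struct page **pages,
                                  unsigned int nr_pages, unsigned int len)
    {
            dma_addr_t handle;
            void *vaddr;

            /* Alias the pages in the kernel vmap area. */
            vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
            if (!vaddr)
                    return -ENOMEM;

            /* Map the first page for a device-to-memory (read) transfer. */
            handle = dma_map_page(dev, pages[0], 0, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, handle)) {
                    vunmap(vaddr);
                    return -EIO;
            }

            /* ... program the device and wait for its completion interrupt ... */

            dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);

            /*
             * Throw away any stale cache lines for the vmap alias before the
             * CPU reads the DMA'd data.  After this patch, this is also the
             * point where parisc executes syncdma to wait for outstanding DMA.
             */
            invalidate_kernel_vmap_range(vaddr, len);

            /* ... consume the data through vaddr ... */

            vunmap(vaddr);
            return 0;
    }
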
@@ -54,6 +54,7 @@ void parisc_setup_cache_timing(void);
 #define asm_io_sync()	asm volatile("sync" \
 			ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
 			ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory")
+#define asm_syncdma()	asm volatile("syncdma" :::"memory")
 #endif /* ! __ASSEMBLY__ */
...
@@ -754,6 +754,9 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
 	unsigned long start = (unsigned long)vaddr;
 	unsigned long end = start + size;
 
+	/* Ensure DMA is complete */
+	asm_syncdma();
+
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    (unsigned long)size >= parisc_cache_flush_threshold) {
 		flush_tlb_kernel_range(start, end);
...
@@ -300,7 +300,6 @@ fdoneloop2:
 	fdce,m	%arg1(%sr1, %arg0)	/* Fdce for one loop */
 
 fdsync:
-	syncdma
 	sync
 	mtsm	%r22			/* restore I-bit */
 89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
@@ -1098,7 +1097,6 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
 	sync
 89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
-	syncdma
 	bv	%r0(%r2)
 	nop
 ENDPROC_CFI(flush_kernel_dcache_range_asm)
@@ -1140,7 +1138,6 @@ ENTRY_CFI(purge_kernel_dcache_range_asm)
 	sync
 89:	ALTERNATIVE(88b, 89b, ALT_COND_NO_DCACHE, INSN_NOP)
-	syncdma
 	bv	%r0(%r2)
 	nop
 ENDPROC_CFI(purge_kernel_dcache_range_asm)