Commit d3580427 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/pmem: Add flush routines using new pmem store and sync instruction

Start using the dcbstps; phwsync sequence for flushing persistent memory ranges.
The new instructions are implemented as variants of dcbf and hwsync, and on
P8 and P9 they will be executed as those instructions. We avoid using them on
older hardware, which helps avoid difficult-to-debug bugs.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200701072235.223558-4-aneesh.kumar@linux.ibm.com
parent 32db09d9
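
The dcbstps/dcbfps and phwsync macros used by this patch come from the parent commit; here they are only wrapped in range helpers. As a minimal sketch (not part of the patch), persisting a single cache line follows the same inline-asm pattern used in __clean_pmem_range() below; the helper name persist_one_line is hypothetical:

static inline void persist_one_line(void *addr)
{
	/* write the cache line containing addr toward the persistence domain */
	asm volatile(PPC_DCBSTPS(%0, %1): :"i"(0), "r"(addr): "memory");
	/* phwsync: order/complete the persistent store before continuing */
	asm volatile(PPC_PHWSYNC ::: "memory");
}

Per the commit message, P8 and P9 execute these encodings as plain dcbf and hwsync, so gating the helpers on CPU_FTR_ARCH_207S (POWER8 and later) is sufficient; only older hardware is excluded.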
@@ -6,6 +6,7 @@
 
 #include <linux/mm.h>
 #include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
 
 #ifdef CONFIG_PPC_BOOK3S_64
 /*
...
@@ -9,20 +9,62 @@
 
 #include <asm/cacheflush.h>
 
+static inline void __clean_pmem_range(unsigned long start, unsigned long stop)
+{
+	unsigned long shift = l1_dcache_shift();
+	unsigned long bytes = l1_dcache_bytes();
+	void *addr = (void *)(start & ~(bytes - 1));
+	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
+	unsigned long i;
+
+	for (i = 0; i < size >> shift; i++, addr += bytes)
+		asm volatile(PPC_DCBSTPS(%0, %1): :"i"(0), "r"(addr): "memory");
+
+	asm volatile(PPC_PHWSYNC ::: "memory");
+}
+
+static inline void __flush_pmem_range(unsigned long start, unsigned long stop)
+{
+	unsigned long shift = l1_dcache_shift();
+	unsigned long bytes = l1_dcache_bytes();
+	void *addr = (void *)(start & ~(bytes - 1));
+	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
+	unsigned long i;
+
+	for (i = 0; i < size >> shift; i++, addr += bytes)
+		asm volatile(PPC_DCBFPS(%0, %1): :"i"(0), "r"(addr): "memory");
+
+	asm volatile(PPC_PHWSYNC ::: "memory");
+}
+
+static inline void clean_pmem_range(unsigned long start, unsigned long stop)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		return __clean_pmem_range(start, stop);
+}
+
+static inline void flush_pmem_range(unsigned long start, unsigned long stop)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		return __flush_pmem_range(start, stop);
+}
+
 /*
  * CONFIG_ARCH_HAS_PMEM_API symbols
  */
 void arch_wb_cache_pmem(void *addr, size_t size)
 {
 	unsigned long start = (unsigned long) addr;
-	flush_dcache_range(start, start + size);
+	clean_pmem_range(start, start + size);
 }
 EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 
 void arch_invalidate_pmem(void *addr, size_t size)
 {
 	unsigned long start = (unsigned long) addr;
-	flush_dcache_range(start, start + size);
+	flush_pmem_range(start, start + size);
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
 
@@ -35,7 +77,7 @@ long __copy_from_user_flushcache(void *dest, const void __user *src,
 	unsigned long copied, start = (unsigned long) dest;
 
 	copied = __copy_from_user(dest, src, size);
-	flush_dcache_range(start, start + size);
+	clean_pmem_range(start, start + size);
 
 	return copied;
 }
@@ -45,7 +87,7 @@ void *memcpy_flushcache(void *dest, const void *src, size_t size)
 	unsigned long start = (unsigned long) dest;
 
 	memcpy(dest, src, size);
-	flush_dcache_range(start, start + size);
+	clean_pmem_range(start, start + size);
 
 	return dest;
 }
...
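
One detail worth noting in __clean_pmem_range()/__flush_pmem_range() above: the start address is rounded down to a cache-line boundary and the length rounded up, so every line that overlaps [start, stop) receives a dcbstps/dcbfps. A small stand-alone sketch of that arithmetic (hypothetical names, and an assumed 128-byte line size purely for illustration; the kernel reads the real size via l1_dcache_bytes()):

#include <stdio.h>

#define LINE_BYTES	128UL	/* assumed line size for this example */
#define LINE_SHIFT	7UL	/* log2(LINE_BYTES) */

/* Number of cache lines the flush loop above would touch. */
static unsigned long lines_covered(unsigned long start, unsigned long stop)
{
	unsigned long addr = start & ~(LINE_BYTES - 1);		/* round start down */
	unsigned long size = stop - addr + (LINE_BYTES - 1);	/* round length up */

	return size >> LINE_SHIFT;
}

int main(void)
{
	printf("%lu\n", lines_covered(0x1004, 0x1008));	/* 1: one partial line */
	printf("%lu\n", lines_covered(0x1040, 0x1140));	/* 3: straddles three lines */
	return 0;
}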