Commit 7c8a6a71 authored by Dan Williams

pmem: kill wmb_pmem()

All users have been replaced with flushing in the pmem driver.

Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 91131dbd
...@@ -26,8 +26,7 @@ ...@@ -26,8 +26,7 @@
* @n: length of the copy in bytes * @n: length of the copy in bytes
* *
* Copy data to persistent memory media via non-temporal stores so that * Copy data to persistent memory media via non-temporal stores so that
* a subsequent arch_wmb_pmem() can flush cpu and memory controller * a subsequent pmem driver flush operation will drain posted write queues.
* write buffers to guarantee durability.
*/ */
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
size_t n) size_t n)
...@@ -56,33 +55,13 @@ static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src, ...@@ -56,33 +55,13 @@ static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
return 0; return 0;
} }
/**
 * arch_wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of arch_memcpy_to_pmem() operations this drains data
 * from cpu write buffers and any platform (memory controller) buffers
 * to ensure that written data is durable on persistent memory media.
 *
 * NOTE(review): this function is deleted by this commit; durability is
 * instead provided by flushing in the pmem block driver (see commit
 * message above).
 */
static inline void arch_wmb_pmem(void)
{
/*
 * wmb() to 'sfence' all previous writes such that they are
 * architecturally visible to 'pcommit'. Note, that we've
 * already arranged for pmem writes to avoid the cache via
 * arch_memcpy_to_pmem().
 */
wmb();
/* Drain memory-controller (platform) buffers to media. */
pcommit_sfence();
}
/** /**
* arch_wb_cache_pmem - write back a cache range with CLWB * arch_wb_cache_pmem - write back a cache range with CLWB
* @vaddr: virtual start address * @vaddr: virtual start address
* @size: number of bytes to write back * @size: number of bytes to write back
* *
* Write back a cache range using the CLWB (cache line write back) * Write back a cache range using the CLWB (cache line write back)
* instruction. This function requires explicit ordering with an * instruction.
* arch_wmb_pmem() call.
*/ */
static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size) static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
{ {
...@@ -113,7 +92,6 @@ static inline bool __iter_needs_pmem_wb(struct iov_iter *i) ...@@ -113,7 +92,6 @@ static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
* @i: iterator with source data * @i: iterator with source data
* *
* Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'. * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
* This function requires explicit ordering with an arch_wmb_pmem() call.
*/ */
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes, static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
struct iov_iter *i) struct iov_iter *i)
...@@ -136,7 +114,6 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes, ...@@ -136,7 +114,6 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
* @size: number of bytes to zero * @size: number of bytes to zero
* *
* Write zeros into the memory range starting at 'addr' for 'size' bytes. * Write zeros into the memory range starting at 'addr' for 'size' bytes.
* This function requires explicit ordering with an arch_wmb_pmem() call.
*/ */
static inline void arch_clear_pmem(void __pmem *addr, size_t size) static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{ {
...@@ -150,14 +127,5 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size) ...@@ -150,14 +127,5 @@ static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
{ {
clflush_cache_range((void __force *) addr, size); clflush_cache_range((void __force *) addr, size);
} }
/*
 * __arch_has_wmb_pmem - true when this cpu can make arch_wmb_pmem()
 * durable.
 *
 * NOTE(review): the inline comment below appears stale — the return
 * value actually keys off X86_FEATURE_PCOMMIT, not the bitness of the
 * build; confirm against git history if this matters.
 */
static inline bool __arch_has_wmb_pmem(void)
{
/*
 * We require that wmb() be an 'sfence', that is only guaranteed on
 * 64-bit builds
 */
return static_cpu_has(X86_FEATURE_PCOMMIT);
}
#endif /* CONFIG_ARCH_HAS_PMEM_API */ #endif /* CONFIG_ARCH_HAS_PMEM_API */
#endif /* __ASM_X86_PMEM_H__ */ #endif /* __ASM_X86_PMEM_H__ */
...@@ -26,16 +26,6 @@ ...@@ -26,16 +26,6 @@
* calling these symbols with arch_has_pmem_api() and redirect to the * calling these symbols with arch_has_pmem_api() and redirect to the
* implementation in asm/pmem.h. * implementation in asm/pmem.h.
*/ */
/* Generic fallback: no architecture support for durable wmb_pmem(). */
static inline bool __arch_has_wmb_pmem(void)
{
return false;
}
/*
 * Should be unreachable: wmb_pmem() only calls arch_wmb_pmem() when
 * arch_has_wmb_pmem() is true, which requires __arch_has_wmb_pmem()
 * to return true — and this generic stub always returns false.
 */
static inline void arch_wmb_pmem(void)
{
BUG();
}
static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src, static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
size_t n) size_t n)
{ {
...@@ -101,20 +91,6 @@ static inline int memcpy_from_pmem(void *dst, void __pmem const *src, ...@@ -101,20 +91,6 @@ static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
return default_memcpy_from_pmem(dst, src, size); return default_memcpy_from_pmem(dst, src, size);
} }
/**
 * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
 *
 * For a given cpu implementation within an architecture it is possible
 * that wmb_pmem() resolves to a nop. In the case this returns
 * false, pmem api users are unable to ensure durability and may want to
 * fall back to a different data consistency model, or otherwise notify
 * the user.
 */
static inline bool arch_has_wmb_pmem(void)
{
/* Both the arch pmem API and its durable-wmb primitive must exist. */
return arch_has_pmem_api() && __arch_has_wmb_pmem();
}
/* /*
* These defaults seek to offer decent performance and minimize the * These defaults seek to offer decent performance and minimize the
* window between i/o completion and writes being durable on media. * window between i/o completion and writes being durable on media.
...@@ -152,7 +128,7 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size) ...@@ -152,7 +128,7 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size)
* being effectively evicted from, or never written to, the processor * being effectively evicted from, or never written to, the processor
* cache hierarchy after the copy completes. After memcpy_to_pmem() * cache hierarchy after the copy completes. After memcpy_to_pmem()
* data may still reside in cpu or platform buffers, so this operation * data may still reside in cpu or platform buffers, so this operation
* must be followed by a wmb_pmem(). * must be followed by a blkdev_issue_flush() on the pmem block device.
*/ */
static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n) static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
{ {
...@@ -162,21 +138,6 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n) ...@@ -162,21 +138,6 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
default_memcpy_to_pmem(dst, src, n); default_memcpy_to_pmem(dst, src, n);
} }
/**
 * wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of memcpy_to_pmem() operations this drains data from
 * cpu write buffers and any platform (memory controller) buffers to
 * ensure that written data is durable on persistent memory media.
 *
 * NOTE(review): deleted by this commit; callers now rely on the pmem
 * driver's flush path instead (see commit message above).
 */
static inline void wmb_pmem(void)
{
if (arch_has_wmb_pmem())
/* Arch primitive drains cpu and platform write buffers. */
arch_wmb_pmem();
else
/* Best effort only: orders writes, durability not guaranteed. */
wmb();
}
/** /**
* copy_from_iter_pmem - copy data from an iterator to PMEM * copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address * @addr: PMEM destination address
...@@ -184,7 +145,7 @@ static inline void wmb_pmem(void) ...@@ -184,7 +145,7 @@ static inline void wmb_pmem(void)
* @i: iterator with source data * @i: iterator with source data
* *
* Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'. * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
* This function requires explicit ordering with a wmb_pmem() call. * See blkdev_issue_flush() note for memcpy_to_pmem().
*/ */
static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes, static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
struct iov_iter *i) struct iov_iter *i)
...@@ -200,7 +161,7 @@ static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes, ...@@ -200,7 +161,7 @@ static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
* @size: number of bytes to zero * @size: number of bytes to zero
* *
* Write zeros into the memory range starting at 'addr' for 'size' bytes. * Write zeros into the memory range starting at 'addr' for 'size' bytes.
* This function requires explicit ordering with a wmb_pmem() call. * See blkdev_issue_flush() note for memcpy_to_pmem().
*/ */
static inline void clear_pmem(void __pmem *addr, size_t size) static inline void clear_pmem(void __pmem *addr, size_t size)
{ {
...@@ -230,7 +191,7 @@ static inline void invalidate_pmem(void __pmem *addr, size_t size) ...@@ -230,7 +191,7 @@ static inline void invalidate_pmem(void __pmem *addr, size_t size)
* @size: number of bytes to write back * @size: number of bytes to write back
* *
* Write back the processor cache range starting at 'addr' for 'size' bytes. * Write back the processor cache range starting at 'addr' for 'size' bytes.
* This function requires explicit ordering with a wmb_pmem() call. * See blkdev_issue_flush() note for memcpy_to_pmem().
*/ */
static inline void wb_cache_pmem(void __pmem *addr, size_t size) static inline void wb_cache_pmem(void __pmem *addr, size_t size)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment