Commit 7a9eb206 authored by Dan Williams

pmem: kill __pmem address space

The __pmem address space was meant to annotate codepaths that touch
persistent memory and need to coordinate a call to wmb_pmem().  Now that
wmb_pmem() is gone, there is little need to keep this annotation.
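
For context, __pmem worked like the other sparse address-space annotations
(__iomem, __percpu): under __CHECKER__ it expanded to a noderef/address_space
attribute, so every crossing between __pmem and plain kernel pointers needed a
__force cast to keep sparse quiet. A minimal standalone sketch of that pattern
follows; the macro bodies mirror the compiler.h definitions removed below,
while toy_memcpy_to_pmem() is illustrative only, not code from this commit:

#include <string.h>

/* Under a sparse run (__CHECKER__) the attributes are real; in a normal
 * build they expand to nothing, exactly as in include/linux/compiler.h.
 */
#ifdef __CHECKER__
# define __pmem         __attribute__((noderef, address_space(5)))
# define __force        __attribute__((force))
#else
# define __pmem
# define __force
#endif

/* Writing through a __pmem pointer directly makes sparse warn about
 * mixing address spaces, so helpers had to launder the pointer with a
 * __force cast -- the boilerplate this commit deletes everywhere below.
 */
void toy_memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
{
        memcpy((void __force *)dst, src, n);
}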

Cc: Christoph Hellwig <hch@lst.de>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 7c8a6a71
@@ -395,7 +395,7 @@ prototypes:
 	int (*release) (struct gendisk *, fmode_t);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
-	int (*direct_access) (struct block_device *, sector_t, void __pmem **,
+	int (*direct_access) (struct block_device *, sector_t, void **,
 				unsigned long *);
 	int (*media_changed) (struct gendisk *);
 	void (*unlock_native_capacity) (struct gendisk *);
...
@@ -143,12 +143,12 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
  */
 static long
 axon_ram_direct_access(struct block_device *device, sector_t sector,
-		       void __pmem **kaddr, pfn_t *pfn, long size)
+		       void **kaddr, pfn_t *pfn, long size)
 {
 	struct axon_ram_bank *bank = device->bd_disk->private_data;
 	loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;

-	*kaddr = (void __pmem __force *) bank->io_addr + offset;
+	*kaddr = (void *) bank->io_addr + offset;
 	*pfn = phys_to_pfn_t(bank->ph_addr + offset, PFN_DEV);
 	return bank->size - offset;
 }
...
@@ -28,10 +28,9 @@
  * Copy data to persistent memory media via non-temporal stores so that
  * a subsequent pmem driver flush operation will drain posted write queues.
  */
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
-		size_t n)
+static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
 {
-	int unwritten;
+	int rem;

 	/*
 	 * We are copying between two kernel buffers, if
@@ -39,19 +38,17 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
 	 * fault) we would have already reported a general protection fault
 	 * before the WARN+BUG.
 	 */
-	unwritten = __copy_from_user_inatomic_nocache((void __force *) dst,
-			(void __user *) src, n);
-	if (WARN(unwritten, "%s: fault copying %p <- %p unwritten: %d\n",
-				__func__, dst, src, unwritten))
+	rem = __copy_from_user_inatomic_nocache(dst, (void __user *) src, n);
+	if (WARN(rem, "%s: fault copying %p <- %p unwritten: %d\n",
+				__func__, dst, src, rem))
 		BUG();
 }

-static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
-		size_t n)
+static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
 {
 	if (static_cpu_has(X86_FEATURE_MCE_RECOVERY))
-		return memcpy_mcsafe(dst, (void __force *) src, n);
-	memcpy(dst, (void __force *) src, n);
+		return memcpy_mcsafe(dst, src, n);
+	memcpy(dst, src, n);
 	return 0;
 }
@@ -63,15 +60,14 @@ static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
  * Write back a cache range using the CLWB (cache line write back)
  * instruction.
  */
-static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
 {
 	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
 	unsigned long clflush_mask = x86_clflush_size - 1;
-	void *vaddr = (void __force *)addr;
-	void *vend = vaddr + size;
+	void *vend = addr + size;
 	void *p;

-	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
+	for (p = (void *)((unsigned long)addr & ~clflush_mask);
 	     p < vend; p += x86_clflush_size)
 		clwb(p);
 }
@@ -93,14 +89,13 @@ static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
  *
  * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
  */
-static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 		struct iov_iter *i)
 {
-	void *vaddr = (void __force *)addr;
 	size_t len;

 	/* TODO: skip the write-back by always using non-temporal stores */
-	len = copy_from_iter_nocache(vaddr, bytes, i);
+	len = copy_from_iter_nocache(addr, bytes, i);
 	if (__iter_needs_pmem_wb(i))
 		arch_wb_cache_pmem(addr, bytes);
@@ -115,17 +110,15 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
  *
  * Write zeros into the memory range starting at 'addr' for 'size' bytes.
  */
-static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+static inline void arch_clear_pmem(void *addr, size_t size)
 {
-	void *vaddr = (void __force *)addr;
-
-	memset(vaddr, 0, size);
+	memset(addr, 0, size);
 	arch_wb_cache_pmem(addr, size);
 }

-static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
+static inline void arch_invalidate_pmem(void *addr, size_t size)
 {
 	clflush_cache_range(addr, size);
 }

 #endif /* CONFIG_ARCH_HAS_PMEM_API */
 #endif /* __ASM_X86_PMEM_H__ */
@@ -164,7 +164,7 @@ enum nd_blk_mmio_selector {
 struct nd_blk_addr {
 	union {
 		void __iomem *base;
-		void __pmem *aperture;
+		void *aperture;
 	};
 };
...
@@ -381,7 +381,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,

 #ifdef CONFIG_BLK_DEV_RAM_DAX
 static long brd_direct_access(struct block_device *bdev, sector_t sector,
-			void __pmem **kaddr, pfn_t *pfn, long size)
+			void **kaddr, pfn_t *pfn, long size)
 {
 	struct brd_device *brd = bdev->bd_disk->private_data;
 	struct page *page;
@@ -391,7 +391,7 @@ static long brd_direct_access(struct block_device *bdev, sector_t sector,
 	page = brd_insert_page(brd, sector);
 	if (!page)
 		return -ENOSPC;
-	*kaddr = (void __pmem *)page_address(page);
+	*kaddr = page_address(page);
 	*pfn = page_to_pfn_t(page);

 	return PAGE_SIZE;
...
@@ -74,7 +74,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 	bool bad_pmem = false;
 	void *mem = kmap_atomic(page);
 	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
-	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;
+	void *pmem_addr = pmem->virt_addr + pmem_off;

 	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
 		bad_pmem = true;
@@ -173,7 +173,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,

 /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
 __weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
-		      void __pmem **kaddr, pfn_t *pfn, long size)
+		      void **kaddr, pfn_t *pfn, long size)
 {
 	struct pmem_device *pmem = bdev->bd_queue->queuedata;
 	resource_size_t offset = sector * 512 + pmem->data_offset;
@@ -284,7 +284,7 @@ static int pmem_attach_disk(struct device *dev,
 	if (IS_ERR(addr))
 		return PTR_ERR(addr);
-	pmem->virt_addr = (void __pmem *) addr;
+	pmem->virt_addr = addr;

 	blk_queue_write_cache(q, true, true);
 	blk_queue_make_request(q, pmem_make_request);
...
@@ -6,7 +6,7 @@
 #include <linux/fs.h>

 long pmem_direct_access(struct block_device *bdev, sector_t sector,
-	  void __pmem **kaddr, pfn_t *pfn, long size);
+	  void **kaddr, pfn_t *pfn, long size);
 /* this definition is in it's own header for tools/testing/nvdimm to consume */
 struct pmem_device {
 	/* One contiguous memory region per device */
@@ -14,7 +14,7 @@ struct pmem_device {
 	/* when non-zero this device is hosting a 'pfn' instance */
 	phys_addr_t data_offset;
 	u64 pfn_flags;
-	void __pmem *virt_addr;
+	void *virt_addr;
 	/* immutable base size of the namespace */
 	size_t size;
 	/* trim size when namespace capacity has been section aligned */
...
@@ -31,7 +31,7 @@ static void dcssblk_release(struct gendisk *disk, fmode_t mode);
 static blk_qc_t dcssblk_make_request(struct request_queue *q,
 						struct bio *bio);
 static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
-			 void __pmem **kaddr, pfn_t *pfn, long size);
+			 void **kaddr, pfn_t *pfn, long size);

 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
@@ -884,7 +884,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
 static long
 dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
-			void __pmem **kaddr, pfn_t *pfn, long size)
+			void **kaddr, pfn_t *pfn, long size)
 {
 	struct dcssblk_dev_info *dev_info;
 	unsigned long offset, dev_sz;
@@ -894,7 +894,7 @@ dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
 		return -ENODEV;
 	dev_sz = dev_info->end - dev_info->start;
 	offset = secnum * 512;
-	*kaddr = (void __pmem *) (dev_info->start + offset);
+	*kaddr = (void *) dev_info->start + offset;
 	*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), PFN_DEV);

 	return dev_sz - offset;
...
@@ -75,13 +75,13 @@ static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
 	struct request_queue *q = bdev->bd_queue;
 	long rc = -EIO;

-	dax->addr = (void __pmem *) ERR_PTR(-EIO);
+	dax->addr = ERR_PTR(-EIO);
 	if (blk_queue_enter(q, true) != 0)
 		return rc;

 	rc = bdev_direct_access(bdev, dax);
 	if (rc < 0) {
-		dax->addr = (void __pmem *) ERR_PTR(rc);
+		dax->addr = ERR_PTR(rc);
 		blk_queue_exit(q);
 		return rc;
 	}
@@ -152,7 +152,7 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
 	int rw = iov_iter_rw(iter), rc;
 	long map_len = 0;
 	struct blk_dax_ctl dax = {
-		.addr = (void __pmem *) ERR_PTR(-EIO),
+		.addr = ERR_PTR(-EIO),
 	};
 	unsigned blkbits = inode->i_blkbits;
 	sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
...
@@ -1659,7 +1659,7 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
  */
 struct blk_dax_ctl {
 	sector_t sector;
-	void __pmem *addr;
+	void *addr;
 	long size;
 	pfn_t pfn;
 };
@@ -1670,8 +1670,8 @@ struct block_device_operations {
 	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
-	long (*direct_access)(struct block_device *, sector_t, void __pmem **,
-			pfn_t *, long);
+	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
+			long);
 	unsigned int (*check_events) (struct gendisk *disk,
 				      unsigned int clearing);
 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
...
@@ -17,7 +17,6 @@
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
 # define __percpu	__attribute__((noderef, address_space(3)))
-# define __pmem		__attribute__((noderef, address_space(5)))
 #ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu		__attribute__((noderef, address_space(4)))
 #else /* CONFIG_SPARSE_RCU_POINTER */
@@ -45,7 +44,6 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __cond_lock(x,c) (c)
 # define __percpu
 # define __rcu
-# define __pmem
 # define __private
 # define ACCESS_PRIVATE(p, member) ((p)->member)
 #endif /* __CHECKER__ */
...
@@ -68,7 +68,7 @@ struct nd_namespace_io {
 	struct nd_namespace_common common;
 	struct resource res;
 	resource_size_t size;
-	void __pmem *addr;
+	void *addr;
 	struct badblocks bb;
 };
...
@@ -26,37 +26,35 @@
  * calling these symbols with arch_has_pmem_api() and redirect to the
  * implementation in asm/pmem.h.
  */
-static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
-		size_t n)
+static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
 {
 	BUG();
 }

-static inline int arch_memcpy_from_pmem(void *dst, const void __pmem *src,
-		size_t n)
+static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
 {
 	BUG();
 	return -EFAULT;
 }

-static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 		struct iov_iter *i)
 {
 	BUG();
 	return 0;
 }

-static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+static inline void arch_clear_pmem(void *addr, size_t size)
 {
 	BUG();
 }

-static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
 {
 	BUG();
 }

-static inline void arch_invalidate_pmem(void __pmem *addr, size_t size)
+static inline void arch_invalidate_pmem(void *addr, size_t size)
 {
 	BUG();
 }
@@ -67,13 +65,6 @@ static inline bool arch_has_pmem_api(void)
 	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
 }

-static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src,
-		size_t size)
-{
-	memcpy(dst, (void __force *) src, size);
-	return 0;
-}
-
 /*
  * memcpy_from_pmem - read from persistent memory with error handling
  * @dst: destination buffer
@@ -82,40 +73,13 @@ static inline int default_memcpy_from_pmem(void *dst, void __pmem const *src,
  *
  * Returns 0 on success negative error code on failure.
  */
-static inline int memcpy_from_pmem(void *dst, void __pmem const *src,
-		size_t size)
+static inline int memcpy_from_pmem(void *dst, void const *src, size_t size)
 {
 	if (arch_has_pmem_api())
 		return arch_memcpy_from_pmem(dst, src, size);
 	else
-		return default_memcpy_from_pmem(dst, src, size);
-}
-
-/*
- * These defaults seek to offer decent performance and minimize the
- * window between i/o completion and writes being durable on media.
- * However, it is undefined / architecture specific whether
- * ARCH_MEMREMAP_PMEM + default_memcpy_to_pmem is sufficient for
- * making data durable relative to i/o completion.
- */
-static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
-		size_t size)
-{
-	memcpy((void __force *) dst, src, size);
-}
-
-static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
-		size_t bytes, struct iov_iter *i)
-{
-	return copy_from_iter_nocache((void __force *)addr, bytes, i);
-}
-
-static inline void default_clear_pmem(void __pmem *addr, size_t size)
-{
-	if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
-		clear_page((void __force *)addr);
-	else
-		memset((void __force *)addr, 0, size);
+		memcpy(dst, src, size);
+	return 0;
 }

 /**
@@ -130,12 +94,12 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size)
  * data may still reside in cpu or platform buffers, so this operation
  * must be followed by a blkdev_issue_flush() on the pmem block device.
  */
-static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
+static inline void memcpy_to_pmem(void *dst, const void *src, size_t n)
 {
 	if (arch_has_pmem_api())
 		arch_memcpy_to_pmem(dst, src, n);
 	else
-		default_memcpy_to_pmem(dst, src, n);
+		memcpy(dst, src, n);
 }

 /**
@@ -147,12 +111,12 @@ static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
  * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
  * See blkdev_issue_flush() note for memcpy_to_pmem().
  */
-static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+static inline size_t copy_from_iter_pmem(void *addr, size_t bytes,
 		struct iov_iter *i)
 {
 	if (arch_has_pmem_api())
 		return arch_copy_from_iter_pmem(addr, bytes, i);
-	return default_copy_from_iter_pmem(addr, bytes, i);
+	return copy_from_iter_nocache(addr, bytes, i);
 }

 /**
@@ -163,12 +127,12 @@ static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
  * Write zeros into the memory range starting at 'addr' for 'size' bytes.
  * See blkdev_issue_flush() note for memcpy_to_pmem().
  */
-static inline void clear_pmem(void __pmem *addr, size_t size)
+static inline void clear_pmem(void *addr, size_t size)
 {
 	if (arch_has_pmem_api())
 		arch_clear_pmem(addr, size);
 	else
-		default_clear_pmem(addr, size);
+		memset(addr, 0, size);
 }

 /**
@@ -179,7 +143,7 @@ static inline void clear_pmem(void __pmem *addr, size_t size)
  * For platforms that support clearing poison this flushes any poisoned
  * ranges out of the cache
  */
-static inline void invalidate_pmem(void __pmem *addr, size_t size)
+static inline void invalidate_pmem(void *addr, size_t size)
 {
 	if (arch_has_pmem_api())
 		arch_invalidate_pmem(addr, size);
@@ -193,7 +157,7 @@ static inline void invalidate_pmem(void __pmem *addr, size_t size)
  * Write back the processor cache range starting at 'addr' for 'size' bytes.
  * See blkdev_issue_flush() note for memcpy_to_pmem().
  */
-static inline void wb_cache_pmem(void __pmem *addr, size_t size)
+static inline void wb_cache_pmem(void *addr, size_t size)
 {
 	if (arch_has_pmem_api())
 		arch_wb_cache_pmem(addr, size);
...
@@ -313,7 +313,6 @@ our $Sparse = qr{
 			__kernel|
 			__force|
 			__iomem|
-			__pmem|
 			__must_check|
 			__init_refok|
 			__kprobes|
...
@@ -16,7 +16,7 @@
 #include <nd.h>

 long pmem_direct_access(struct block_device *bdev, sector_t sector,
-		void __pmem **kaddr, pfn_t *pfn, long size)
+		void **kaddr, pfn_t *pfn, long size)
 {
 	struct pmem_device *pmem = bdev->bd_queue->queuedata;
 	resource_size_t offset = sector * 512 + pmem->data_offset;
...
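
After this change a ->direct_access() caller deals in plain kernel pointers,
per the new prototype in blkdev.h above. A minimal hedged sketch of such a
caller follows; toy_map_sector() is a hypothetical illustration (the real
lookup path in this tree is bdev_direct_access(), which also validates
alignment and partition offsets), not code from this commit:

#include <linux/blkdev.h>
#include <linux/pfn_t.h>

/* Illustrative only: resolve one sector to a kernel virtual address and
 * pfn through the un-annotated ->direct_access() method.
 */
static long toy_map_sector(struct block_device *bdev, sector_t sector,
		void **kaddr, pfn_t *pfn)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	if (!ops->direct_access)
		return -EOPNOTSUPP;
	/* returns the number of bytes addressable at *kaddr, or negative error */
	return ops->direct_access(bdev, sector, kaddr, pfn, PAGE_SIZE);
}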