Commit 679c8bd3 authored by Christoph Hellwig, committed by Vishal Verma

dax: export a low-level __dax_zero_page_range helper

This allows XFS to perform zeroing using the iomap infrastructure and
avoid buffer heads.
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christoph Hellwig <hch@lst.de>
[vishal: fix conflicts with dax-error-handling]
Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
parent 3dc29161
fs/dax.c
@@ -947,6 +947,23 @@ int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
 
+int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
+		unsigned int offset, unsigned int length)
+{
+	struct blk_dax_ctl dax = {
+		.sector = sector,
+		.size = PAGE_SIZE,
+	};
+
+	if (dax_map_atomic(bdev, &dax) < 0)
+		return PTR_ERR(dax.addr);
+	clear_pmem(dax.addr + offset, length);
+	wmb_pmem();
+	dax_unmap_atomic(bdev, &dax);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__dax_zero_page_range);
+
 /**
  * dax_zero_page_range - zero a range within a page of a DAX file
  * @inode: The file being truncated
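For illustration, a minimal sketch of how a caller might use the newly
exported helper to zero a sub-page range on a DAX block device. The wrapper
name zero_partial_page is hypothetical and not part of this commit; in the
XFS/iomap case the bdev and sector would come from an iomap lookup rather
than a buffer_head:

	/* Hypothetical wrapper, sketch only.  Zeroes 'length' bytes starting
	 * at byte 'offset' within the page that begins at 'sector'; the range
	 * must stay inside the single PAGE_SIZE mapping the helper sets up. */
	static int zero_partial_page(struct block_device *bdev, sector_t sector,
			unsigned int offset, unsigned int length)
	{
		if (WARN_ON(offset + length > PAGE_SIZE))
			return -EINVAL;
		return __dax_zero_page_range(bdev, sector, offset, length);
	}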
@@ -982,23 +999,11 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
 	bh.b_bdev = inode->i_sb->s_bdev;
 	bh.b_size = PAGE_SIZE;
 	err = get_block(inode, index, &bh, 0);
-	if (err < 0)
+	if (err < 0 || !buffer_written(&bh))
 		return err;
-	if (buffer_written(&bh)) {
-		struct block_device *bdev = bh.b_bdev;
-		struct blk_dax_ctl dax = {
-			.sector = to_sector(&bh, inode),
-			.size = PAGE_SIZE,
-		};
-
-		if (dax_map_atomic(bdev, &dax) < 0)
-			return PTR_ERR(dax.addr);
-		clear_pmem(dax.addr + offset, length);
-		wmb_pmem();
-		dax_unmap_atomic(bdev, &dax);
-	}
-
-	return 0;
+	return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
+			offset, length);
 }
 EXPORT_SYMBOL_GPL(dax_zero_page_range);
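A note on the tightened error check above: when get_block() succeeds but the
block is a hole or unwritten extent, err is 0 and buffer_written() is false,
so "return err" returns 0 without touching the device. That matches the old
behavior and is correct, since such blocks already read back as zeroes.
Restated with comments (annotation only, not additional code from the
commit):

	err = get_block(inode, index, &bh, 0);
	if (err < 0 || !buffer_written(&bh))
		return err;	/* a real error, or 0: a hole/unwritten
				 * block needs no explicit zeroing */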
include/linux/dax.h
@@ -14,12 +14,19 @@ int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
 
 #ifdef CONFIG_FS_DAX
 struct page *read_dax_sector(struct block_device *bdev, sector_t n);
+int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
+		unsigned int offset, unsigned int length);
 #else
 static inline struct page *read_dax_sector(struct block_device *bdev,
 				sector_t n)
 {
 	return ERR_PTR(-ENXIO);
 }
+static inline int __dax_zero_page_range(struct block_device *bdev,
+		sector_t sector, unsigned int offset, unsigned int length)
+{
+	return -ENXIO;
+}
 #endif
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
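The #else stub follows the usual kernel pattern for config-gated helpers:
callers can reference __dax_zero_page_range() unconditionally, and with
CONFIG_FS_DAX disabled the inline version returns -ENXIO while the
surrounding dead branch compiles away. A sketch under that assumption;
fs_zero_partial_block is a hypothetical caller, not from this commit:

	/* Sketch only: with CONFIG_FS_DAX=n, IS_DAX() evaluates to 0, so the
	 * DAX branch (and the call to the -ENXIO stub) is discarded. */
	static int fs_zero_partial_block(struct inode *inode, sector_t sector,
			unsigned int offset, unsigned int length)
	{
		if (IS_DAX(inode))
			return __dax_zero_page_range(inode->i_sb->s_bdev,
					sector, offset, length);
		/* non-DAX fallback (buffered zeroing) omitted from sketch */
		return 0;
	}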