Commit 11c59c92 authored by Ross Zwisler, committed by Dave Chinner

dax: correct dax iomap code namespace

The recently added DAX functions that use the new struct iomap data
structure were named iomap_dax_rw(), iomap_dax_fault() and
iomap_dax_actor().  These are actually defined in fs/dax.c, though, so they
should be part of the "dax" namespace rather than the "iomap" namespace.
Rename them to dax_iomap_rw(), dax_iomap_fault() and dax_iomap_actor()
respectively.
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Suggested-by: Dave Chinner <david@fromorbit.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent b9fde046
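
The rename is purely mechanical: the signatures, locking rules, and return
conventions of all three functions are unchanged, so callers only swap the
symbol name. As a hedged illustration (not part of this commit), here is what
a filesystem's DAX read_iter handler looks like after the rename, patterned on
the ext2_dax_read_iter() caller updated in the diff below; example_dax_read_iter
and example_iomap_ops are placeholder names:

	/*
	 * Sketch only: mirrors the ext2_dax_read_iter() hunk below.
	 * example_iomap_ops stands in for a filesystem's own iomap_ops.
	 */
	static ssize_t example_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
	{
		struct inode *inode = iocb->ki_filp->f_mapping->host;
		ssize_t ret;

		if (!iov_iter_count(to))
			return 0;	/* skip atime, as the callers below do */

		inode_lock_shared(inode);
		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);	/* was iomap_dax_rw() */
		inode_unlock_shared(inode);

		file_accessed(iocb->ki_filp);
		return ret;
	}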
fs/dax.c
@@ -1031,7 +1031,7 @@ EXPORT_SYMBOL_GPL(dax_truncate_page);
 
 #ifdef CONFIG_FS_IOMAP
 static loff_t
-iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		struct iomap *iomap)
 {
 	struct iov_iter *iter = data;
@@ -1088,7 +1088,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 }
 
 /**
- * iomap_dax_rw - Perform I/O to a DAX file
+ * dax_iomap_rw - Perform I/O to a DAX file
  * @iocb: The control block for this I/O
  * @iter: The addresses to do I/O from or to
  * @ops: iomap ops passed from the file system
@@ -1098,7 +1098,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
  * and evicting any page cache pages in the region under I/O.
  */
 ssize_t
-iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		struct iomap_ops *ops)
 {
 	struct address_space *mapping = iocb->ki_filp->f_mapping;
@@ -1128,7 +1128,7 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
 	while (iov_iter_count(iter)) {
 		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
-				iter, iomap_dax_actor);
+				iter, dax_iomap_actor);
 		if (ret <= 0)
 			break;
 		pos += ret;
@@ -1138,10 +1138,10 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
 	iocb->ki_pos += done;
 	return done ? done : ret;
 }
-EXPORT_SYMBOL_GPL(iomap_dax_rw);
+EXPORT_SYMBOL_GPL(dax_iomap_rw);
 
 /**
- * iomap_dax_fault - handle a page fault on a DAX file
+ * dax_iomap_fault - handle a page fault on a DAX file
  * @vma: The virtual memory area where the fault occurred
  * @vmf: The description of the fault
  * @ops: iomap ops passed from the file system
@@ -1150,7 +1150,7 @@ EXPORT_SYMBOL_GPL(iomap_dax_rw);
  * or mkwrite handler for DAX files.  Assumes the caller has done all the
  * necessary locking for the page fault to proceed successfully.
  */
-int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		struct iomap_ops *ops)
 {
 	struct address_space *mapping = vma->vm_file->f_mapping;
@@ -1252,5 +1252,5 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		return VM_FAULT_SIGBUS | major;
 	return VM_FAULT_NOPAGE | major;
 }
-EXPORT_SYMBOL_GPL(iomap_dax_fault);
+EXPORT_SYMBOL_GPL(dax_iomap_fault);
 #endif /* CONFIG_FS_IOMAP */
fs/ext2/file.c
@@ -38,7 +38,7 @@ static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 		return 0; /* skip atime */
 
 	inode_lock_shared(inode);
-	ret = iomap_dax_rw(iocb, to, &ext2_iomap_ops);
+	ret = dax_iomap_rw(iocb, to, &ext2_iomap_ops);
 	inode_unlock_shared(inode);
 
 	file_accessed(iocb->ki_filp);
@@ -62,7 +62,7 @@ static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	if (ret)
 		goto out_unlock;
 
-	ret = iomap_dax_rw(iocb, from, &ext2_iomap_ops);
+	ret = dax_iomap_rw(iocb, from, &ext2_iomap_ops);
 	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 		i_size_write(inode, iocb->ki_pos);
 		mark_inode_dirty(inode);
@@ -99,7 +99,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 	down_read(&ei->dax_sem);
 
-	ret = iomap_dax_fault(vma, vmf, &ext2_iomap_ops);
+	ret = dax_iomap_fault(vma, vmf, &ext2_iomap_ops);
 
 	up_read(&ei->dax_sem);
 	if (vmf->flags & FAULT_FLAG_WRITE)
fs/xfs/xfs_file.c
@@ -344,7 +344,7 @@ xfs_file_dax_read(
 		return 0; /* skip atime */
 
 	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-	ret = iomap_dax_rw(iocb, to, &xfs_iomap_ops);
+	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
 	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 
 	file_accessed(iocb->ki_filp);
@@ -691,7 +691,7 @@ xfs_file_dax_write(
 	trace_xfs_file_dax_write(ip, count, pos);
 
-	ret = iomap_dax_rw(iocb, from, &xfs_iomap_ops);
+	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
 	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 		i_size_write(inode, iocb->ki_pos);
 		error = xfs_setfilesize(ip, pos, ret);
@@ -1640,7 +1640,7 @@ xfs_filemap_page_mkwrite(
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (IS_DAX(inode)) {
-		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
+		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
 	} else {
 		ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
 		ret = block_page_mkwrite_return(ret);
@@ -1674,7 +1674,7 @@ xfs_filemap_fault(
 		 * changes to xfs_get_blocks_direct() to map unwritten extent
 		 * ioend for conversion on read-only mappings.
 		 */
-		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
+		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
 	} else
 		ret = filemap_fault(vma, vmf);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
include/linux/dax.h
@@ -11,13 +11,13 @@ struct iomap_ops;
 /* We use lowest available exceptional entry bit for locking */
 #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
 
-ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		struct iomap_ops *ops);
 ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
 		  get_block_t, dio_iodone_t, int flags);
 int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
 int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		struct iomap_ops *ops);
 int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
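
For the fault path the change is equally mechanical. As a hedged sketch (again
not part of this commit), a vm_operations_struct .fault handler using the
renamed entry point, patterned on the ext2_dax_fault() hunk above;
example_dax_fault and example_iomap_ops are placeholders, and any
filesystem-private truncate locking (like ext2's dax_sem) is omitted:

	/* Sketch only: mirrors ext2_dax_fault() above, minus fs-private locking. */
	static int example_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct inode *inode = file_inode(vma->vm_file);
		int ret;

		if (vmf->flags & FAULT_FLAG_WRITE) {
			sb_start_pagefault(inode->i_sb);
			file_update_time(vma->vm_file);
		}

		ret = dax_iomap_fault(vma, vmf, &example_iomap_ops);	/* was iomap_dax_fault() */

		if (vmf->flags & FAULT_FLAG_WRITE)
			sb_end_pagefault(inode->i_sb);
		return ret;
	}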