Commit 6b524995 authored by Ross Zwisler, committed by Linus Torvalds

dax: remove unused fault wrappers

Remove the unused wrappers dax_fault() and dax_pmd_fault().  After this
removal, rename __dax_fault() and __dax_pmd_fault() to dax_fault() and
dax_pmd_fault() respectively, and update all callers.

The dax_fault() and dax_pmd_fault() wrappers were initially intended to
capture some filesystem independent functionality around page faults
(calling sb_start_pagefault() & sb_end_pagefault(), updating file mtime
and ctime).
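
For context, the boilerplate these wrappers supplied looked roughly like the following (a condensed sketch of the dax_fault() wrapper that the diff below removes):

    int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                  get_block_t get_block)
    {
        int result;
        struct super_block *sb = file_inode(vma->vm_file)->i_sb;

        /* Write faults must block filesystem freezing and update mtime/ctime. */
        if (vmf->flags & FAULT_FLAG_WRITE) {
            sb_start_pagefault(sb);
            file_update_time(vma->vm_file);
        }
        result = __dax_fault(vma, vmf, get_block);    /* the real fault handling */
        if (vmf->flags & FAULT_FLAG_WRITE)
            sb_end_pagefault(sb);

        return result;
    }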

However, the following commits:

   5726b27b ("ext2: Add locking for DAX faults")
   ea3d7209 ("ext4: fix races between page faults and hole punching")

added locking to the ext2 and ext4 filesystems after these common
operations but before __dax_fault() and __dax_pmd_fault() were called.
This means that these wrappers are no longer used, and are unlikely to
be used in the future.
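
The per-filesystem calling convention that makes the wrappers redundant looks roughly like this (a sketch modeled on the ext2 fault handler touched in the diff below; the dax_sem lock and ext2_get_block are ext2-specific):

    static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
        struct inode *inode = file_inode(vma->vm_file);
        struct ext2_inode_info *ei = EXT2_I(inode);
        int ret;

        if (vmf->flags & FAULT_FLAG_WRITE) {
            sb_start_pagefault(inode->i_sb);
            file_update_time(vma->vm_file);
        }
        /* Filesystem-private lock serializing faults against truncate/hole punch. */
        down_read(&ei->dax_sem);

        ret = dax_fault(vma, vmf, ext2_get_block);    /* was __dax_fault() */

        up_read(&ei->dax_sem);
        if (vmf->flags & FAULT_FLAG_WRITE)
            sb_end_pagefault(inode->i_sb);
        return ret;
    }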

XFS has had locking analogous to what was recently added to ext2 and
ext4 since DAX support was initially introduced by:

   6b698ede ("xfs: add DAX file operations support")

Link: http://lkml.kernel.org/r/20160714214049.20075-2-ross.zwisler@linux.intel.com
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 221c7dc8
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -819,16 +819,16 @@ static int dax_insert_mapping(struct address_space *mapping,
 }
 
 /**
- * __dax_fault - handle a page fault on a DAX file
+ * dax_fault - handle a page fault on a DAX file
  * @vma: The virtual memory area where the fault occurred
  * @vmf: The description of the fault
  * @get_block: The filesystem method used to translate file offsets to blocks
  *
  * When a page fault occurs, filesystems may call this helper in their
- * fault handler for DAX files. __dax_fault() assumes the caller has done all
+ * fault handler for DAX files. dax_fault() assumes the caller has done all
  * the necessary locking for the page fault to proceed successfully.
  */
-int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                 get_block_t get_block)
 {
         struct file *file = vma->vm_file;
@@ -913,33 +913,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                 return VM_FAULT_SIGBUS | major;
         return VM_FAULT_NOPAGE | major;
 }
-EXPORT_SYMBOL(__dax_fault);
-
-/**
- * dax_fault - handle a page fault on a DAX file
- * @vma: The virtual memory area where the fault occurred
- * @vmf: The description of the fault
- * @get_block: The filesystem method used to translate file offsets to blocks
- *
- * When a page fault occurs, filesystems may call this helper in their
- * fault handler for DAX files.
- */
-int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
-              get_block_t get_block)
-{
-        int result;
-        struct super_block *sb = file_inode(vma->vm_file)->i_sb;
-
-        if (vmf->flags & FAULT_FLAG_WRITE) {
-                sb_start_pagefault(sb);
-                file_update_time(vma->vm_file);
-        }
-        result = __dax_fault(vma, vmf, get_block);
-        if (vmf->flags & FAULT_FLAG_WRITE)
-                sb_end_pagefault(sb);
-
-        return result;
-}
 EXPORT_SYMBOL_GPL(dax_fault);
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -967,7 +940,16 @@ static void __dax_dbg(struct buffer_head *bh, unsigned long address,
 #define dax_pmd_dbg(bh, address, reason)        __dax_dbg(bh, address, reason, "dax_pmd")
 
-int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
+/**
+ * dax_pmd_fault - handle a PMD fault on a DAX file
+ * @vma: The virtual memory area where the fault occurred
+ * @vmf: The description of the fault
+ * @get_block: The filesystem method used to translate file offsets to blocks
+ *
+ * When a page fault occurs, filesystems may call this helper in their
+ * pmd_fault handler for DAX files.
+ */
+int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                 pmd_t *pmd, unsigned int flags, get_block_t get_block)
 {
         struct file *file = vma->vm_file;
@@ -1119,7 +1101,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
          *
          * The PMD path doesn't have an equivalent to
          * dax_pfn_mkwrite(), though, so for a read followed by a
-         * write we traverse all the way through __dax_pmd_fault()
+         * write we traverse all the way through dax_pmd_fault()
          * twice.  This means we can just skip inserting a radix tree
          * entry completely on the initial read and just wait until
          * the write to insert a dirty entry.
@@ -1148,33 +1130,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
         result = VM_FAULT_FALLBACK;
         goto out;
 }
-EXPORT_SYMBOL_GPL(__dax_pmd_fault);
-
-/**
- * dax_pmd_fault - handle a PMD fault on a DAX file
- * @vma: The virtual memory area where the fault occurred
- * @vmf: The description of the fault
- * @get_block: The filesystem method used to translate file offsets to blocks
- *
- * When a page fault occurs, filesystems may call this helper in their
- * pmd_fault handler for DAX files.
- */
-int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
-                pmd_t *pmd, unsigned int flags, get_block_t get_block)
-{
-        int result;
-        struct super_block *sb = file_inode(vma->vm_file)->i_sb;
-
-        if (flags & FAULT_FLAG_WRITE) {
-                sb_start_pagefault(sb);
-                file_update_time(vma->vm_file);
-        }
-        result = __dax_pmd_fault(vma, address, pmd, flags, get_block);
-        if (flags & FAULT_FLAG_WRITE)
-                sb_end_pagefault(sb);
-
-        return result;
-}
 EXPORT_SYMBOL_GPL(dax_pmd_fault);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -51,7 +51,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
         }
         down_read(&ei->dax_sem);
 
-        ret = __dax_fault(vma, vmf, ext2_get_block);
+        ret = dax_fault(vma, vmf, ext2_get_block);
 
         up_read(&ei->dax_sem);
         if (vmf->flags & FAULT_FLAG_WRITE)
@@ -72,7 +72,7 @@ static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
         }
         down_read(&ei->dax_sem);
 
-        ret = __dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block);
+        ret = dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block);
 
         up_read(&ei->dax_sem);
         if (flags & FAULT_FLAG_WRITE)
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -202,7 +202,7 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 if (IS_ERR(handle))
                         result = VM_FAULT_SIGBUS;
                 else
-                        result = __dax_fault(vma, vmf, ext4_dax_get_block);
+                        result = dax_fault(vma, vmf, ext4_dax_get_block);
 
         if (write) {
                 if (!IS_ERR(handle))
@@ -237,7 +237,7 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                 if (IS_ERR(handle))
                         result = VM_FAULT_SIGBUS;
                 else
-                        result = __dax_pmd_fault(vma, addr, pmd, flags,
+                        result = dax_pmd_fault(vma, addr, pmd, flags,
                                         ext4_dax_get_block);
 
         if (write) {
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1551,7 +1551,7 @@ xfs_filemap_page_mkwrite(
         xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
         if (IS_DAX(inode)) {
-                ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
+                ret = dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
         } else {
                 ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
                 ret = block_page_mkwrite_return(ret);
@@ -1585,7 +1585,7 @@ xfs_filemap_fault(
                  * changes to xfs_get_blocks_direct() to map unwritten extent
                  * ioend for conversion on read-only mappings.
                  */
-                ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
+                ret = dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
         } else
                 ret = filemap_fault(vma, vmf);
         xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
@@ -1622,7 +1622,7 @@ xfs_filemap_pmd_fault(
         }
 
         xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-        ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
+        ret = dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
         xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
         if (flags & FAULT_FLAG_WRITE)
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -14,7 +14,6 @@ ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
 int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
 int dax_truncate_page(struct inode *, loff_t from, get_block_t);
 int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
-int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
                 pgoff_t index, bool wake_all);
@@ -46,19 +45,15 @@ static inline int __dax_zero_page_range(struct block_device *bdev,
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
 int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
                 unsigned int flags, get_block_t);
-int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
-                unsigned int flags, get_block_t);
 #else
 static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                 pmd_t *pmd, unsigned int flags, get_block_t gb)
 {
         return VM_FAULT_FALLBACK;
 }
-#define __dax_pmd_fault dax_pmd_fault
 #endif
 int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
 #define dax_mkwrite(vma, vmf, gb)        dax_fault(vma, vmf, gb)
-#define __dax_mkwrite(vma, vmf, gb)        __dax_fault(vma, vmf, gb)
 
 static inline bool vma_is_dax(struct vm_area_struct *vma)
 {