Commit d522d569 authored by Christoph Hellwig, committed by Darrick J. Wong

xfs: consolidate the various page fault handlers

Add a new __xfs_filemap_fault helper that implements all four page fault
callouts, and turn those methods into small stubs that set the correct
write_fault flag and, for the hugepage-related ones, exit early in the
non-DAX case.

Also remove the extra size checking in the pfn_fault path, which is now
handled in the core DAX code.

Life would be so much simpler if we only had one method for all this.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent e7647fb4
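
For context (not part of this commit): the "four page fault callouts" referred to
above are the handlers XFS plugs into its vm_operations_struct. A rough sketch of
that wiring in fs/xfs/xfs_file.c of this era is shown below; the exact field list
(including the .map_pages entry) is taken from the surrounding kernel code rather
than from this diff, so treat it as illustrative.

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,		/* PTE-sized read/write faults */
	.huge_fault	= xfs_filemap_huge_fault,	/* PMD/PUD faults, DAX only */
	.map_pages	= filemap_map_pages,		/* read-ahead of already cached pages */
	.page_mkwrite	= xfs_filemap_page_mkwrite,	/* page being made writable */
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,	/* DAX pfn mapping made writable */
};

After this patch all of these callouts funnel into __xfs_filemap_fault(), which
takes the fault granularity and a write_fault flag and does the freeze protection,
MMAPLOCK locking and DAX/iomap dispatch in one place.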
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1011,95 +1011,67 @@ xfs_file_llseek(
  * page_lock (MM)
  * i_lock (XFS - extent map serialisation)
  */
-
-/*
- * mmap()d file has taken write protection fault and is being made writable. We
- * can set the page state up correctly for a writable page, which means we can
- * do correct delalloc accounting (ENOSPC checking!) and unwritten extent
- * mapping.
- */
-STATIC int
-xfs_filemap_page_mkwrite(
-	struct vm_fault		*vmf)
+static int
+__xfs_filemap_fault(
+	struct vm_fault		*vmf,
+	enum page_entry_size	pe_size,
+	bool			write_fault)
 {
 	struct inode		*inode = file_inode(vmf->vma->vm_file);
+	struct xfs_inode	*ip = XFS_I(inode);
 	int			ret;
 
-	trace_xfs_filemap_page_mkwrite(XFS_I(inode));
+	trace_xfs_filemap_fault(ip, pe_size, write_fault);
 
-	sb_start_pagefault(inode->i_sb);
-	file_update_time(vmf->vma->vm_file);
-	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+	if (write_fault) {
+		sb_start_pagefault(inode->i_sb);
+		file_update_time(vmf->vma->vm_file);
+	}
 
+	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 	if (IS_DAX(inode)) {
-		ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
+		ret = dax_iomap_fault(vmf, pe_size, &xfs_iomap_ops);
 	} else {
-		ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
+		if (write_fault)
+			ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
+		else
+			ret = filemap_fault(vmf);
 	}
-
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-	sb_end_pagefault(inode->i_sb);
 
+	if (write_fault)
+		sb_end_pagefault(inode->i_sb);
 	return ret;
 }
 
-STATIC int
+static int
 xfs_filemap_fault(
 	struct vm_fault		*vmf)
 {
-	struct inode		*inode = file_inode(vmf->vma->vm_file);
-	int			ret;
-
-	trace_xfs_filemap_fault(XFS_I(inode));
-
 	/* DAX can shortcut the normal fault path on write faults! */
-	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
-		return xfs_filemap_page_mkwrite(vmf);
-
-	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-	if (IS_DAX(inode))
-		ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
-	else
-		ret = filemap_fault(vmf);
-	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-
-	return ret;
+	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
+			IS_DAX(file_inode(vmf->vma->vm_file)) &&
+			(vmf->flags & FAULT_FLAG_WRITE));
 }
 
-/*
- * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
- * both read and write faults. Hence we need to handle both cases. There is no
- * ->huge_mkwrite callout for huge pages, so we have a single function here to
- * handle both cases here. @flags carries the information on the type of fault
- * occuring.
- */
-STATIC int
+static int
 xfs_filemap_huge_fault(
 	struct vm_fault		*vmf,
 	enum page_entry_size	pe_size)
 {
-	struct inode		*inode = file_inode(vmf->vma->vm_file);
-	struct xfs_inode	*ip = XFS_I(inode);
-	int			ret;
-
-	if (!IS_DAX(inode))
+	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
 		return VM_FAULT_FALLBACK;
 
-	trace_xfs_filemap_huge_fault(ip);
-
-	if (vmf->flags & FAULT_FLAG_WRITE) {
-		sb_start_pagefault(inode->i_sb);
-		file_update_time(vmf->vma->vm_file);
-	}
-
-	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-	ret = dax_iomap_fault(vmf, pe_size, &xfs_iomap_ops);
-	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-
-	if (vmf->flags & FAULT_FLAG_WRITE)
-		sb_end_pagefault(inode->i_sb);
-
-	return ret;
+	/* DAX can shortcut the normal fault path on write faults! */
+	return __xfs_filemap_fault(vmf, pe_size,
+			(vmf->flags & FAULT_FLAG_WRITE));
+}
+
+static int
+xfs_filemap_page_mkwrite(
+	struct vm_fault		*vmf)
+{
+	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
 }
 
 /*
...
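
To summarise the hunk above, each consolidated entry point is now a thin wrapper
that only picks the arguments for __xfs_filemap_fault():

  xfs_filemap_fault        -> pe_size = PE_SIZE_PTE, write_fault = IS_DAX(inode) && FAULT_FLAG_WRITE
  xfs_filemap_huge_fault   -> pe_size as passed in (PMD/PUD), write_fault = FAULT_FLAG_WRITE, DAX only
  xfs_filemap_page_mkwrite -> pe_size = PE_SIZE_PTE, write_fault = true

The write_fault flag decides whether freeze protection (sb_start_pagefault) and the
timestamp update are taken, and whether the non-DAX path goes through
iomap_page_mkwrite() or plain filemap_fault().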
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -688,11 +688,34 @@ DEFINE_INODE_EVENT(xfs_inode_set_cowblocks_tag);
 DEFINE_INODE_EVENT(xfs_inode_clear_cowblocks_tag);
 DEFINE_INODE_EVENT(xfs_inode_free_cowblocks_invalid);
 
-DEFINE_INODE_EVENT(xfs_filemap_fault);
-DEFINE_INODE_EVENT(xfs_filemap_huge_fault);
-DEFINE_INODE_EVENT(xfs_filemap_page_mkwrite);
 DEFINE_INODE_EVENT(xfs_filemap_pfn_mkwrite);
 
+TRACE_EVENT(xfs_filemap_fault,
+	TP_PROTO(struct xfs_inode *ip, enum page_entry_size pe_size,
+		 bool write_fault),
+	TP_ARGS(ip, pe_size, write_fault),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_ino_t, ino)
+		__field(enum page_entry_size, pe_size)
+		__field(bool, write_fault)
+	),
+	TP_fast_assign(
+		__entry->dev = VFS_I(ip)->i_sb->s_dev;
+		__entry->ino = ip->i_ino;
+		__entry->pe_size = pe_size;
+		__entry->write_fault = write_fault;
+	),
+	TP_printk("dev %d:%d ino 0x%llx %s write_fault %d",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->ino,
+		  __print_symbolic(__entry->pe_size,
+			{ PE_SIZE_PTE, "PTE" },
+			{ PE_SIZE_PMD, "PMD" },
+			{ PE_SIZE_PUD, "PUD" }),
+		  __entry->write_fault)
+)
+
 DECLARE_EVENT_CLASS(xfs_iref_class,
 	TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
 	TP_ARGS(ip, caller_ip),
...
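
The new tracepoint replaces the three per-handler inode events with a single event
that also records the fault granularity and whether it was a write fault. Assuming
the usual tracefs layout, it can be enabled under events/xfs/xfs_filemap_fault in
/sys/kernel/debug/tracing (or /sys/kernel/tracing); per the TP_printk format above,
an emitted line then looks roughly like "dev 253:16 ino 0x85 PMD write_fault 1",
where the device and inode numbers here are made-up examples.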