Commit f4200391, authored by Dave Jiang, committed by Linus Torvalds

mm, dax: change pmd_fault() to take only vmf parameter

pmd_fault() and related functions really only need the vmf parameter since
the additional parameters are all included in the vmf struct.  Remove the
additional parameter and simplify pmd_fault() and friends.

Link: http://lkml.kernel.org/r/1484085142-2297-8-git-send-email-ross.zwisler@linux.intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d8a849e1
...@@ -472,8 +472,7 @@ static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -472,8 +472,7 @@ static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return rc; return rc;
} }
static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
struct vm_area_struct *vma, struct vm_fault *vmf)
{ {
unsigned long pmd_addr = vmf->address & PMD_MASK; unsigned long pmd_addr = vmf->address & PMD_MASK;
struct device *dev = &dax_dev->dev; struct device *dev = &dax_dev->dev;
...@@ -482,7 +481,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, ...@@ -482,7 +481,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
pgoff_t pgoff; pgoff_t pgoff;
pfn_t pfn; pfn_t pfn;
if (check_vma(dax_dev, vma, __func__)) if (check_vma(dax_dev, vmf->vma, __func__))
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
dax_region = dax_dev->region; dax_region = dax_dev->region;
...@@ -497,7 +496,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, ...@@ -497,7 +496,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
} }
pgoff = linear_page_index(vma, pmd_addr); pgoff = linear_page_index(vmf->vma, pmd_addr);
phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE); phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
if (phys == -1) { if (phys == -1) {
dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
...@@ -507,22 +506,23 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, ...@@ -507,22 +506,23 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
return vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn, return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
vmf->flags & FAULT_FLAG_WRITE); vmf->flags & FAULT_FLAG_WRITE);
} }
static int dax_dev_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) static int dax_dev_pmd_fault(struct vm_fault *vmf)
{ {
int rc; int rc;
struct file *filp = vma->vm_file; struct file *filp = vmf->vma->vm_file;
struct dax_dev *dax_dev = filp->private_data; struct dax_dev *dax_dev = filp->private_data;
dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__, dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
current->comm, (vmf->flags & FAULT_FLAG_WRITE) current->comm, (vmf->flags & FAULT_FLAG_WRITE)
? "write" : "read", vma->vm_start, vma->vm_end); ? "write" : "read",
vmf->vma->vm_start, vmf->vma->vm_end);
rcu_read_lock(); rcu_read_lock();
rc = __dax_dev_pmd_fault(dax_dev, vma, vmf); rc = __dax_dev_pmd_fault(dax_dev, vmf);
rcu_read_unlock(); rcu_read_unlock();
return rc; return rc;
......
...@@ -1256,11 +1256,10 @@ EXPORT_SYMBOL_GPL(dax_iomap_fault); ...@@ -1256,11 +1256,10 @@ EXPORT_SYMBOL_GPL(dax_iomap_fault);
*/ */
#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1) #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd, static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
struct vm_fault *vmf, unsigned long address, loff_t pos, void **entryp)
struct iomap *iomap, loff_t pos, bool write, void **entryp)
{ {
struct address_space *mapping = vma->vm_file->f_mapping; struct address_space *mapping = vmf->vma->vm_file->f_mapping;
struct block_device *bdev = iomap->bdev; struct block_device *bdev = iomap->bdev;
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct blk_dax_ctl dax = { struct blk_dax_ctl dax = {
...@@ -1287,31 +1286,30 @@ static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd, ...@@ -1287,31 +1286,30 @@ static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
goto fallback; goto fallback;
*entryp = ret; *entryp = ret;
trace_dax_pmd_insert_mapping(inode, vma, address, write, length, trace_dax_pmd_insert_mapping(inode, vmf, length, dax.pfn, ret);
dax.pfn, ret); return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write); dax.pfn, vmf->flags & FAULT_FLAG_WRITE);
unmap_fallback: unmap_fallback:
dax_unmap_atomic(bdev, &dax); dax_unmap_atomic(bdev, &dax);
fallback: fallback:
trace_dax_pmd_insert_mapping_fallback(inode, vma, address, write, trace_dax_pmd_insert_mapping_fallback(inode, vmf, length,
length, dax.pfn, ret); dax.pfn, ret);
return VM_FAULT_FALLBACK; return VM_FAULT_FALLBACK;
} }
static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd, static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
struct vm_fault *vmf, unsigned long address, void **entryp)
struct iomap *iomap, void **entryp)
{ {
struct address_space *mapping = vma->vm_file->f_mapping; struct address_space *mapping = vmf->vma->vm_file->f_mapping;
unsigned long pmd_addr = address & PMD_MASK; unsigned long pmd_addr = vmf->address & PMD_MASK;
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
struct page *zero_page; struct page *zero_page;
void *ret = NULL; void *ret = NULL;
spinlock_t *ptl; spinlock_t *ptl;
pmd_t pmd_entry; pmd_t pmd_entry;
zero_page = mm_get_huge_zero_page(vma->vm_mm); zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
if (unlikely(!zero_page)) if (unlikely(!zero_page))
goto fallback; goto fallback;
...@@ -1322,27 +1320,27 @@ static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd, ...@@ -1322,27 +1320,27 @@ static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
goto fallback; goto fallback;
*entryp = ret; *entryp = ret;
ptl = pmd_lock(vma->vm_mm, pmd); ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
if (!pmd_none(*pmd)) { if (!pmd_none(*(vmf->pmd))) {
spin_unlock(ptl); spin_unlock(ptl);
goto fallback; goto fallback;
} }
pmd_entry = mk_pmd(zero_page, vma->vm_page_prot); pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
pmd_entry = pmd_mkhuge(pmd_entry); pmd_entry = pmd_mkhuge(pmd_entry);
set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry); set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
spin_unlock(ptl); spin_unlock(ptl);
trace_dax_pmd_load_hole(inode, vma, address, zero_page, ret); trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
return VM_FAULT_NOPAGE; return VM_FAULT_NOPAGE;
fallback: fallback:
trace_dax_pmd_load_hole_fallback(inode, vma, address, zero_page, ret); trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
return VM_FAULT_FALLBACK; return VM_FAULT_FALLBACK;
} }
int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf, int dax_iomap_pmd_fault(struct vm_fault *vmf, struct iomap_ops *ops)
struct iomap_ops *ops)
{ {
struct vm_area_struct *vma = vmf->vma;
struct address_space *mapping = vma->vm_file->f_mapping; struct address_space *mapping = vma->vm_file->f_mapping;
unsigned long pmd_addr = vmf->address & PMD_MASK; unsigned long pmd_addr = vmf->address & PMD_MASK;
bool write = vmf->flags & FAULT_FLAG_WRITE; bool write = vmf->flags & FAULT_FLAG_WRITE;
...@@ -1363,7 +1361,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf, ...@@ -1363,7 +1361,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
pgoff = linear_page_index(vma, pmd_addr); pgoff = linear_page_index(vma, pmd_addr);
max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT; max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
trace_dax_pmd_fault(inode, vma, vmf, max_pgoff, 0); trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
/* Fall back to PTEs if we're going to COW */ /* Fall back to PTEs if we're going to COW */
if (write && !(vma->vm_flags & VM_SHARED)) if (write && !(vma->vm_flags & VM_SHARED))
...@@ -1409,15 +1407,13 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf, ...@@ -1409,15 +1407,13 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
switch (iomap.type) { switch (iomap.type) {
case IOMAP_MAPPED: case IOMAP_MAPPED:
result = dax_pmd_insert_mapping(vma, vmf->pmd, vmf, result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
vmf->address, &iomap, pos, write, &entry);
break; break;
case IOMAP_UNWRITTEN: case IOMAP_UNWRITTEN:
case IOMAP_HOLE: case IOMAP_HOLE:
if (WARN_ON_ONCE(write)) if (WARN_ON_ONCE(write))
goto unlock_entry; goto unlock_entry;
result = dax_pmd_load_hole(vma, vmf->pmd, vmf, vmf->address, result = dax_pmd_load_hole(vmf, &iomap, &entry);
&iomap, &entry);
break; break;
default: default:
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
...@@ -1447,7 +1443,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf, ...@@ -1447,7 +1443,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
count_vm_event(THP_FAULT_FALLBACK); count_vm_event(THP_FAULT_FALLBACK);
} }
out: out:
trace_dax_pmd_fault_done(inode, vma, vmf, max_pgoff, result); trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
return result; return result;
} }
EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault); EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
......
...@@ -274,19 +274,19 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -274,19 +274,19 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
} }
static int static int
ext4_dax_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ext4_dax_pmd_fault(struct vm_fault *vmf)
{ {
int result; int result;
struct inode *inode = file_inode(vma->vm_file); struct inode *inode = file_inode(vmf->vma->vm_file);
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
bool write = vmf->flags & FAULT_FLAG_WRITE; bool write = vmf->flags & FAULT_FLAG_WRITE;
if (write) { if (write) {
sb_start_pagefault(sb); sb_start_pagefault(sb);
file_update_time(vma->vm_file); file_update_time(vmf->vma->vm_file);
} }
down_read(&EXT4_I(inode)->i_mmap_sem); down_read(&EXT4_I(inode)->i_mmap_sem);
result = dax_iomap_pmd_fault(vma, vmf, &ext4_iomap_ops); result = dax_iomap_pmd_fault(vmf, &ext4_iomap_ops);
up_read(&EXT4_I(inode)->i_mmap_sem); up_read(&EXT4_I(inode)->i_mmap_sem);
if (write) if (write)
sb_end_pagefault(sb); sb_end_pagefault(sb);
......
...@@ -1431,10 +1431,9 @@ xfs_filemap_fault( ...@@ -1431,10 +1431,9 @@ xfs_filemap_fault(
*/ */
STATIC int STATIC int
xfs_filemap_pmd_fault( xfs_filemap_pmd_fault(
struct vm_area_struct *vma,
struct vm_fault *vmf) struct vm_fault *vmf)
{ {
struct inode *inode = file_inode(vma->vm_file); struct inode *inode = file_inode(vmf->vma->vm_file);
struct xfs_inode *ip = XFS_I(inode); struct xfs_inode *ip = XFS_I(inode);
int ret; int ret;
...@@ -1445,11 +1444,11 @@ xfs_filemap_pmd_fault( ...@@ -1445,11 +1444,11 @@ xfs_filemap_pmd_fault(
if (vmf->flags & FAULT_FLAG_WRITE) { if (vmf->flags & FAULT_FLAG_WRITE) {
sb_start_pagefault(inode->i_sb); sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file); file_update_time(vmf->vma->vm_file);
} }
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
ret = dax_iomap_pmd_fault(vma, vmf, &xfs_iomap_ops); ret = dax_iomap_pmd_fault(vmf, &xfs_iomap_ops);
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
if (vmf->flags & FAULT_FLAG_WRITE) if (vmf->flags & FAULT_FLAG_WRITE)
......
...@@ -71,15 +71,14 @@ static inline unsigned int dax_radix_order(void *entry) ...@@ -71,15 +71,14 @@ static inline unsigned int dax_radix_order(void *entry)
return PMD_SHIFT - PAGE_SHIFT; return PMD_SHIFT - PAGE_SHIFT;
return 0; return 0;
} }
int dax_iomap_pmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf, int dax_iomap_pmd_fault(struct vm_fault *vmf, struct iomap_ops *ops);
struct iomap_ops *ops);
#else #else
static inline unsigned int dax_radix_order(void *entry) static inline unsigned int dax_radix_order(void *entry)
{ {
return 0; return 0;
} }
static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma, static inline int dax_iomap_pmd_fault(struct vm_fault *vmf,
struct vm_fault *vmf, struct iomap_ops *ops) struct iomap_ops *ops)
{ {
return VM_FAULT_FALLBACK; return VM_FAULT_FALLBACK;
} }
......
...@@ -351,7 +351,7 @@ struct vm_operations_struct { ...@@ -351,7 +351,7 @@ struct vm_operations_struct {
void (*close)(struct vm_area_struct * area); void (*close)(struct vm_area_struct * area);
int (*mremap)(struct vm_area_struct * area); int (*mremap)(struct vm_area_struct * area);
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
int (*pmd_fault)(struct vm_area_struct *vma, struct vm_fault *vmf); int (*pmd_fault)(struct vm_fault *vmf);
void (*map_pages)(struct vm_fault *vmf, void (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff); pgoff_t start_pgoff, pgoff_t end_pgoff);
......
...@@ -7,9 +7,9 @@ ...@@ -7,9 +7,9 @@
#include <linux/tracepoint.h> #include <linux/tracepoint.h>
DECLARE_EVENT_CLASS(dax_pmd_fault_class, DECLARE_EVENT_CLASS(dax_pmd_fault_class,
TP_PROTO(struct inode *inode, struct vm_area_struct *vma, TP_PROTO(struct inode *inode, struct vm_fault *vmf,
struct vm_fault *vmf, pgoff_t max_pgoff, int result), pgoff_t max_pgoff, int result),
TP_ARGS(inode, vma, vmf, max_pgoff, result), TP_ARGS(inode, vmf, max_pgoff, result),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(unsigned long, ino) __field(unsigned long, ino)
__field(unsigned long, vm_start) __field(unsigned long, vm_start)
...@@ -25,9 +25,9 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class, ...@@ -25,9 +25,9 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class,
TP_fast_assign( TP_fast_assign(
__entry->dev = inode->i_sb->s_dev; __entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino; __entry->ino = inode->i_ino;
__entry->vm_start = vma->vm_start; __entry->vm_start = vmf->vma->vm_start;
__entry->vm_end = vma->vm_end; __entry->vm_end = vmf->vma->vm_end;
__entry->vm_flags = vma->vm_flags; __entry->vm_flags = vmf->vma->vm_flags;
__entry->address = vmf->address; __entry->address = vmf->address;
__entry->flags = vmf->flags; __entry->flags = vmf->flags;
__entry->pgoff = vmf->pgoff; __entry->pgoff = vmf->pgoff;
...@@ -52,19 +52,18 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class, ...@@ -52,19 +52,18 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class,
#define DEFINE_PMD_FAULT_EVENT(name) \ #define DEFINE_PMD_FAULT_EVENT(name) \
DEFINE_EVENT(dax_pmd_fault_class, name, \ DEFINE_EVENT(dax_pmd_fault_class, name, \
TP_PROTO(struct inode *inode, struct vm_area_struct *vma, \ TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
struct vm_fault *vmf, \
pgoff_t max_pgoff, int result), \ pgoff_t max_pgoff, int result), \
TP_ARGS(inode, vma, vmf, max_pgoff, result)) TP_ARGS(inode, vmf, max_pgoff, result))
DEFINE_PMD_FAULT_EVENT(dax_pmd_fault); DEFINE_PMD_FAULT_EVENT(dax_pmd_fault);
DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done); DEFINE_PMD_FAULT_EVENT(dax_pmd_fault_done);
DECLARE_EVENT_CLASS(dax_pmd_load_hole_class, DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
TP_PROTO(struct inode *inode, struct vm_area_struct *vma, TP_PROTO(struct inode *inode, struct vm_fault *vmf,
unsigned long address, struct page *zero_page, struct page *zero_page,
void *radix_entry), void *radix_entry),
TP_ARGS(inode, vma, address, zero_page, radix_entry), TP_ARGS(inode, vmf, zero_page, radix_entry),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(unsigned long, ino) __field(unsigned long, ino)
__field(unsigned long, vm_flags) __field(unsigned long, vm_flags)
...@@ -76,8 +75,8 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class, ...@@ -76,8 +75,8 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
TP_fast_assign( TP_fast_assign(
__entry->dev = inode->i_sb->s_dev; __entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino; __entry->ino = inode->i_ino;
__entry->vm_flags = vma->vm_flags; __entry->vm_flags = vmf->vma->vm_flags;
__entry->address = address; __entry->address = vmf->address;
__entry->zero_page = zero_page; __entry->zero_page = zero_page;
__entry->radix_entry = radix_entry; __entry->radix_entry = radix_entry;
), ),
...@@ -95,19 +94,17 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class, ...@@ -95,19 +94,17 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
#define DEFINE_PMD_LOAD_HOLE_EVENT(name) \ #define DEFINE_PMD_LOAD_HOLE_EVENT(name) \
DEFINE_EVENT(dax_pmd_load_hole_class, name, \ DEFINE_EVENT(dax_pmd_load_hole_class, name, \
TP_PROTO(struct inode *inode, struct vm_area_struct *vma, \ TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
unsigned long address, struct page *zero_page, \ struct page *zero_page, void *radix_entry), \
void *radix_entry), \ TP_ARGS(inode, vmf, zero_page, radix_entry))
TP_ARGS(inode, vma, address, zero_page, radix_entry))
DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole); DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole);
DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback); DEFINE_PMD_LOAD_HOLE_EVENT(dax_pmd_load_hole_fallback);
DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class, DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
TP_PROTO(struct inode *inode, struct vm_area_struct *vma, TP_PROTO(struct inode *inode, struct vm_fault *vmf,
unsigned long address, int write, long length, pfn_t pfn, long length, pfn_t pfn, void *radix_entry),
void *radix_entry), TP_ARGS(inode, vmf, length, pfn, radix_entry),
TP_ARGS(inode, vma, address, write, length, pfn, radix_entry),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(unsigned long, ino) __field(unsigned long, ino)
__field(unsigned long, vm_flags) __field(unsigned long, vm_flags)
...@@ -121,9 +118,9 @@ DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class, ...@@ -121,9 +118,9 @@ DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
TP_fast_assign( TP_fast_assign(
__entry->dev = inode->i_sb->s_dev; __entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino; __entry->ino = inode->i_ino;
__entry->vm_flags = vma->vm_flags; __entry->vm_flags = vmf->vma->vm_flags;
__entry->address = address; __entry->address = vmf->address;
__entry->write = write; __entry->write = vmf->flags & FAULT_FLAG_WRITE;
__entry->length = length; __entry->length = length;
__entry->pfn_val = pfn.val; __entry->pfn_val = pfn.val;
__entry->radix_entry = radix_entry; __entry->radix_entry = radix_entry;
...@@ -146,10 +143,9 @@ DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class, ...@@ -146,10 +143,9 @@ DECLARE_EVENT_CLASS(dax_pmd_insert_mapping_class,
#define DEFINE_PMD_INSERT_MAPPING_EVENT(name) \ #define DEFINE_PMD_INSERT_MAPPING_EVENT(name) \
DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \ DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \
TP_PROTO(struct inode *inode, struct vm_area_struct *vma, \ TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
unsigned long address, int write, long length, pfn_t pfn, \ long length, pfn_t pfn, void *radix_entry), \
void *radix_entry), \ TP_ARGS(inode, vmf, length, pfn, radix_entry))
TP_ARGS(inode, vma, address, write, length, pfn, radix_entry))
DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping); DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping);
DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback); DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback);
......
...@@ -3471,11 +3471,10 @@ static int do_numa_page(struct vm_fault *vmf) ...@@ -3471,11 +3471,10 @@ static int do_numa_page(struct vm_fault *vmf)
static int create_huge_pmd(struct vm_fault *vmf) static int create_huge_pmd(struct vm_fault *vmf)
{ {
struct vm_area_struct *vma = vmf->vma; if (vma_is_anonymous(vmf->vma))
if (vma_is_anonymous(vma))
return do_huge_pmd_anonymous_page(vmf); return do_huge_pmd_anonymous_page(vmf);
if (vma->vm_ops->pmd_fault) if (vmf->vma->vm_ops->pmd_fault)
return vma->vm_ops->pmd_fault(vma, vmf); return vmf->vma->vm_ops->pmd_fault(vmf);
return VM_FAULT_FALLBACK; return VM_FAULT_FALLBACK;
} }
...@@ -3484,7 +3483,7 @@ static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) ...@@ -3484,7 +3483,7 @@ static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
if (vma_is_anonymous(vmf->vma)) if (vma_is_anonymous(vmf->vma))
return do_huge_pmd_wp_page(vmf, orig_pmd); return do_huge_pmd_wp_page(vmf, orig_pmd);
if (vmf->vma->vm_ops->pmd_fault) if (vmf->vma->vm_ops->pmd_fault)
return vmf->vma->vm_ops->pmd_fault(vmf->vma, vmf); return vmf->vma->vm_ops->pmd_fault(vmf);
/* COW handled on pte level: split pmd */ /* COW handled on pte level: split pmd */
VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma); VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment