Commit c791ace1 authored by Dave Jiang, committed by Linus Torvalds

mm: replace FAULT_FLAG_SIZE with parameter to huge_fault

Since the introduction of FAULT_FLAG_SIZE to the vm_fault flags, it has
been somewhat painful to get the flags set and cleared at the correct
locations.  More than one kernel oops was introduced due to the
difficulty of getting the placement correct.

Remove the flag values and introduce an input parameter to huge_fault
that indicates the size of the page entry.  This makes the code easier
to trace and should avoid the issues seen with the fault flags, where
the flag had to be cleared again in the fallback paths.
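
For reference, a converted ->huge_fault handler now dispatches on the new
enum instead of on vmf->flags.  A minimal sketch of that shape, modeled on
the dax_dev_huge_fault() change below (my_*_fault() are hypothetical
per-size handlers, not part of this patch):

    static int my_huge_fault(struct vm_fault *vmf, enum page_entry_size pe_size)
    {
            switch (pe_size) {
            case PE_SIZE_PTE:
                    return my_pte_fault(vmf);       /* base page, eg 4k */
            case PE_SIZE_PMD:
                    return my_pmd_fault(vmf);       /* eg 2MB */
            case PE_SIZE_PUD:
                    return my_pud_fault(vmf);       /* eg 1GB */
            default:
                    return VM_FAULT_FALLBACK;
            }
    }

    /* ->fault stays single-page, so it can simply wrap the same handler: */
    static int my_fault(struct vm_fault *vmf)
    {
            return my_huge_fault(vmf, PE_SIZE_PTE);
    }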

Link: http://lkml.kernel.org/r/148615748258.43180.1690152053774975329.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Tested-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Nilesh Choudhury <nilesh.choudhury@oracle.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9557feee
@@ -538,7 +538,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 }
 #endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
-static int dax_dev_fault(struct vm_fault *vmf)
+static int dax_dev_huge_fault(struct vm_fault *vmf,
+		enum page_entry_size pe_size)
 {
 	int rc;
 	struct file *filp = vmf->vma->vm_file;
@@ -550,14 +551,14 @@ static int dax_dev_fault(struct vm_fault *vmf)
 			vmf->vma->vm_start, vmf->vma->vm_end);
 
 	rcu_read_lock();
-	switch (vmf->flags & FAULT_FLAG_SIZE_MASK) {
-	case FAULT_FLAG_SIZE_PTE:
+	switch (pe_size) {
+	case PE_SIZE_PTE:
 		rc = __dax_dev_pte_fault(dax_dev, vmf);
 		break;
-	case FAULT_FLAG_SIZE_PMD:
+	case PE_SIZE_PMD:
 		rc = __dax_dev_pmd_fault(dax_dev, vmf);
 		break;
-	case FAULT_FLAG_SIZE_PUD:
+	case PE_SIZE_PUD:
 		rc = __dax_dev_pud_fault(dax_dev, vmf);
 		break;
 	default:
@@ -568,9 +569,14 @@ static int dax_dev_fault(struct vm_fault *vmf)
 	return rc;
 }
 
+static int dax_dev_fault(struct vm_fault *vmf)
+{
+	return dax_dev_huge_fault(vmf, PE_SIZE_PTE);
+}
+
 static const struct vm_operations_struct dax_dev_vm_ops = {
 	.fault = dax_dev_fault,
-	.huge_fault = dax_dev_fault,
+	.huge_fault = dax_dev_huge_fault,
 };
 
 static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
...
@@ -1452,12 +1452,13 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, struct iomap_ops *ops)
  * has done all the necessary locking for page fault to proceed
  * successfully.
  */
-int dax_iomap_fault(struct vm_fault *vmf, const struct iomap_ops *ops)
+int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+		const struct iomap_ops *ops)
 {
-	switch (vmf->flags & FAULT_FLAG_SIZE_MASK) {
-	case FAULT_FLAG_SIZE_PTE:
+	switch (pe_size) {
+	case PE_SIZE_PTE:
 		return dax_iomap_pte_fault(vmf, ops);
-	case FAULT_FLAG_SIZE_PMD:
+	case PE_SIZE_PMD:
 		return dax_iomap_pmd_fault(vmf, ops);
 	default:
 		return VM_FAULT_FALLBACK;
...
@@ -99,7 +99,7 @@ static int ext2_dax_fault(struct vm_fault *vmf)
 	}
 	down_read(&ei->dax_sem);
 
-	ret = dax_iomap_fault(vmf, &ext2_iomap_ops);
+	ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &ext2_iomap_ops);
 
 	up_read(&ei->dax_sem);
 	if (vmf->flags & FAULT_FLAG_WRITE)
...
@@ -253,7 +253,8 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 }
 
 #ifdef CONFIG_FS_DAX
-static int ext4_dax_fault(struct vm_fault *vmf)
+static int ext4_dax_huge_fault(struct vm_fault *vmf,
+		enum page_entry_size pe_size)
 {
 	int result;
 	struct inode *inode = file_inode(vmf->vma->vm_file);
@@ -265,7 +266,7 @@ static int ext4_dax_fault(struct vm_fault *vmf)
 		file_update_time(vmf->vma->vm_file);
 	}
 	down_read(&EXT4_I(inode)->i_mmap_sem);
-	result = dax_iomap_fault(vmf, &ext4_iomap_ops);
+	result = dax_iomap_fault(vmf, pe_size, &ext4_iomap_ops);
 	up_read(&EXT4_I(inode)->i_mmap_sem);
 	if (write)
 		sb_end_pagefault(sb);
@@ -273,6 +274,11 @@ static int ext4_dax_fault(struct vm_fault *vmf)
 	return result;
 }
 
+static int ext4_dax_fault(struct vm_fault *vmf)
+{
+	return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
+}
+
 /*
  * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_fault()
  * handler we check for races agaist truncate. Note that since we cycle through
@@ -305,7 +311,7 @@ static int ext4_dax_pfn_mkwrite(struct vm_fault *vmf)
 static const struct vm_operations_struct ext4_dax_vm_ops = {
 	.fault = ext4_dax_fault,
-	.huge_fault = ext4_dax_fault,
+	.huge_fault = ext4_dax_huge_fault,
 	.page_mkwrite = ext4_dax_fault,
 	.pfn_mkwrite = ext4_dax_pfn_mkwrite,
 };
...
@@ -1391,7 +1391,7 @@ xfs_filemap_page_mkwrite(
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 	if (IS_DAX(inode)) {
-		ret = dax_iomap_fault(vmf, &xfs_iomap_ops);
+		ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
 	} else {
 		ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
 		ret = block_page_mkwrite_return(ret);
@@ -1418,7 +1418,7 @@ xfs_filemap_fault(
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 	if (IS_DAX(inode))
-		ret = dax_iomap_fault(vmf, &xfs_iomap_ops);
+		ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
 	else
 		ret = filemap_fault(vmf);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
@@ -1435,7 +1435,8 @@ xfs_filemap_fault(
  */
 STATIC int
 xfs_filemap_huge_fault(
-	struct vm_fault *vmf)
+	struct vm_fault *vmf,
+	enum page_entry_size pe_size)
 {
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct xfs_inode *ip = XFS_I(inode);
@@ -1452,7 +1453,7 @@ xfs_filemap_huge_fault(
 	}
 
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-	ret = dax_iomap_fault(vmf, &xfs_iomap_ops);
+	ret = dax_iomap_fault(vmf, pe_size, &xfs_iomap_ops);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (vmf->flags & FAULT_FLAG_WRITE)
...
@@ -38,7 +38,8 @@ static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_fault *vmf, const struct iomap_ops *ops);
+int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+		const struct iomap_ops *ops);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
...
@@ -285,11 +285,6 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */
 #define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */
 
-#define FAULT_FLAG_SIZE_MASK 0x7000 /* Support up to 8-level page tables */
-#define FAULT_FLAG_SIZE_PTE 0x0000 /* First level (eg 4k) */
-#define FAULT_FLAG_SIZE_PMD 0x1000 /* Second level (eg 2MB) */
-#define FAULT_FLAG_SIZE_PUD 0x2000 /* Third level (eg 1GB) */
-
 #define FAULT_FLAG_TRACE \
 	{ FAULT_FLAG_WRITE, "WRITE" }, \
 	{ FAULT_FLAG_MKWRITE, "MKWRITE" }, \
@@ -349,6 +344,13 @@ struct vm_fault {
 	 */
 };
 
+/* page entry size for vm->huge_fault() */
+enum page_entry_size {
+	PE_SIZE_PTE = 0,
+	PE_SIZE_PMD,
+	PE_SIZE_PUD,
+};
+
 /*
  * These are the virtual MM functions - opening of an area, closing and
  * unmapping it (needed to keep files on disk up-to-date etc), pointer
@@ -359,7 +361,7 @@ struct vm_operations_struct {
 	void (*close)(struct vm_area_struct * area);
 	int (*mremap)(struct vm_area_struct * area);
 	int (*fault)(struct vm_fault *vmf);
-	int (*huge_fault)(struct vm_fault *vmf);
+	int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
 	void (*map_pages)(struct vm_fault *vmf,
 			pgoff_t start_pgoff, pgoff_t end_pgoff);
...
@@ -3489,7 +3489,7 @@ static int create_huge_pmd(struct vm_fault *vmf)
 	if (vma_is_anonymous(vmf->vma))
 		return do_huge_pmd_anonymous_page(vmf);
 	if (vmf->vma->vm_ops->huge_fault)
-		return vmf->vma->vm_ops->huge_fault(vmf);
+		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
 	return VM_FAULT_FALLBACK;
 }
 
@@ -3498,7 +3498,7 @@ static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
 	if (vma_is_anonymous(vmf->vma))
 		return do_huge_pmd_wp_page(vmf, orig_pmd);
 	if (vmf->vma->vm_ops->huge_fault)
-		return vmf->vma->vm_ops->huge_fault(vmf);
+		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
 
 	/* COW handled on pte level: split pmd */
 	VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
@@ -3519,7 +3519,7 @@ static int create_huge_pud(struct vm_fault *vmf)
 	if (vma_is_anonymous(vmf->vma))
 		return VM_FAULT_FALLBACK;
 	if (vmf->vma->vm_ops->huge_fault)
-		return vmf->vma->vm_ops->huge_fault(vmf);
+		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 	return VM_FAULT_FALLBACK;
 }
@@ -3531,7 +3531,7 @@ static int wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
 	if (vma_is_anonymous(vmf->vma))
 		return VM_FAULT_FALLBACK;
 	if (vmf->vma->vm_ops->huge_fault)
-		return vmf->vma->vm_ops->huge_fault(vmf);
+		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 	return VM_FAULT_FALLBACK;
 }
@@ -3659,7 +3659,6 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 	if (!vmf.pud)
 		return VM_FAULT_OOM;
 	if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
-		vmf.flags |= FAULT_FLAG_SIZE_PUD;
 		ret = create_huge_pud(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
@@ -3670,8 +3669,6 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 	if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
 		unsigned int dirty = flags & FAULT_FLAG_WRITE;
 
-		vmf.flags |= FAULT_FLAG_SIZE_PUD;
-
 		/* NUMA case for anonymous PUDs would go here */
 
 		if (dirty && !pud_write(orig_pud)) {
@@ -3689,18 +3686,14 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 	if (!vmf.pmd)
 		return VM_FAULT_OOM;
 	if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
-		vmf.flags |= FAULT_FLAG_SIZE_PMD;
 		ret = create_huge_pmd(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
-		/* fall through path, remove PMD flag */
-		vmf.flags &= ~FAULT_FLAG_SIZE_PMD;
 	} else {
 		pmd_t orig_pmd = *vmf.pmd;
 
 		barrier();
 		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
-			vmf.flags |= FAULT_FLAG_SIZE_PMD;
 			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
 				return do_huge_pmd_numa_page(&vmf, orig_pmd);
 
@@ -3709,8 +3702,6 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 			ret = wp_huge_pmd(&vmf, orig_pmd);
 			if (!(ret & VM_FAULT_FALLBACK))
 				return ret;
-			/* fall through path, remove PUD flag */
-			vmf.flags &= ~FAULT_FLAG_SIZE_PUD;
 		} else {
 			huge_pmd_set_accessed(&vmf, orig_pmd);
 			return 0;
...