Commit e1fb4a08 authored by Dave Jiang, committed by Linus Torvalds

dax: remove VM_MIXEDMAP for fsdax and device dax

This patch is reworked from an earlier patch that Dan has posted:
https://patchwork.kernel.org/patch/10131727/

VM_MIXEDMAP is used by dax to direct mm paths like vm_normal_page() that
the memory page it is dealing with is not typical memory from the linear
map.  The get_user_pages_fast() path, since it does not resolve the vma,
is already using {pte,pmd}_devmap() as a stand-in for VM_MIXEDMAP, so we
use that as a VM_MIXEDMAP replacement in some locations.  In the cases
where there is no pte to consult we fallback to using vma_is_dax() to
detect the VM_MIXEDMAP special case.

Now that we have explicit driver pfn_t-flag opt-in/opt-out for
get_user_pages() support for DAX we can stop setting VM_MIXEDMAP.  This
also means we no longer need to worry about safely manipulating vm_flags
in a future where we support dynamically changing the dax mode of a
file.

DAX should also now be supported with madvise_behavior(), vma_merge(),
and copy_page_range().

This patch has been tested against ndctl unit test.  It has also been
tested against xfstests commit: 625515d using fake pmem created by
memmap and no additional issues have been observed.

Link: http://lkml.kernel.org/r/152847720311.55924.16999195879201817653.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e36488c8
...@@ -474,7 +474,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma) ...@@ -474,7 +474,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
return rc; return rc;
vma->vm_ops = &dax_vm_ops; vma->vm_ops = &dax_vm_ops;
vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE; vma->vm_flags |= VM_HUGEPAGE;
return 0; return 0;
} }
......
...@@ -126,7 +126,6 @@ static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -126,7 +126,6 @@ static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file); file_accessed(file);
vma->vm_ops = &ext2_dax_vm_ops; vma->vm_ops = &ext2_dax_vm_ops;
vma->vm_flags |= VM_MIXEDMAP;
return 0; return 0;
} }
#else #else
......
...@@ -374,7 +374,7 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -374,7 +374,7 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file); file_accessed(file);
if (IS_DAX(file_inode(file))) { if (IS_DAX(file_inode(file))) {
vma->vm_ops = &ext4_dax_vm_ops; vma->vm_ops = &ext4_dax_vm_ops;
vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE; vma->vm_flags |= VM_HUGEPAGE;
} else { } else {
vma->vm_ops = &ext4_file_vm_ops; vma->vm_ops = &ext4_file_vm_ops;
} }
......
...@@ -1169,7 +1169,7 @@ xfs_file_mmap( ...@@ -1169,7 +1169,7 @@ xfs_file_mmap(
file_accessed(filp); file_accessed(filp);
vma->vm_ops = &xfs_file_vm_ops; vma->vm_ops = &xfs_file_vm_ops;
if (IS_DAX(file_inode(filp))) if (IS_DAX(file_inode(filp)))
vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE; vma->vm_flags |= VM_HUGEPAGE;
return 0; return 0;
} }
......
...@@ -676,7 +676,8 @@ int hmm_vma_get_pfns(struct hmm_range *range) ...@@ -676,7 +676,8 @@ int hmm_vma_get_pfns(struct hmm_range *range)
return -EINVAL; return -EINVAL;
/* FIXME support hugetlb fs */ /* FIXME support hugetlb fs */
if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) { if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
vma_is_dax(vma)) {
hmm_pfns_special(range); hmm_pfns_special(range);
return -EINVAL; return -EINVAL;
} }
...@@ -849,7 +850,8 @@ int hmm_vma_fault(struct hmm_range *range, bool block) ...@@ -849,7 +850,8 @@ int hmm_vma_fault(struct hmm_range *range, bool block)
return -EINVAL; return -EINVAL;
/* FIXME support hugetlb fs */ /* FIXME support hugetlb fs */
if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) { if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
vma_is_dax(vma)) {
hmm_pfns_special(range); hmm_pfns_special(range);
return -EINVAL; return -EINVAL;
} }
......
...@@ -762,11 +762,11 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, ...@@ -762,11 +762,11 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
* but we need to be consistent with PTEs and architectures that * but we need to be consistent with PTEs and architectures that
* can't support a 'special' bit. * can't support a 'special' bit.
*/ */
BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
!pfn_t_devmap(pfn));
BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
(VM_PFNMAP|VM_MIXEDMAP)); (VM_PFNMAP|VM_MIXEDMAP));
BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
BUG_ON(!pfn_t_devmap(pfn));
if (addr < vma->vm_start || addr >= vma->vm_end) if (addr < vma->vm_start || addr >= vma->vm_end)
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
......
...@@ -2430,6 +2430,9 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start, ...@@ -2430,6 +2430,9 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
VM_HUGETLB | VM_MIXEDMAP)) VM_HUGETLB | VM_MIXEDMAP))
return 0; /* just ignore the advice */ return 0; /* just ignore the advice */
if (vma_is_dax(vma))
return 0;
#ifdef VM_SAO #ifdef VM_SAO
if (*vm_flags & VM_SAO) if (*vm_flags & VM_SAO)
return 0; return 0;
......
...@@ -859,6 +859,10 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, ...@@ -859,6 +859,10 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
return NULL; return NULL;
} }
} }
if (pte_devmap(pte))
return NULL;
print_bad_pte(vma, addr, pte, NULL); print_bad_pte(vma, addr, pte, NULL);
return NULL; return NULL;
} }
...@@ -923,6 +927,8 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, ...@@ -923,6 +927,8 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
} }
} }
if (pmd_devmap(pmd))
return NULL;
if (is_zero_pfn(pfn)) if (is_zero_pfn(pfn))
return NULL; return NULL;
if (unlikely(pfn > highest_memmap_pfn)) if (unlikely(pfn > highest_memmap_pfn))
......
...@@ -2951,7 +2951,8 @@ int migrate_vma(const struct migrate_vma_ops *ops, ...@@ -2951,7 +2951,8 @@ int migrate_vma(const struct migrate_vma_ops *ops,
/* Sanity check the arguments */ /* Sanity check the arguments */
start &= PAGE_MASK; start &= PAGE_MASK;
end &= PAGE_MASK; end &= PAGE_MASK;
if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
vma_is_dax(vma))
return -EINVAL; return -EINVAL;
if (start < vma->vm_start || start >= vma->vm_end) if (start < vma->vm_start || start >= vma->vm_end)
return -EINVAL; return -EINVAL;
......
...@@ -527,7 +527,8 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, ...@@ -527,7 +527,8 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
vm_flags_t old_flags = vma->vm_flags; vm_flags_t old_flags = vma->vm_flags;
if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
vma_is_dax(vma))
/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */ /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
goto out; goto out;
......
...@@ -1796,11 +1796,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr, ...@@ -1796,11 +1796,12 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
if (vm_flags & VM_LOCKED) { if (vm_flags & VM_LOCKED) {
if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
vma == get_gate_vma(current->mm))) is_vm_hugetlb_page(vma) ||
mm->locked_vm += (len >> PAGE_SHIFT); vma == get_gate_vma(current->mm))
else
vma->vm_flags &= VM_LOCKED_CLEAR_MASK; vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
else
mm->locked_vm += (len >> PAGE_SHIFT);
} }
if (file) if (file)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment