Commit 4fb07ee6 authored by Wei Yang, committed by Linus Torvalds

userfaultfd: use vma_pagesize for all huge page size calculation

In __mcopy_atomic_hugetlb() the huge page size is obtained in two
different ways: via the local variable vma_hpagesize and via the
huge_page_size() helper.

Since the two are the same, there is no need for two different
mechanisms.  This patch makes the code consistent by using
vma_hpagesize throughout.
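
For illustration only, not part of this patch: __mcopy_atomic_hugetlb() sets
vma_hpagesize from vma_kernel_pagesize(dst_vma), and for a hugetlb VMA that
helper resolves to the huge page size of the VMA's hstate.  A minimal sketch
of the equivalence, assuming a hugetlb-backed dst_vma and the usual
<linux/mm.h> / <linux/hugetlb.h> helpers:

	/* Sketch only; not part of this patch. */
	struct hstate *h = hstate_vma(dst_vma);
	unsigned long vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/* All three size expressions describe the same huge page size. */
	VM_BUG_ON(huge_page_size(h) != vma_hpagesize);
	VM_BUG_ON(huge_page_mask(h) != ~(vma_hpagesize - 1));
	VM_BUG_ON(pages_per_huge_page(h) != vma_hpagesize / PAGE_SIZE);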

Link: http://lkml.kernel.org/r/20190927070032.2129-1-richardw.yang@linux.intel.com
Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent df6c6500
@@ -259,7 +259,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 		pte_t dst_pteval;
 
 		BUG_ON(dst_addr >= dst_start + len);
-		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
+		VM_BUG_ON(dst_addr & (vma_hpagesize - 1));
 
 		/*
 		 * Serialize via hugetlb_fault_mutex
@@ -270,7 +270,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 		err = -ENOMEM;
-		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
+		dst_pte = huge_pte_alloc(dst_mm, dst_addr, vma_hpagesize);
 		if (!dst_pte) {
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 			goto out_unlock;
@@ -297,7 +297,8 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 
 			err = copy_huge_page_from_user(page,
 						(const void __user *)src_addr,
-						pages_per_huge_page(h), true);
+						vma_hpagesize / PAGE_SIZE,
+						true);
 			if (unlikely(err)) {
 				err = -EFAULT;
 				goto out;