Commit 61c50040 authored by Axel Rasmussen, committed by Andrew Morton

mm: userfaultfd: don't pass around both mm and vma

Quite a few userfaultfd functions took both mm and vma pointers as
arguments.  Since the mm is trivially accessible via vma->vm_mm, there's
no reason to pass both; it just needlessly extends the already long
argument list.

Get rid of the mm pointer, where possible, to shorten the argument list.
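
To illustrate the shape of the change, here is a minimal sketch of the
before/after pattern (frob_range() and do_frob() are hypothetical,
purely illustrative names, not functions touched by this patch):

	/* Before: every caller had to supply a matching (mm, vma) pair. */
	long frob_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
			unsigned long start, unsigned long len);

	/*
	 * After: only the vma is passed; where the mm is needed more than
	 * once, it is rederived once at the top of the function.
	 */
	long frob_range(struct vm_area_struct *dst_vma,
			unsigned long start, unsigned long len)
	{
		struct mm_struct *dst_mm = dst_vma->vm_mm; /* same mm, one less argument */

		return do_frob(dst_mm, start, len); /* hypothetical callee */
	}

Besides shortening the argument lists, this makes it impossible for a
caller to pass an mm that does not match the vma.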

Link: https://lkml.kernel.org/r/20230314221250.682452-3-axelrasmussen@google.com
Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
Acked-by: Peter Xu <peterx@redhat.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nadav Amit <namit@vmware.com>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a734991c
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1644,7 +1644,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
 
 		/* Reset ptes for the whole vma range if wr-protected */
 		if (userfaultfd_wp(vma))
-			uffd_wp_range(mm, vma, start, vma_end - start, false);
+			uffd_wp_range(vma, start, vma_end - start, false);
 
 		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
 		prev = vma_merge(&vmi, mm, prev, start, vma_end, new_flags,
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -158,7 +158,7 @@ unsigned long hugetlb_total_pages(void);
 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, unsigned int flags);
 #ifdef CONFIG_USERFAULTFD
-int hugetlb_mfill_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
+int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			     struct vm_area_struct *dst_vma,
 			     unsigned long dst_addr,
 			     unsigned long src_addr,
@@ -393,8 +393,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 }
 
 #ifdef CONFIG_USERFAULTFD
-static inline int hugetlb_mfill_atomic_pte(struct mm_struct *dst_mm,
-					   pte_t *dst_pte,
+static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 					   struct vm_area_struct *dst_vma,
 					   unsigned long dst_addr,
 					   unsigned long src_addr,
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -152,14 +152,14 @@ extern void shmem_uncharge(struct inode *inode, long pages);
 
 #ifdef CONFIG_USERFAULTFD
 #ifdef CONFIG_SHMEM
-extern int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 				  struct vm_area_struct *dst_vma,
 				  unsigned long dst_addr,
 				  unsigned long src_addr,
 				  bool zeropage, bool wp_copy,
 				  struct page **pagep);
 #else /* !CONFIG_SHMEM */
-#define shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \
+#define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
 			       src_addr, zeropage, wp_copy, pagep) ({ BUG(); 0; })
 #endif /* CONFIG_SHMEM */
 #endif /* CONFIG_USERFAULTFD */
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -56,7 +56,7 @@ enum mcopy_atomic_mode {
 	MCOPY_ATOMIC_CONTINUE,
 };
 
-extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+extern int mfill_atomic_install_pte(pmd_t *dst_pmd,
 				    struct vm_area_struct *dst_vma,
 				    unsigned long dst_addr, struct page *page,
 				    bool newly_allocated, bool wp_copy);
@@ -73,7 +73,7 @@ extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst
 extern int mwriteprotect_range(struct mm_struct *dst_mm,
 			       unsigned long start, unsigned long len,
 			       bool enable_wp, atomic_t *mmap_changing);
-extern long uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
+extern long uffd_wp_range(struct vm_area_struct *vma,
 			  unsigned long start, unsigned long len, bool enable_wp);
 
 /* mm helpers */
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6159,8 +6159,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
  * with modifications for hugetlb pages.
  */
-int hugetlb_mfill_atomic_pte(struct mm_struct *dst_mm,
-			     pte_t *dst_pte,
+int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 			     struct vm_area_struct *dst_vma,
 			     unsigned long dst_addr,
 			     unsigned long src_addr,
@@ -6168,6 +6167,7 @@ int hugetlb_mfill_atomic_pte(struct mm_struct *dst_mm,
 			     struct page **pagep,
 			     bool wp_copy)
 {
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
 	bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
 	struct hstate *h = hstate_vma(dst_vma);
 	struct address_space *mapping = dst_vma->vm_file->f_mapping;
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2429,8 +2429,7 @@ static struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block
 }
 
 #ifdef CONFIG_USERFAULTFD
-int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
-			   pmd_t *dst_pmd,
+int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 			   struct vm_area_struct *dst_vma,
 			   unsigned long dst_addr,
 			   unsigned long src_addr,
@@ -2520,11 +2519,11 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 		goto out_release;
 
 	ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
-				      gfp & GFP_RECLAIM_MASK, dst_mm);
+				      gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm);
 	if (ret)
 		goto out_release;
 
-	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
 				       &folio->page, true, wp_copy);
 	if (ret)
 		goto out_delete_from_cache;
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -55,12 +55,13 @@ struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
  * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
  * and anon, and for both shared and private VMAs.
  */
-int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+int mfill_atomic_install_pte(pmd_t *dst_pmd,
 			     struct vm_area_struct *dst_vma,
 			     unsigned long dst_addr, struct page *page,
 			     bool newly_allocated, bool wp_copy)
 {
 	int ret;
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
 	pte_t _dst_pte, *dst_pte;
 	bool writable = dst_vma->vm_flags & VM_WRITE;
 	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
@@ -127,8 +128,7 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 	return ret;
 }
 
-static int mfill_atomic_pte_copy(struct mm_struct *dst_mm,
-				 pmd_t *dst_pmd,
+static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
 				 struct vm_area_struct *dst_vma,
 				 unsigned long dst_addr,
 				 unsigned long src_addr,
@@ -190,10 +190,10 @@ static int mfill_atomic_pte_copy(struct mm_struct *dst_mm,
 	__SetPageUptodate(page);
 
 	ret = -ENOMEM;
-	if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
+	if (mem_cgroup_charge(page_folio(page), dst_vma->vm_mm, GFP_KERNEL))
 		goto out_release;
 
-	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
 				       page, true, wp_copy);
 	if (ret)
 		goto out_release;
@@ -204,8 +204,7 @@ static int mfill_atomic_pte_copy(struct mm_struct *dst_mm,
 	goto out;
 }
 
-static int mfill_atomic_pte_zeropage(struct mm_struct *dst_mm,
-				     pmd_t *dst_pmd,
+static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
 				     struct vm_area_struct *dst_vma,
 				     unsigned long dst_addr)
 {
@@ -217,7 +216,7 @@ static int mfill_atomic_pte_zeropage(struct mm_struct *dst_mm,
 
 	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
 					 dst_vma->vm_page_prot));
-	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
 	if (dst_vma->vm_file) {
 		/* the shmem MAP_PRIVATE case requires checking the i_size */
 		inode = dst_vma->vm_file->f_inode;
@@ -230,7 +229,7 @@ static int mfill_atomic_pte_zeropage(struct mm_struct *dst_mm,
 	ret = -EEXIST;
 	if (!pte_none(*dst_pte))
 		goto out_unlock;
-	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
 	ret = 0;
@@ -240,8 +239,7 @@ static int mfill_atomic_pte_zeropage(struct mm_struct *dst_mm,
 }
 
 /* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
-static int mfill_atomic_pte_continue(struct mm_struct *dst_mm,
-				     pmd_t *dst_pmd,
+static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
 				     struct vm_area_struct *dst_vma,
 				     unsigned long dst_addr,
 				     bool wp_copy)
@@ -269,7 +267,7 @@ static int mfill_atomic_pte_continue(struct mm_struct *dst_mm,
 		goto out_release;
 	}
 
-	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
 				       page, false, wp_copy);
 	if (ret)
 		goto out_release;
@@ -310,7 +308,7 @@ static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
  * mfill_atomic processing for HUGETLB vmas. Note that this routine is
  * called with mmap_lock held, it will release mmap_lock before returning.
  */
-static __always_inline ssize_t mfill_atomic_hugetlb(struct mm_struct *dst_mm,
+static __always_inline ssize_t mfill_atomic_hugetlb(
 					      struct vm_area_struct *dst_vma,
 					      unsigned long dst_start,
 					      unsigned long src_start,
@@ -318,6 +316,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(struct mm_struct *dst_mm,
 					      enum mcopy_atomic_mode mode,
 					      bool wp_copy)
 {
+	struct mm_struct *dst_mm = dst_vma->vm_mm;
 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
 	ssize_t err;
 	pte_t *dst_pte;
@@ -411,7 +410,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(struct mm_struct *dst_mm,
 			goto out_unlock;
 		}
 
-		err = hugetlb_mfill_atomic_pte(dst_mm, dst_pte, dst_vma,
+		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma,
 					       dst_addr, src_addr, mode, &page,
 					       wp_copy);
@@ -463,8 +462,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(struct mm_struct *dst_mm,
 }
 #else /* !CONFIG_HUGETLB_PAGE */
 /* fail at build time if gcc attempts to use this */
-extern ssize_t mfill_atomic_hugetlb(struct mm_struct *dst_mm,
-				    struct vm_area_struct *dst_vma,
+extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
 				    unsigned long dst_start,
 				    unsigned long src_start,
 				    unsigned long len,
@@ -472,8 +470,7 @@ extern ssize_t mfill_atomic_hugetlb(struct mm_struct *dst_mm,
 				    bool wp_copy);
 #endif /* CONFIG_HUGETLB_PAGE */
 
-static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
-						pmd_t *dst_pmd,
+static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
 						struct vm_area_struct *dst_vma,
 						unsigned long dst_addr,
 						unsigned long src_addr,
@@ -484,7 +481,7 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
 	ssize_t err;
 
 	if (mode == MCOPY_ATOMIC_CONTINUE) {
-		return mfill_atomic_pte_continue(dst_mm, dst_pmd, dst_vma,
+		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
 						 dst_addr, wp_copy);
 	}
 
@@ -500,14 +497,14 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
 	 */
 	if (!(dst_vma->vm_flags & VM_SHARED)) {
 		if (mode == MCOPY_ATOMIC_NORMAL)
-			err = mfill_atomic_pte_copy(dst_mm, dst_pmd, dst_vma,
+			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
 						    dst_addr, src_addr, page,
 						    wp_copy);
 		else
-			err = mfill_atomic_pte_zeropage(dst_mm, dst_pmd,
+			err = mfill_atomic_pte_zeropage(dst_pmd,
 							dst_vma, dst_addr);
 	} else {
-		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
+		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
 					     dst_addr, src_addr,
 					     mode != MCOPY_ATOMIC_NORMAL,
 					     wp_copy, page);
@@ -588,7 +585,7 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 	 * If this is a HUGETLB vma, pass off to appropriate routine
 	 */
 	if (is_vm_hugetlb_page(dst_vma))
-		return mfill_atomic_hugetlb(dst_mm, dst_vma, dst_start,
+		return mfill_atomic_hugetlb(dst_vma, dst_start,
 					    src_start, len, mcopy_mode,
 					    wp_copy);
@@ -641,7 +638,7 @@ static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
 		BUG_ON(pmd_none(*dst_pmd));
 		BUG_ON(pmd_trans_huge(*dst_pmd));
 
-		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
 				       src_addr, &page, mcopy_mode, wp_copy);
 		cond_resched();
@@ -710,7 +707,7 @@ ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
 			    mmap_changing, 0);
 }
 
-long uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
+long uffd_wp_range(struct vm_area_struct *dst_vma,
 		   unsigned long start, unsigned long len, bool enable_wp)
 {
 	unsigned int mm_cp_flags;
@@ -732,7 +729,7 @@ long uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
 	 */
 	if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
 		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
-	tlb_gather_mmu(&tlb, dst_mm);
+	tlb_gather_mmu(&tlb, dst_vma->vm_mm);
 	ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
 	tlb_finish_mmu(&tlb);
@@ -788,7 +785,7 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
 		_start = max(dst_vma->vm_start, start);
 		_end = min(dst_vma->vm_end, end);
 
-		err = uffd_wp_range(dst_mm, dst_vma, _start, _end - _start, enable_wp);
+		err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);
 
 		/* Return 0 on success, <0 on failures */
 		if (err < 0)