Commit 7d64ae3a authored by Axel Rasmussen, committed by Linus Torvalds

userfaultfd/shmem: modify shmem_mfill_atomic_pte to use install_pte()

In a previous commit, we added the mfill_atomic_install_pte() helper.
This helper does the job of setting up PTEs for an existing page, to map
it into a given VMA.  It deals with both the anon and shmem cases, as well
as the shared and private cases.
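
For reference, the core of that helper (as added earlier in this series) looks
roughly like the following. This is a condensed sketch, with the uffd-wp and
shmem i_size checks trimmed; see mm/userfaultfd.c for the authoritative
version:

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (writable)
		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;

	if (page_in_cache)		/* shmem page already in the cache */
		page_add_file_rmap(page, false);
	else				/* brand new anon page */
		page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	inc_mm_counter(dst_mm, mm_counter(page));

	if (newly_allocated)
		lru_cache_add_inactive_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;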

In other words, shmem_mfill_atomic_pte() duplicates a case the helper already
handles.  So, expose the helper, and let shmem_mfill_atomic_pte() call it
directly, to reduce code duplication.

This requires that we refactor shmem_mfill_atomic_pte() a bit:

Instead of doing accounting (shmem_recalc_inode() et al) part-way through
the PTE setup, do it afterward.  This frees up mfill_atomic_install_pte()
from having to care about this accounting, and means we don't need to e.g.
shmem_uncharge() in the error path.
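
Schematically, the success path of shmem_mfill_atomic_pte() after this patch
becomes (abridged from the diff below; argument lists shortened):

	ret = shmem_add_to_page_cache(page, mapping, pgoff, ...);
	if (ret)
		goto out_release;

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, true /* newly_allocated */,
				       false /* wp_copy */);
	if (ret)
		goto out_delete_from_cache;

	/* Accounting is updated only once the PTE is in place. */
	spin_lock_irq(&info->lock);
	info->alloced++;
	inode->i_blocks += BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);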

A side effect is that this switches shmem_mfill_atomic_pte() to use
lru_cache_add_inactive_or_unevictable() instead of plain lru_cache_add().
The wrapper does some extra accounting in one exceptional case, if
appropriate, so it's actually the more correct function to use.
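
The exceptional case is a page faulted into an mlocked (VM_LOCKED) VMA, which
must be accounted as mlocked instead of going onto the inactive LRU.  A
condensed sketch of the wrapper, from mm/swap.c of this era:

	void lru_cache_add_inactive_or_unevictable(struct page *page,
						   struct vm_area_struct *vma)
	{
		bool unevictable;

		unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
		if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
			int nr_pages = thp_nr_pages(page);

			/*
			 * Account the page as mlocked; vmscan keeps it off
			 * the evictable LRU lists via PageMlocked.
			 */
			__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
			count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
		}
		lru_cache_add(page);
	}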

Link: https://lkml.kernel.org/r/20210503180737.2487560-7-axelrasmussen@google.com
Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Brian Geffon <bgeffon@google.com>
Cc: "Dr . David Alan Gilbert" <dgilbert@redhat.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Lokesh Gidra <lokeshgidra@google.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Oliver Upton <oupton@google.com>
Cc: Shaohua Li <shli@fb.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Wang Qing <wangqing@vivo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 964ab004
include/linux/userfaultfd_k.h

@@ -53,6 +53,11 @@ enum mcopy_atomic_mode {
 	MCOPY_ATOMIC_CONTINUE,
 };
 
+extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+				    struct vm_area_struct *dst_vma,
+				    unsigned long dst_addr, struct page *page,
+				    bool newly_allocated, bool wp_copy);
+
 extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
 			    unsigned long src_start, unsigned long len,
 			    bool *mmap_changing, __u64 mode);
mm/shmem.c

@@ -2376,14 +2376,11 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	struct address_space *mapping = inode->i_mapping;
 	gfp_t gfp = mapping_gfp_mask(mapping);
 	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
-	spinlock_t *ptl;
 	void *page_kaddr;
 	struct page *page;
-	pte_t _dst_pte, *dst_pte;
 	int ret;
 	pgoff_t max_off;
 
-	ret = -ENOMEM;
 	if (!shmem_inode_acct_block(inode, 1)) {
 		/*
 		 * We may have got a page, returned -ENOENT triggering a retry,
@@ -2394,10 +2391,11 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 			put_page(*pagep);
 			*pagep = NULL;
 		}
-		goto out;
+		return -ENOMEM;
 	}
 
 	if (!*pagep) {
+		ret = -ENOMEM;
 		page = shmem_alloc_page(gfp, info, pgoff);
 		if (!page)
 			goto out_unacct_blocks;
@@ -2412,9 +2410,9 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 		/* fallback to copy_from_user outside mmap_lock */
 		if (unlikely(ret)) {
 			*pagep = page;
-			shmem_inode_unacct_blocks(inode, 1);
+			ret = -ENOENT;
 			/* don't free the page */
-			return -ENOENT;
+			goto out_unacct_blocks;
 		}
 	} else {		/* ZEROPAGE */
 		clear_highpage(page);
@@ -2440,32 +2438,10 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	if (ret)
 		goto out_release;
 
-	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
-	if (dst_vma->vm_flags & VM_WRITE)
-		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
-	else {
-		/*
-		 * We don't set the pte dirty if the vma has no
-		 * VM_WRITE permission, so mark the page dirty or it
-		 * could be freed from under us. We could do it
-		 * unconditionally before unlock_page(), but doing it
-		 * only if VM_WRITE is not set is faster.
-		 */
-		set_page_dirty(page);
-	}
-
-	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
-
-	ret = -EFAULT;
-	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
-	if (unlikely(pgoff >= max_off))
-		goto out_release_unlock;
-
-	ret = -EEXIST;
-	if (!pte_none(*dst_pte))
-		goto out_release_unlock;
-
-	lru_cache_add(page);
+	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+				       page, true, false);
+	if (ret)
+		goto out_delete_from_cache;
 
 	spin_lock_irq(&info->lock);
 	info->alloced++;
@@ -2473,27 +2449,17 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	shmem_recalc_inode(inode);
 	spin_unlock_irq(&info->lock);
 
-	inc_mm_counter(dst_mm, mm_counter_file(page));
-	page_add_file_rmap(page, false);
-	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
-
-	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(dst_vma, dst_addr, dst_pte);
-	pte_unmap_unlock(dst_pte, ptl);
+	SetPageDirty(page);
 	unlock_page(page);
-	ret = 0;
-out:
-	return ret;
-out_release_unlock:
-	pte_unmap_unlock(dst_pte, ptl);
-	ClearPageDirty(page);
+	return 0;
+
+out_delete_from_cache:
 	delete_from_page_cache(page);
 out_release:
 	unlock_page(page);
 	put_page(page);
 out_unacct_blocks:
 	shmem_inode_unacct_blocks(inode, 1);
-	goto out;
+	return ret;
 }
 #endif /* CONFIG_USERFAULTFD */
mm/userfaultfd.c

@@ -51,18 +51,13 @@ struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
 /*
  * Install PTEs, to map dst_addr (within dst_vma) to page.
  *
- * This function handles MCOPY_ATOMIC_CONTINUE (which is always file-backed),
- * whether or not dst_vma is VM_SHARED. It also handles the more general
- * MCOPY_ATOMIC_NORMAL case, when dst_vma is *not* VM_SHARED (it may be file
- * backed, or not).
- *
- * Note that MCOPY_ATOMIC_NORMAL for a VM_SHARED dst_vma is handled by
- * shmem_mcopy_atomic_pte instead.
+ * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
+ * and anon, and for both shared and private VMAs.
  */
-static int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
-				    struct vm_area_struct *dst_vma,
-				    unsigned long dst_addr, struct page *page,
-				    bool newly_allocated, bool wp_copy)
+int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+			     struct vm_area_struct *dst_vma,
+			     unsigned long dst_addr, struct page *page,
+			     bool newly_allocated, bool wp_copy)
 {
 	int ret;
 	pte_t _dst_pte, *dst_pte;