Commit 162f73f4 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "5 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: correct the comment when reclaimed pages exceed the scanned pages
  userfaultfd: shmem: handle coredumping in handle_userfault()
  mm: numa: avoid waiting on freed migrated pages
  swap: cond_resched in swap_cgroup_prepare()
  mm/memory-failure.c: use compound_head() flags for huge pages
parents 5ac447d2 d7143e31
fs/userfaultfd.c
@@ -340,9 +340,28 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	bool must_wait, return_to_userland;
 	long blocking_state;
 
-	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
-
 	ret = VM_FAULT_SIGBUS;
+
+	/*
+	 * We don't do userfault handling for the final child pid update.
+	 *
+	 * We also don't do userfault handling during
+	 * coredumping. hugetlbfs has the special
+	 * follow_hugetlb_page() to skip missing pages in the
+	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
+	 * the no_page_table() helper in follow_page_mask(), but the
+	 * shmem_vm_ops->fault method is invoked even during
+	 * coredumping without mmap_sem and it ends up here.
+	 */
+	if (current->flags & (PF_EXITING|PF_DUMPCORE))
+		goto out;
+
+	/*
+	 * Coredumping runs without mmap_sem so we can only check that
+	 * the mmap_sem is held, if PF_DUMPCORE was not set.
+	 */
+	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+
 	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
 		goto out;

@@ -360,12 +379,6 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	if (unlikely(ACCESS_ONCE(ctx->released)))
 		goto out;
 
-	/*
-	 * We don't do userfault handling for the final child pid update.
-	 */
-	if (current->flags & PF_EXITING)
-		goto out;
-
 	/*
 	 * Check that we can return VM_FAULT_RETRY.
 	 *
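Two things change above: the fault handler now bails out early when the task is coredumping (PF_DUMPCORE), since the coredump path invokes shmem_vm_ops->fault without holding mmap_sem, and the hard BUG_ON() becomes a WARN_ON_ONCE() that logs the first violation instead of crashing the kernel. As a rough userspace sketch of the warn-once idea (illustrative names only, not the kernel's actual implementation, which lives in include/asm-generic/bug.h):

/* Minimal userspace sketch of a warn-once assertion, loosely modeled
 * on the kernel's WARN_ON_ONCE(). Uses a GNU C statement expression,
 * as the kernel does. */
#include <stdio.h>
#include <stdbool.h>

#define warn_on_once(cond)                                          \
	({                                                          \
		static bool __warned;                               \
		bool __cond = !!(cond);                             \
		if (__cond && !__warned) {                          \
			__warned = true;                            \
			fprintf(stderr, "warning: %s:%d: %s\n",     \
				__FILE__, __LINE__, #cond);         \
		}                                                   \
		__cond;                                             \
	})

int main(void)
{
	for (int i = 0; i < 3; i++)
		warn_on_once(i >= 0);	/* prints only on the first hit */
	return 0;
}

A once-only warning keeps the log readable if the same path is hit repeatedly, while still flagging the broken locking assumption.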
mm/huge_memory.c
@@ -1426,8 +1426,11 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 	 */
 	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
 		page = pmd_page(*vmf->pmd);
+		if (!get_page_unless_zero(page))
+			goto out_unlock;
 		spin_unlock(vmf->ptl);
 		wait_on_page_locked(page);
+		put_page(page);
 		goto out;
 	}

@@ -1459,9 +1462,12 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
 	/* Migration could have started since the pmd_trans_migrating check */
 	if (!page_locked) {
+		page_nid = -1;
+		if (!get_page_unless_zero(page))
+			goto out_unlock;
 		spin_unlock(vmf->ptl);
 		wait_on_page_locked(page);
-		page_nid = -1;
+		put_page(page);
 		goto out;
 	}
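The race being fixed: between the pmd_trans_migrating() check and wait_on_page_locked(), migration can finish and free the page, leaving the fault handler waiting on a struct page that may already be reused. Taking a reference with get_page_unless_zero() before dropping the page-table lock pins the page, and put_page() releases it after the wait. Below is a userspace sketch of the underlying "take a reference unless the count already hit zero" pattern, using C11 atomics; the names are illustrative, not kernel API:

/* Sketch of the pattern behind get_page_unless_zero(): only take a
 * reference if the object has not already started being freed. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
	atomic_int refcount;	/* 0 means the object is being freed */
};

/* Increment the refcount unless it has already dropped to zero. */
static bool get_unless_zero(struct object *obj)
{
	int old = atomic_load(&obj->refcount);

	while (old != 0) {
		/* On failure, 'old' is reloaded with the current value. */
		if (atomic_compare_exchange_weak(&obj->refcount, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;		/* too late: object is on its way out */
}

static void put_object(struct object *obj)
{
	atomic_fetch_sub(&obj->refcount, 1);
}

int main(void)
{
	struct object obj = { .refcount = 1 };

	if (get_unless_zero(&obj)) {
		/* safe to drop locks and sleep on obj here */
		put_object(&obj);
	}
	printf("refcount back to %d\n", atomic_load(&obj.refcount));
	return 0;
}

The compare-and-swap loop is what makes the check and the increment atomic; a plain load-test-increment would reintroduce the race.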
mm/memory-failure.c
@@ -1184,7 +1184,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
 	 * correctly, we save a copy of the page flags at this time.
 	 */
-	page_flags = p->flags;
+	if (PageHuge(p))
+		page_flags = hpage->flags;
+	else
+		page_flags = p->flags;
 
 	/*
 	 * unpoison always clear PG_hwpoison inside page lock
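The fix matters because a huge page's status flags, including the hwpoison state this code wants to snapshot, live on the head page of the compound page; `p` may be a tail page, so saving p->flags recorded the wrong state (`hpage` here is the head page, per the patch title's compound_head()). A toy userspace model of the head/tail split, with illustrative names:

/* Sketch of the compound-page idea: per-page state lives on the head
 * page, so reading flags from a tail page misses it. Not kernel code. */
#include <stdio.h>

#define HWPOISON (1u << 0)

struct page {
	unsigned int flags;	/* meaningful only on the head page */
	struct page *head;	/* points to itself for the head page */
};

static struct page *compound_head(struct page *p)
{
	return p->head;
}

int main(void)
{
	struct page huge[4];

	for (int i = 0; i < 4; i++)
		huge[i] = (struct page){ .flags = 0, .head = &huge[0] };

	huge[0].flags |= HWPOISON;	/* poison recorded on the head */

	struct page *tail = &huge[2];
	printf("tail flags:          %u\n", tail->flags);
	printf("head (correct) flags: %u\n", compound_head(tail)->flags);
	return 0;
}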
mm/swap_cgroup.c
@@ -48,6 +48,9 @@ static int swap_cgroup_prepare(int type)
 		if (!page)
 			goto not_enough_page;
 		ctrl->map[idx] = page;
+
+		if (!(idx % SWAP_CLUSTER_MAX))
+			cond_resched();
 	}
 	return 0;
 not_enough_page:
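swap_cgroup_prepare() allocates one map page per chunk of swap entries, so on a very large swap area this loop can run long enough to hog the CPU; dropping in cond_resched() every SWAP_CLUSTER_MAX (32) iterations gives the scheduler a chance to run other tasks. A userspace analog of the batching pattern, with sched_yield() standing in for cond_resched() and BATCH as an illustrative stand-in for SWAP_CLUSTER_MAX:

/* Yield periodically inside a long loop so one task does not
 * monopolize the CPU. Illustrative userspace sketch. */
#include <sched.h>
#include <stdio.h>

#define BATCH 32

int main(void)
{
	unsigned long work = 0;

	for (unsigned long idx = 0; idx < 1000000; idx++) {
		work += idx;		/* stand-in for allocating one map page */

		/* Give other runnable tasks a chance every BATCH steps. */
		if (!(idx % BATCH))
			sched_yield();
	}
	printf("done: %lu\n", work);
	return 0;
}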
mm/vmpressure.c
@@ -115,9 +115,9 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
 	unsigned long pressure = 0;
 
 	/*
-	 * reclaimed can be greater than scanned in cases
-	 * like THP, where the scanned is 1 and reclaimed
-	 * could be 512
+	 * reclaimed can be greater than scanned for things such as reclaimed
+	 * slab pages. shrink_node() just adds reclaimed pages without a
+	 * related increment to scanned pages.
 	 */
 	if (reclaimed >= scanned)
 		goto out;
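Besides the comment fix, it is worth seeing why the reclaimed >= scanned guard exists at all: vmpressure works in unsigned arithmetic, and the pressure computed past the guard is roughly 100 * (scanned - reclaimed) / scanned, which would underflow and wrap if reclaimed exceeded scanned. A simplified sketch (not the exact kernel formula):

/* Why the reclaimed >= scanned guard matters: with unsigned math,
 * scanned - reclaimed would wrap to a huge value if reclaimed were
 * larger. The guard maps that case to zero pressure instead. */
#include <stdio.h>

static unsigned long calc_pressure(unsigned long scanned, unsigned long reclaimed)
{
	if (reclaimed >= scanned)
		return 0;	/* reclaim kept up (or overshot): no pressure */
	return 100 * (scanned - reclaimed) / scanned;
}

int main(void)
{
	printf("%lu\n", calc_pressure(512, 64));	/* 87: most scans failed to reclaim */
	printf("%lu\n", calc_pressure(1, 512));		/* 0: slab overshoot, guarded */
	/* Without the guard, 1 - 512 would wrap to ~2^64 - 511. */
	return 0;
}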