Commit 7d5b3bfa authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: /proc/pid/clear_refs: avoid split_huge_page()

Currently the pagewalker splits all THP pages on any clear_refs request.  It's
not necessary: we can handle this at the PMD level.

One side effect is that soft-dirty will potentially see more dirty memory,
since we will mark the whole THP page dirty at once.  For example, a write to
a single byte of a 2MB huge page now reports the entire 2MB range as soft-dirty.

Sanity-checked with the CRIU test suite.  More testing is required.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 48684a65
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -739,10 +739,10 @@ struct clear_refs_private {
 	enum clear_refs_types type;
 };
 
+#ifdef CONFIG_MEM_SOFT_DIRTY
 static inline void clear_soft_dirty(struct vm_area_struct *vma,
 		unsigned long addr, pte_t *pte)
 {
-#ifdef CONFIG_MEM_SOFT_DIRTY
 	/*
 	 * The soft-dirty tracker uses #PF-s to catch writes
 	 * to pages, so write-protect the pte as well. See the
@@ -759,9 +759,35 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 	}
 
 	set_pte_at(vma->vm_mm, addr, pte, ptent);
-#endif
 }
 
+static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
+		unsigned long addr, pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+
+	pmd = pmd_wrprotect(pmd);
+	pmd = pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
+
+	if (vma->vm_flags & VM_SOFTDIRTY)
+		vma->vm_flags &= ~VM_SOFTDIRTY;
+
+	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+}
+
+#else
+
+static inline void clear_soft_dirty(struct vm_area_struct *vma,
+		unsigned long addr, pte_t *pte)
+{
+}
+
+static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
+		unsigned long addr, pmd_t *pmdp)
+{
+}
+#endif
+
 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 {
@@ -771,7 +797,22 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
 
-	split_huge_page_pmd(vma, addr, pmd);
+	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
+			clear_soft_dirty_pmd(vma, addr, pmd);
+			goto out;
+		}
+
+		page = pmd_page(*pmd);
+
+		/* Clear accessed and referenced bits. */
+		pmdp_test_and_clear_young(vma, addr, pmd);
+		ClearPageReferenced(page);
+out:
+		spin_unlock(ptl);
+		return 0;
+	}
+
 	if (pmd_trans_unstable(pmd))
 		return 0;
...
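
For context, here is a minimal userspace sketch (not part of the commit) of the interface this patch optimizes, following Documentation/vm/soft-dirty.txt: writing "4" to /proc/pid/clear_refs clears the soft-dirty bits, and bit 55 of each 64-bit /proc/pid/pagemap entry reports whether the page has been written since. The file name soft_dirty_demo.c is hypothetical.

```c
/* soft_dirty_demo.c -- hypothetical demo, not shipped with the kernel. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define PAGEMAP_SOFT_DIRTY	(1ULL << 55)	/* bit 55 of a pagemap entry */

/* Read the 64-bit pagemap entry describing the page at vaddr. */
static uint64_t pagemap_entry(int fd, unsigned long vaddr)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	uint64_t entry;

	/* pagemap holds one 64-bit entry per virtual page, in order. */
	if (pread(fd, &entry, sizeof(entry),
		  (vaddr / pagesize) * sizeof(entry)) != sizeof(entry)) {
		perror("pread pagemap");
		exit(1);
	}
	return entry;
}

int main(void)
{
	char *buf = malloc(4096);
	/* Reading pagemap may require CAP_SYS_ADMIN on some kernels. */
	int clear_fd = open("/proc/self/clear_refs", O_WRONLY);
	int map_fd = open("/proc/self/pagemap", O_RDONLY);

	if (!buf || clear_fd < 0 || map_fd < 0) {
		perror("setup");
		return 1;
	}

	/* "4" clears the soft-dirty bits for the whole address space. */
	if (write(clear_fd, "4", 1) != 1) {
		perror("write clear_refs");
		return 1;
	}
	printf("before write: %s\n",
	       (pagemap_entry(map_fd, (unsigned long)buf) & PAGEMAP_SOFT_DIRTY)
	       ? "soft-dirty" : "clean");

	buf[0] = 1;	/* dirty the page again */

	printf("after write:  %s\n",
	       (pagemap_entry(map_fd, (unsigned long)buf) & PAGEMAP_SOFT_DIRTY)
	       ? "soft-dirty" : "clean");
	return 0;
}
```

With this patch, a tool driving this loop over THP-backed memory no longer forces the huge pages to be split; the trade-off, as noted above, is that dirtying one byte of a huge page marks the whole 2MB range soft-dirty.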