Commit 5c64f52a authored by Naoya Horiguchi, committed by Linus Torvalds

clear_refs: remove clear_refs_private->vma and introduce clear_refs_test_walk()

clear_refs_write() has some prechecks to determine if we really walk over
a given vma.  Now we have a test_walk() callback to filter vmas, so let's
utilize it.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 14eb6fdd
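
For readers new to the pagewalk API this patch relies on: ->test_walk() is called once per vma before that vma's page tables are visited; a positive return value tells walk_page_range() to skip the vma, zero walks it, and a negative value aborts the walk with that error, with walk->vma filled in by the core walker. The sketch below is not part of this patch; skip_file_vmas(), count_pmds() and example_walk() are made-up names, and it assumes the kernel of this era (mm->mmap_sem as the mmap lock, struct mm_walk declared in <linux/mm.h>).

	#include <linux/mm.h>
	#include <linux/rwsem.h>

	/* Sketch only: skip file-backed vmas, walk everything else. */
	static int skip_file_vmas(unsigned long start, unsigned long end,
				  struct mm_walk *walk)
	{
		/* walk->vma is the vma about to be walked */
		return walk->vma->vm_file ? 1 : 0;	/* 1 == skip this vma */
	}

	/* Toy pmd handler: just count the pmd entries visited. */
	static int count_pmds(pmd_t *pmd, unsigned long addr,
			      unsigned long end, struct mm_walk *walk)
	{
		(*(unsigned long *)walk->private)++;
		return 0;
	}

	static void example_walk(struct mm_struct *mm)
	{
		unsigned long nr_pmds = 0;
		struct mm_walk walk = {
			.pmd_entry = count_pmds,
			.test_walk = skip_file_vmas,
			.mm        = mm,
			.private   = &nr_pmds,
		};

		down_read(&mm->mmap_sem);		/* pagewalk expects mmap_sem held */
		walk_page_range(0, ~0UL, &walk);	/* 0..~0UL covers every vma in mm */
		up_read(&mm->mmap_sem);
	}

The same pattern appears in the diff below: clear_refs_test_walk() becomes the per-vma filter, so clear_refs_write() can hand the whole address range to walk_page_range() instead of iterating vmas by hand.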
...@@ -736,7 +736,6 @@ enum clear_refs_types { ...@@ -736,7 +736,6 @@ enum clear_refs_types {
}; };
struct clear_refs_private { struct clear_refs_private {
struct vm_area_struct *vma;
enum clear_refs_types type; enum clear_refs_types type;
}; };
...@@ -767,7 +766,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, ...@@ -767,7 +766,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk) unsigned long end, struct mm_walk *walk)
{ {
struct clear_refs_private *cp = walk->private; struct clear_refs_private *cp = walk->private;
struct vm_area_struct *vma = cp->vma; struct vm_area_struct *vma = walk->vma;
pte_t *pte, ptent; pte_t *pte, ptent;
spinlock_t *ptl; spinlock_t *ptl;
struct page *page; struct page *page;
...@@ -801,6 +800,25 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, ...@@ -801,6 +800,25 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
return 0; return 0;
} }
static int clear_refs_test_walk(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
struct clear_refs_private *cp = walk->private;
struct vm_area_struct *vma = walk->vma;
/*
* Writing 1 to /proc/pid/clear_refs affects all pages.
* Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
* Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
* Writing 4 to /proc/pid/clear_refs affects all pages.
*/
if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
return 1;
if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
return 1;
return 0;
}
static ssize_t clear_refs_write(struct file *file, const char __user *buf, static ssize_t clear_refs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
...@@ -841,6 +859,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, ...@@ -841,6 +859,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
}; };
struct mm_walk clear_refs_walk = { struct mm_walk clear_refs_walk = {
.pmd_entry = clear_refs_pte_range, .pmd_entry = clear_refs_pte_range,
.test_walk = clear_refs_test_walk,
.mm = mm, .mm = mm,
.private = &cp, .private = &cp,
}; };
...@@ -860,28 +879,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, ...@@ -860,28 +879,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
} }
mmu_notifier_invalidate_range_start(mm, 0, -1); mmu_notifier_invalidate_range_start(mm, 0, -1);
} }
for (vma = mm->mmap; vma; vma = vma->vm_next) { walk_page_range(0, ~0UL, &clear_refs_walk);
cp.vma = vma;
if (is_vm_hugetlb_page(vma))
continue;
/*
* Writing 1 to /proc/pid/clear_refs affects all pages.
*
* Writing 2 to /proc/pid/clear_refs only affects
* Anonymous pages.
*
* Writing 3 to /proc/pid/clear_refs only affects file
* mapped pages.
*
* Writing 4 to /proc/pid/clear_refs affects all pages.
*/
if (type == CLEAR_REFS_ANON && vma->vm_file)
continue;
if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
continue;
walk_page_range(vma->vm_start, vma->vm_end,
&clear_refs_walk);
}
if (type == CLEAR_REFS_SOFT_DIRTY) if (type == CLEAR_REFS_SOFT_DIRTY)
mmu_notifier_invalidate_range_end(mm, 0, -1); mmu_notifier_invalidate_range_end(mm, 0, -1);
flush_tlb_mm(mm); flush_tlb_mm(mm);
......
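
For completeness, a hedged userspace sketch of the interface this code serves; it is not part of the patch, and clear_refs() here is an illustrative helper. The modes follow the comment block in clear_refs_test_walk() above, with mode 4 corresponding to CLEAR_REFS_SOFT_DIRTY as seen in the last hunk.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/types.h>
	#include <unistd.h>

	/*
	 * Write one of the clear_refs modes for the given pid:
	 *   "1" = all pages, "2" = anonymous pages only,
	 *   "3" = file-mapped pages only, "4" = clear soft-dirty state.
	 */
	static int clear_refs(pid_t pid, const char *mode)
	{
		char path[64];
		int fd;

		snprintf(path, sizeof(path), "/proc/%d/clear_refs", pid);
		fd = open(path, O_WRONLY);
		if (fd < 0) {
			perror("open");
			return -1;
		}
		if (write(fd, mode, strlen(mode)) < 0)
			perror("write");
		close(fd);
		return 0;
	}

	int main(void)
	{
		/* Clear referenced bits on anonymous pages of this process. */
		return clear_refs(getpid(), "2") ? 1 : 0;
	}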