Commit f995ece2 authored by Naoya Horiguchi, committed by Linus Torvalds

pagemap: use walk->vma instead of calling find_vma()

The page table walker now carries the current vma in mm_walk, so we no
longer have to call find_vma() in each pagemap_(pte|hugetlb)_range()
call.  pagemap_pte_range() currently loops over the vmas itself, so this
patch also removes many lines of code.

The NULL-vma check is omitted because we assume these callbacks are
never run on an address outside a vma.  And even if that assumption were
broken, the resulting NULL pointer dereference would be detected, so we
would get enough information for debugging.
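
As an illustration of the pattern this relies on (a sketch, not code from
this patch): the walker core populates walk->vma before invoking each
callback, so a callback can read the current vma directly.  The name
example_pte_range and the soft-dirty counting below are made up; only the
pte_entry signature and the walk->vma / walk->private fields are the
era's real API.

#include <linux/mm.h>

/*
 * Hypothetical pte_entry callback: the walker core has already set
 * walk->vma to the vma covering this address, so there is no need to
 * call find_vma(walk->mm, addr) on every invocation, and no NULL
 * check is needed as long as the walk only visits addresses inside
 * vmas.
 */
static int example_pte_range(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	unsigned long *nr_softdirty_ptes = walk->private;

	/* Count ptes that live in soft-dirty-tracked vmas. */
	if (vma->vm_flags & VM_SOFTDIRTY)
		(*nr_softdirty_ptes)++;
	return 0;
}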
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5c64f52a
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1047,15 +1047,13 @@ static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemap
 static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			     struct mm_walk *walk)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma = walk->vma;
 	struct pagemapread *pm = walk->private;
 	spinlock_t *ptl;
 	pte_t *pte, *orig_pte;
 	int err = 0;
 
-	/* find the first VMA at or above 'addr' */
-	vma = find_vma(walk->mm, addr);
-	if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		int pmd_flags2;
 
 		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
@@ -1081,55 +1079,20 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	if (pmd_trans_unstable(pmd))
 		return 0;
 
-	while (1) {
-		/* End of address space hole, which we mark as non-present. */
-		unsigned long hole_end;
-
-		if (vma)
-			hole_end = min(end, vma->vm_start);
-		else
-			hole_end = end;
-
-		for (; addr < hole_end; addr += PAGE_SIZE) {
-			pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
-
-			err = add_to_pagemap(addr, &pme, pm);
-			if (err)
-				return err;
-		}
-
-		if (!vma || vma->vm_start >= end)
-			break;
-		/*
-		 * We can't possibly be in a hugetlb VMA. In general,
-		 * for a mm_walk with a pmd_entry and a hugetlb_entry,
-		 * the pmd_entry can only be called on addresses in a
-		 * hugetlb if the walk starts in a non-hugetlb VMA and
-		 * spans a hugepage VMA. Since pagemap_read walks are
-		 * PMD-sized and PMD-aligned, this will never be true.
-		 */
-		BUG_ON(is_vm_hugetlb_page(vma));
-
-		/* Addresses in the VMA. */
-		orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-		for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) {
-			pagemap_entry_t pme;
-
-			pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
-			err = add_to_pagemap(addr, &pme, pm);
-			if (err)
-				break;
-		}
-		pte_unmap_unlock(orig_pte, ptl);
-
-		if (err)
-			return err;
-
-		if (addr == end)
-			break;
-
-		vma = find_vma(walk->mm, addr);
-	}
+	/*
+	 * We can assume that @vma always points to a valid one and @end never
+	 * goes beyond vma->vm_end.
+	 */
+	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	for (; addr < end; pte++, addr += PAGE_SIZE) {
+		pagemap_entry_t pme;
+
+		pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
+		err = add_to_pagemap(addr, &pme, pm);
+		if (err)
+			break;
+	}
+	pte_unmap_unlock(orig_pte, ptl);
 
 	cond_resched();
@@ -1155,15 +1118,12 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
 				 struct mm_walk *walk)
 {
 	struct pagemapread *pm = walk->private;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma = walk->vma;
 	int err = 0;
 	int flags2;
 	pagemap_entry_t pme;
 
-	vma = find_vma(walk->mm, addr);
-	WARN_ON_ONCE(!vma);
-
-	if (vma && (vma->vm_flags & VM_SOFTDIRTY))
+	if (vma->vm_flags & VM_SOFTDIRTY)
 		flags2 = __PM_SOFT_DIRTY;
 	else
 		flags2 = 0;
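
For reference, a sketch of how such callbacks were wired up with the
walk_page_range() API of this era, modelled loosely on pagemap_read();
example_walk and nr_softdirty are illustrative names, not part of this
patch, and the caller must hold mmap_sem as shown:

#include <linux/mm.h>

static int example_walk(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	unsigned long nr_softdirty = 0;
	struct mm_walk walk = {
		.pte_entry	= example_pte_range,	/* sketch above */
		.mm		= mm,
		.private	= &nr_softdirty,
	};
	int ret;

	/*
	 * walk_page_range() requires mmap_sem held; it fills in
	 * walk.vma for each vma it crosses before calling callbacks.
	 */
	down_read(&mm->mmap_sem);
	ret = walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);
	return ret;
}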