Commit 6c6d5280 authored by KOSAKI Motohiro, committed by Linus Torvalds

pagewalk: don't look up vma if walk->hugetlb_entry is unused

Currently, walk_page_range() calls find_vma() on every page-table-walk
iteration, but that is completely unnecessary if walk->hugetlb_entry is
unused.  Nor should we assume that find_vma() is a lightweight
operation.  So this patch checks walk->hugetlb_entry and avoids the
find_vma() call when possible.

This patch also makes two cleanups: 1) it removes the ugly
uninitialized_var(), and 2) it removes the #ifdef from the function body.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Hiroyuki Kamezawa <kamezawa.hiroyuki@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4b6ddbf7
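
For illustration, here is a minimal sketch (not part of the commit) of a
walk_page_range() caller that leaves hugetlb_entry unset, written against
the mm_walk API of this kernel generation; the callback name, the walk
bounds, and example_walk() itself are hypothetical.  With this patch
applied, such a walker never pays for the per-iteration find_vma() lookup:

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical callback: invoked for each present pte during the walk. */
static int example_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	return 0;	/* 0 means: keep walking */
}

static void example_walk(struct mm_struct *mm)
{
	struct mm_walk walk = {
		.pte_entry	= example_pte_entry,
		.mm		= mm,
		/* .hugetlb_entry is left NULL, so the new hugetlb_vma()
		 * helper bails out before calling find_vma() on every
		 * pgd iteration. */
	};

	down_read(&mm->mmap_sem);
	walk_page_range(0, TASK_SIZE, &walk);
	up_read(&mm->mmap_sem);
}

Note that callers still need mmap_sem held across the walk; the VM_BUG_ON
added below only asserts this on the path that actually looks up a vma.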
mm/pagewalk.c
@@ -126,7 +126,39 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
 
 	return 0;
 }
-#endif
+
+static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+{
+	struct vm_area_struct *vma;
+
+	/* We don't need vma lookup at all. */
+	if (!walk->hugetlb_entry)
+		return NULL;
+
+	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+	vma = find_vma(walk->mm, addr);
+	if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
+		return vma;
+
+	return NULL;
+}
+
+#else /* CONFIG_HUGETLB_PAGE */
+static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
+{
+	return NULL;
+}
+
+static int walk_hugetlb_range(struct vm_area_struct *vma,
+			      unsigned long addr, unsigned long end,
+			      struct mm_walk *walk)
+{
+	return 0;
+}
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
 
 /**
  * walk_page_range - walk a memory map's page tables with a callback
@@ -165,18 +197,17 @@ int walk_page_range(unsigned long addr, unsigned long end,
 
 	pgd = pgd_offset(walk->mm, addr);
 	do {
-		struct vm_area_struct *uninitialized_var(vma);
+		struct vm_area_struct *vma;
 
 		next = pgd_addr_end(addr, end);
 
-#ifdef CONFIG_HUGETLB_PAGE
 		/*
 		 * handle hugetlb vma individually because pagetable walk for
 		 * the hugetlb page is dependent on the architecture and
 		 * we can't handled it in the same manner as non-huge pages.
 		 */
-		vma = find_vma(walk->mm, addr);
-		if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma)) {
+		vma = hugetlb_vma(addr, walk);
+		if (vma) {
 			if (vma->vm_end < next)
 				next = vma->vm_end;
 			/*
@@ -189,7 +220,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
 			pgd = pgd_offset(walk->mm, next);
 			continue;
 		}
-#endif
+
 		if (pgd_none_or_clear_bad(pgd)) {
 			if (walk->pte_hole)
 				err = walk->pte_hole(addr, next, walk);
...