Commit 1e25a271 authored by Naoya Horiguchi, committed by Linus Torvalds

mincore: apply page table walker on do_mincore()

This patch makes do_mincore() use the common page table walker
(walk_page_range() clamped to a single VMA), which removes many lines of
open-coded page table walking.

[daeseok.youn@gmail.com: remove unneeded variable 'err']
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Daeseok Youn <daeseok.youn@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7d5b3bfa
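
For readers unfamiliar with the walker this patch switches to: the caller fills
a struct mm_walk with per-level callbacks (pmd_entry, pte_hole, hugetlb_entry,
...), an mm, and an opaque private pointer, and walk_page_range() descends the
page tables for the requested range, invoking the callbacks as it goes. The
sketch below is not part of the patch: count_present_pte() and count_resident()
are invented names, and it deliberately ignores transparent huge pages, which
the real mincore_pte_range() below handles via pmd_trans_huge_lock().

/*
 * Illustrative sketch only (not from this commit): count resident pages in
 * [start, end) of the current process.  The caller must hold mmap_sem for
 * read; THP and hugetlb cases are ignored here for brevity.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static int count_present_pte(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	unsigned long *resident = walk->private;
	spinlock_t *ptl;
	pte_t *ptep;

	/*
	 * With no pte_entry callback set, pmd_entry does the pte-level
	 * loop itself, exactly the pattern mincore_pte_range() follows.
	 */
	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE)
		if (pte_present(*ptep))
			(*resident)++;
	pte_unmap_unlock(ptep - 1, ptl);
	return 0;	/* 0 continues the walk, a negative value aborts it */
}

static unsigned long count_resident(unsigned long start, unsigned long end)
{
	unsigned long resident = 0;
	struct mm_walk walk = {
		.pmd_entry = count_present_pte,
		.mm        = current->mm,
		.private   = &resident,
	};

	/*
	 * pte_hole and hugetlb_entry are left NULL, so holes and hugetlb
	 * VMAs are simply skipped; mincore, by contrast, must emit one
	 * vector byte per page even for those, hence the extra callbacks
	 * added in the diff below.
	 */
	walk_page_range(start, end, &walk);
	return resident;
}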
mm/huge_memory.c
@@ -1412,26 +1412,6 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	return ret;
 }
 
-int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long addr, unsigned long end,
-		unsigned char *vec)
-{
-	spinlock_t *ptl;
-	int ret = 0;
-
-	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
-		/*
-		 * All logical pages in the range are present
-		 * if backed by a huge page.
-		 */
-		spin_unlock(ptl);
-		memset(vec, 1, (end - addr) >> PAGE_SHIFT);
-		ret = 1;
-	}
-
-	return ret;
-}
-
 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 		  unsigned long old_addr,
 		  unsigned long new_addr, unsigned long old_end,
...
mm/mincore.c
@@ -19,38 +19,25 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 
-static void mincore_hugetlb_page_range(struct vm_area_struct *vma,
-				unsigned long addr, unsigned long end,
-				unsigned char *vec)
+static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
+			unsigned long end, struct mm_walk *walk)
 {
 #ifdef CONFIG_HUGETLB_PAGE
-	struct hstate *h;
+	unsigned char present;
+	unsigned char *vec = walk->private;
 
-	h = hstate_vma(vma);
-	while (1) {
-		unsigned char present;
-		pte_t *ptep;
-
-		/*
-		 * Huge pages are always in RAM for now, but
-		 * theoretically it needs to be checked.
-		 */
-		ptep = huge_pte_offset(current->mm,
-				       addr & huge_page_mask(h));
-		present = ptep && !huge_pte_none(huge_ptep_get(ptep));
-		while (1) {
-			*vec = present;
-			vec++;
-			addr += PAGE_SIZE;
-			if (addr == end)
-				return;
-			/* check hugepage border */
-			if (!(addr & ~huge_page_mask(h)))
-				break;
-		}
-	}
+	/*
+	 * Hugepages under user process are always in RAM and never
+	 * swapped out, but theoretically it needs to be checked.
+	 */
+	present = pte && !huge_pte_none(huge_ptep_get(pte));
+	for (; addr != end; vec++, addr += PAGE_SIZE)
+		*vec = present;
+	walk->private = vec;
 #else
 	BUG();
 #endif
+	return 0;
 }
 
 /*
@@ -94,9 +81,8 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
 	return present;
 }
 
-static void mincore_unmapped_range(struct vm_area_struct *vma,
-				unsigned long addr, unsigned long end,
-				unsigned char *vec)
+static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
+				   struct vm_area_struct *vma, unsigned char *vec)
 {
 	unsigned long nr = (end - addr) >> PAGE_SHIFT;
 	int i;
@@ -111,23 +97,44 @@ static void mincore_unmapped_range(struct vm_area_struct *vma,
 		for (i = 0; i < nr; i++)
 			vec[i] = 0;
 	}
+	return nr;
+}
+
+static int mincore_unmapped_range(unsigned long addr, unsigned long end,
+				   struct mm_walk *walk)
+{
+	walk->private += __mincore_unmapped_range(addr, end,
+						  walk->vma, walk->private);
+	return 0;
 }
 
-static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec)
+static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+			struct mm_walk *walk)
 {
-	unsigned long next;
 	spinlock_t *ptl;
+	struct vm_area_struct *vma = walk->vma;
 	pte_t *ptep;
+	unsigned char *vec = walk->private;
+	int nr = (end - addr) >> PAGE_SHIFT;
+
+	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+		memset(vec, 1, nr);
+		spin_unlock(ptl);
+		goto out;
+	}
 
-	ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	do {
+	if (pmd_trans_unstable(pmd)) {
+		__mincore_unmapped_range(addr, end, vma, vec);
+		goto out;
+	}
+
+	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+	for (; addr != end; ptep++, addr += PAGE_SIZE) {
 		pte_t pte = *ptep;
 
-		next = addr + PAGE_SIZE;
 		if (pte_none(pte))
-			mincore_unmapped_range(vma, addr, next, vec);
+			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
+						 vma, vec);
 		else if (pte_present(pte))
 			*vec = 1;
 		else { /* pte is a swap entry */
@@ -150,69 +157,12 @@ static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			}
 		}
 		vec++;
-	} while (ptep++, addr = next, addr != end);
-	pte_unmap_unlock(ptep - 1, ptl);
-}
-
-static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec)
-{
-	unsigned long next;
-	pmd_t *pmd;
-
-	pmd = pmd_offset(pud, addr);
-	do {
-		next = pmd_addr_end(addr, end);
-		if (pmd_trans_huge(*pmd)) {
-			if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
-				vec += (next - addr) >> PAGE_SHIFT;
-				continue;
-			}
-			/* fall through */
-		}
-		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
-			mincore_unmapped_range(vma, addr, next, vec);
-		else
-			mincore_pte_range(vma, pmd, addr, next, vec);
-		vec += (next - addr) >> PAGE_SHIFT;
-	} while (pmd++, addr = next, addr != end);
-}
-
-static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec)
-{
-	unsigned long next;
-	pud_t *pud;
-
-	pud = pud_offset(pgd, addr);
-	do {
-		next = pud_addr_end(addr, end);
-		if (pud_none_or_clear_bad(pud))
-			mincore_unmapped_range(vma, addr, next, vec);
-		else
-			mincore_pmd_range(vma, pud, addr, next, vec);
-		vec += (next - addr) >> PAGE_SHIFT;
-	} while (pud++, addr = next, addr != end);
-}
-
-static void mincore_page_range(struct vm_area_struct *vma,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec)
-{
-	unsigned long next;
-	pgd_t *pgd;
-
-	pgd = pgd_offset(vma->vm_mm, addr);
-	do {
-		next = pgd_addr_end(addr, end);
-		if (pgd_none_or_clear_bad(pgd))
-			mincore_unmapped_range(vma, addr, next, vec);
-		else
-			mincore_pud_range(vma, pgd, addr, next, vec);
-		vec += (next - addr) >> PAGE_SHIFT;
-	} while (pgd++, addr = next, addr != end);
+	}
+	pte_unmap_unlock(ptep - 1, ptl);
+out:
+	walk->private += nr;
+	cond_resched();
+	return 0;
 }
 
 /*
@@ -224,18 +174,22 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
 {
 	struct vm_area_struct *vma;
 	unsigned long end;
+	int err;
+	struct mm_walk mincore_walk = {
+		.pmd_entry = mincore_pte_range,
+		.pte_hole = mincore_unmapped_range,
+		.hugetlb_entry = mincore_hugetlb,
+		.private = vec,
+	};
 
 	vma = find_vma(current->mm, addr);
 	if (!vma || addr < vma->vm_start)
 		return -ENOMEM;
+	mincore_walk.mm = vma->vm_mm;
 	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
-	if (is_vm_hugetlb_page(vma))
-		mincore_hugetlb_page_range(vma, addr, end, vec);
-	else
-		mincore_page_range(vma, addr, end, vec);
+	err = walk_page_range(addr, end, &mincore_walk);
+	if (err < 0)
+		return err;
 	return (end - addr) >> PAGE_SHIFT;
 }
...
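
For completeness, this is the userspace-visible behavior the code above
implements: mincore(2) fills one byte per page of the queried range, and the
vec array below is the same per-page vector that do_mincore() populates. A
minimal, hypothetical usage example (ordinary userspace C, not kernel code):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	unsigned char vec[2];
	char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;				/* fault in the first page only */
	if (mincore(p, 2 * page, vec))		/* kernel fills one byte per page */
		return 1;
	printf("page0=%d page1=%d\n", vec[0], vec[1]);	/* typically 1 and 0 */
	return 0;
}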