Commit e48293fd authored by Johannes Weiner, committed by Linus Torvalds

mincore: do nested page table walks

Do page table walks with the well-known nested loops we use in several
other places already.

This avoids doing a full page table walk after every pte range and also
allows handling unmapped areas bigger than one pte range in one go.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 25ef0e50
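
Before the diff, a note on the loop shape it introduces: each level computes
next = pXX_addr_end(addr, end), which rounds addr up to the next table-entry
boundary but clamps it to the overall walk end, so an empty entry can be
skipped in a single step. Below is a minimal user-space sketch of that
clamping idiom; the 2 MiB span and the simplified helper are illustrative
models, not code from the patch.

#include <stdio.h>

/*
 * User-space model of the kernel's pmd_addr_end() idiom, assuming a
 * 2 MiB pmd span (x86-64 with 4K pages) purely for illustration.
 * Returns the end of the pmd entry covering addr, clamped to end.
 */
#define PMD_SIZE (2UL << 20)
#define PMD_MASK (~(PMD_SIZE - 1))

static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;
        return boundary < end ? boundary : end;
}

int main(void)
{
        unsigned long addr = 0x1ff000, end = 0x600000, next;

        /* Same loop shape as the mincore_*_range() walkers added below. */
        do {
                next = pmd_addr_end(addr, end);
                printf("pmd-sized step: [%#lx, %#lx)\n", addr, next);
        } while (addr = next, addr != end);
        return 0;
}

Note how the first iteration covers only the partial range up to the first
pmd boundary; after that, each iteration advances one full pmd span at a
time, which is exactly what lets the walkers below mark a whole unmapped
range per loop step instead of restarting the walk.
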
@@ -144,6 +144,60 @@ static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_unmap_unlock(ptep - 1, ptl);
 }
 
+static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+			unsigned long addr, unsigned long end,
+			unsigned char *vec)
+{
+	unsigned long next;
+	pmd_t *pmd;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none_or_clear_bad(pmd))
+			mincore_unmapped_range(vma, addr, next, vec);
+		else
+			mincore_pte_range(vma, pmd, addr, next, vec);
+		vec += (next - addr) >> PAGE_SHIFT;
+	} while (pmd++, addr = next, addr != end);
+}
+
+static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+			unsigned long addr, unsigned long end,
+			unsigned char *vec)
+{
+	unsigned long next;
+	pud_t *pud;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			mincore_unmapped_range(vma, addr, next, vec);
+		else
+			mincore_pmd_range(vma, pud, addr, next, vec);
+		vec += (next - addr) >> PAGE_SHIFT;
+	} while (pud++, addr = next, addr != end);
+}
+
+static void mincore_page_range(struct vm_area_struct *vma,
+			unsigned long addr, unsigned long end,
+			unsigned char *vec)
+{
+	unsigned long next;
+	pgd_t *pgd;
+
+	pgd = pgd_offset(vma->vm_mm, addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			mincore_unmapped_range(vma, addr, next, vec);
+		else
+			mincore_pud_range(vma, pgd, addr, next, vec);
+		vec += (next - addr) >> PAGE_SHIFT;
+	} while (pgd++, addr = next, addr != end);
+}
+
 /*
  * Do a chunk of "sys_mincore()". We've already checked
  * all the arguments, we hold the mmap semaphore: we should
@@ -151,9 +205,6 @@ static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
  */
 static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
 	struct vm_area_struct *vma;
 	unsigned long end;
 
@@ -170,21 +221,11 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v
 
 	end = pmd_addr_end(addr, end);
 
-	pgd = pgd_offset(vma->vm_mm, addr);
-	if (pgd_none_or_clear_bad(pgd))
-		goto none_mapped;
-	pud = pud_offset(pgd, addr);
-	if (pud_none_or_clear_bad(pud))
-		goto none_mapped;
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none_or_clear_bad(pmd))
-		goto none_mapped;
-
-	mincore_pte_range(vma, pmd, addr, end, vec);
-	return (end - addr) >> PAGE_SHIFT;
-
-none_mapped:
-	mincore_unmapped_range(vma, addr, end, vec);
+	if (is_vm_hugetlb_page(vma))
+		mincore_hugetlb_page_range(vma, addr, end, vec);
+	else
+		mincore_page_range(vma, addr, end, vec);
+
 	return (end - addr) >> PAGE_SHIFT;
 }
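
For context on what these walks produce: the vec they fill is the per-page
residency map that userspace reads through mincore(2). A minimal,
self-contained caller as a sketch (standard Linux/glibc API; the buffer
size and page counts are arbitrary, nothing here comes from the patch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        size_t npages = 4;
        size_t len = npages * page;
        unsigned char *vec = malloc(npages);
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED || !vec)
                return 1;

        /* Fault in the first two pages only. */
        memset(buf, 0, 2 * page);

        /* Bit 0 of each vec entry reports page residency. */
        if (mincore(buf, len, vec) == 0)
                for (size_t i = 0; i < npages; i++)
                        printf("page %zu: %s\n", i,
                               (vec[i] & 1) ? "resident" : "not resident");
        return 0;
}

With this commit, a caller like the one above that queries a large,
entirely unmapped region no longer restarts the walk per pte range: whole
empty pmd, pud, or pgd entries are marked not-resident in one step.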