Commit 826fad1b authored by David Rientjes, committed by Linus Torvalds

smaps: extract pmd walker from smaps code

Extracts the pmd walker from smaps-specific code in fs/proc/task_mmu.c.

The new struct pmd_walker includes the struct vm_area_struct of the memory to
walk over.  Iteration begins at vma->vm_start and ends at vma->vm_end.  A
pointer to another data structure, such as the struct mem_size_stats that
serves as the smaps accumulator, may be stored in the private field.  For each
pmd in the VMA, the action function is called with a pointer to the
struct vm_area_struct, a pointer to the pmd_t, the start and end addresses of
the range it covers, and the private field.
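
The walker structure added to fs/proc/task_mmu.c by this patch (shown again in
the diff below) is:

	struct pmd_walker {
		struct vm_area_struct *vma;
		void *private;
		void (*action)(struct vm_area_struct *, pmd_t *, unsigned long,
			       unsigned long, void *);
	};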

The interface for walking pmds in a VMA in fs/proc/task_mmu.c is now:

	void for_each_pmd(struct vm_area_struct *vma,
			  void (*action)(struct vm_area_struct *vma,
					 pmd_t *pmd, unsigned long addr,
					 unsigned long end,
					 void *private),
			  void *private);

With the pmd walker now extracted from the smaps code, smaps_one_pmd() is
invoked for each pmd in the VMA.  Its behavior and efficiency are identical to
those of the existing implementation.
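
For example, show_smap() now drives the entire walk with a single call,
passing the smaps accumulator through the private pointer (see the last hunk
of the diff below):

	memset(&mss, 0, sizeof mss);
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		for_each_pmd(vma, smaps_one_pmd, &mss);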

Cc: Hugh Dickins <hugh@veritas.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0013572b
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -122,6 +122,13 @@ struct mem_size_stats
 	unsigned long private_dirty;
 };
 
+struct pmd_walker {
+	struct vm_area_struct *vma;
+	void *private;
+	void (*action)(struct vm_area_struct *, pmd_t *, unsigned long,
+		       unsigned long, void *);
+};
+
 static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
 {
 	struct proc_maps_private *priv = m->private;
@@ -204,16 +211,17 @@ static int show_map(struct seq_file *m, void *v)
 	return show_map_internal(m, v, NULL);
 }
 
-static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
-				unsigned long addr, unsigned long end,
-				struct mem_size_stats *mss)
+static void smaps_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			  unsigned long addr, unsigned long end,
+			  void *private)
 {
+	struct mem_size_stats *mss = private;
 	pte_t *pte, ptent;
 	spinlock_t *ptl;
 	struct page *page;
 
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
-	do {
+	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
 		if (!pte_present(ptent))
 			continue;
@@ -235,57 +243,64 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 			else
 				mss->private_clean += PAGE_SIZE;
 		}
-	} while (pte++, addr += PAGE_SIZE, addr != end);
+	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
 }
 
-static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
-				unsigned long addr, unsigned long end,
-				struct mem_size_stats *mss)
+static inline void for_each_pmd_in_pud(struct pmd_walker *walker, pud_t *pud,
+				       unsigned long addr, unsigned long end)
 {
 	pmd_t *pmd;
 	unsigned long next;
 
-	pmd = pmd_offset(pud, addr);
-	do {
+	for (pmd = pmd_offset(pud, addr); addr != end;
+	     pmd++, addr = next) {
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		smaps_pte_range(vma, pmd, addr, next, mss);
-	} while (pmd++, addr = next, addr != end);
+		walker->action(walker->vma, pmd, addr, next, walker->private);
+	}
 }
 
-static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
-				unsigned long addr, unsigned long end,
-				struct mem_size_stats *mss)
+static inline void for_each_pud_in_pgd(struct pmd_walker *walker, pgd_t *pgd,
+				       unsigned long addr, unsigned long end)
 {
 	pud_t *pud;
 	unsigned long next;
 
-	pud = pud_offset(pgd, addr);
-	do {
+	for (pud = pud_offset(pgd, addr); addr != end;
+	     pud++, addr = next) {
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		smaps_pmd_range(vma, pud, addr, next, mss);
-	} while (pud++, addr = next, addr != end);
+		for_each_pmd_in_pud(walker, pud, addr, next);
+	}
 }
 
-static inline void smaps_pgd_range(struct vm_area_struct *vma,
-				unsigned long addr, unsigned long end,
-				struct mem_size_stats *mss)
+static inline void for_each_pmd(struct vm_area_struct *vma,
+				void (*action)(struct vm_area_struct *, pmd_t *,
+					       unsigned long, unsigned long,
+					       void *),
+				void *private)
 {
+	unsigned long addr = vma->vm_start;
+	unsigned long end = vma->vm_end;
+	struct pmd_walker walker = {
+		.vma = vma,
+		.private = private,
+		.action = action,
+	};
 	pgd_t *pgd;
 	unsigned long next;
 
-	pgd = pgd_offset(vma->vm_mm, addr);
-	do {
+	for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
+	     pgd++, addr = next) {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		smaps_pud_range(vma, pgd, addr, next, mss);
-	} while (pgd++, addr = next, addr != end);
+		for_each_pud_in_pgd(&walker, pgd, addr, next);
+	}
 }
 
 static int show_smap(struct seq_file *m, void *v)
@@ -295,7 +310,7 @@ static int show_smap(struct seq_file *m, void *v)
 
 	memset(&mss, 0, sizeof mss);
 	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
+		for_each_pmd(vma, smaps_one_pmd, &mss);
 	return show_map_internal(m, v, &mss);
 }