Commit 14eb6fdd authored by Naoya Horiguchi's avatar Naoya Horiguchi Committed by Linus Torvalds

smaps: remove mem_size_stats->vma and use walk_page_vma()

pagewalk.c can handle vma in itself, so we don't have to pass vma via
walk->private.  And show_smap() walks pages on vma basis, so using
walk_page_vma() is preferable.
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 900fc5f1
...@@ -436,7 +436,6 @@ const struct file_operations proc_tid_maps_operations = { ...@@ -436,7 +436,6 @@ const struct file_operations proc_tid_maps_operations = {
#ifdef CONFIG_PROC_PAGE_MONITOR #ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats { struct mem_size_stats {
struct vm_area_struct *vma;
unsigned long resident; unsigned long resident;
unsigned long shared_clean; unsigned long shared_clean;
unsigned long shared_dirty; unsigned long shared_dirty;
...@@ -485,7 +484,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, ...@@ -485,7 +484,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
struct mm_walk *walk) struct mm_walk *walk)
{ {
struct mem_size_stats *mss = walk->private; struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = mss->vma; struct vm_area_struct *vma = walk->vma;
struct page *page = NULL; struct page *page = NULL;
if (pte_present(*pte)) { if (pte_present(*pte)) {
...@@ -509,7 +508,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, ...@@ -509,7 +508,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
struct mm_walk *walk) struct mm_walk *walk)
{ {
struct mem_size_stats *mss = walk->private; struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = mss->vma; struct vm_area_struct *vma = walk->vma;
struct page *page; struct page *page;
/* FOLL_DUMP will return -EFAULT on huge zero page */ /* FOLL_DUMP will return -EFAULT on huge zero page */
...@@ -530,8 +529,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, ...@@ -530,8 +529,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk) struct mm_walk *walk)
{ {
struct mem_size_stats *mss = walk->private; struct vm_area_struct *vma = walk->vma;
struct vm_area_struct *vma = mss->vma;
pte_t *pte; pte_t *pte;
spinlock_t *ptl; spinlock_t *ptl;
...@@ -623,10 +621,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) ...@@ -623,10 +621,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
}; };
memset(&mss, 0, sizeof mss); memset(&mss, 0, sizeof mss);
mss.vma = vma;
/* mmap_sem is held in m_start */ /* mmap_sem is held in m_start */
if (vma->vm_mm && !is_vm_hugetlb_page(vma)) walk_page_vma(vma, &smaps_walk);
walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
show_map_vma(m, vma, is_pid); show_map_vma(m, vma, is_pid);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment