Commit f3dbd344 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] zoned vm counters: split NR_ANON_PAGES off from NR_FILE_MAPPED

The current NR_FILE_MAPPED is used by zone reclaim and by the dirty-load
calculation as the number of mapped pagecache pages.  However, that is not
accurate: NR_FILE_MAPPED also includes mapped anonymous pages.  This patch
separates the two and therefore allows accurate tracking of anonymous
pages per zone.

It then becomes possible to determine the number of unmapped pages per zone,
and we can avoid scanning for unmapped pages if there are none.
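
As an illustration only (not part of this patch), here is a sketch of the kind
of per-zone check the split enables; zone_unmapped_file_pages() is a
hypothetical helper name, built on the zone_page_state() accessor from the
zoned-counter series:

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/*
 * Hypothetical helper, not in this patch: once NR_ANON_PAGES is split
 * out, NR_FILE_MAPPED counts only mapped pagecache pages, so the
 * unmapped pagecache in a zone is the difference of the file counters.
 */
static unsigned long zone_unmapped_file_pages(struct zone *zone)
{
	return zone_page_state(zone, NR_FILE_PAGES) -
		zone_page_state(zone, NR_FILE_MAPPED);
}

/* Zone reclaim could then bail out of zones with nothing to reclaim: */
static int zone_has_unmapped_pagecache(struct zone *zone)
{
	return zone_unmapped_file_pages(zone) != 0;
}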

Also, it may now be possible to determine the mapped/unmapped ratio in
get_dirty_limit.  Isn't the number of anonymous pages irrelevant in that
calculation?
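
To make that question concrete, here is a hedged sketch of a
get_writeback_state() variant that would count only pagecache mappings; note
that this patch itself deliberately keeps summing both counters (see the
mm/page-writeback.c hunk below):

/*
 * Hypothetical variant, not what this patch does: if anonymous pages
 * really are irrelevant to the dirty limit, nr_mapped could be
 * narrowed to mapped pagecache pages only.
 */
static void get_writeback_state(struct writeback_state *wbs)
{
	wbs->nr_dirty = read_page_state(nr_dirty);
	wbs->nr_unstable = read_page_state(nr_unstable);
	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED);
	wbs->nr_writeback = read_page_state(nr_writeback);
}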

Note that this will change the meaning of the number of mapped pages reported
in /proc/vmstat, /proc/meminfo, and in the per-node statistics.  This may
affect user-space tools that monitor these counters!  NR_FILE_MAPPED now works
like NR_FILE_DIRTY: it is only valid for pagecache pages.
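
For user-space monitors, a minimal illustrative reader (not part of the patch)
that picks up the new AnonPages field:

#include <stdio.h>

int main(void)
{
	char line[128];
	unsigned long kb;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 1;
	/* The "AnonPages:" name is the one added by this patch. */
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "AnonPages: %lu kB", &kb) == 1)
			printf("AnonPages: %lu kB\n", kb);
	fclose(f);
	return 0;
}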
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent bf02cf4b
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -71,6 +71,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		       "Node %d Writeback:    %8lu kB\n"
 		       "Node %d FilePages:    %8lu kB\n"
 		       "Node %d Mapped:       %8lu kB\n"
+		       "Node %d AnonPages:    %8lu kB\n"
 		       "Node %d Slab:         %8lu kB\n",
 		       nid, K(i.totalram),
 		       nid, K(i.freeram),
@@ -85,6 +86,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		       nid, K(ps.nr_writeback),
 		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
+		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
 		       nid, K(ps.nr_slab));
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -168,6 +168,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 		"SwapFree:     %8lu kB\n"
 		"Dirty:        %8lu kB\n"
 		"Writeback:    %8lu kB\n"
+		"AnonPages:    %8lu kB\n"
 		"Mapped:       %8lu kB\n"
 		"Slab:         %8lu kB\n"
 		"CommitLimit:  %8lu kB\n"
@@ -191,6 +192,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
 		K(i.freeswap),
 		K(ps.nr_dirty),
 		K(ps.nr_writeback),
+		K(global_page_state(NR_ANON_PAGES)),
 		K(global_page_state(NR_FILE_MAPPED)),
 		K(ps.nr_slab),
 		K(allowed),
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,7 +47,8 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
-	NR_FILE_MAPPED,	/* mapped into pagetables.
+	NR_ANON_PAGES,	/* Mapped anonymous pages */
+	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_FILE_PAGES,
 	NR_VM_ZONE_STAT_ITEMS };
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -111,7 +111,8 @@ static void get_writeback_state(struct writeback_state *wbs)
 {
 	wbs->nr_dirty = read_page_state(nr_dirty);
 	wbs->nr_unstable = read_page_state(nr_unstable);
-	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED);
+	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
+			global_page_state(NR_ANON_PAGES);
 	wbs->nr_writeback = read_page_state(nr_writeback);
 }
 
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -455,7 +455,7 @@ static void __page_set_anon_rmap(struct page *page,
 	 * nr_mapped state can be updated without turning off
 	 * interrupts because it is not modified via interrupt.
 	 */
-	__inc_zone_page_state(page, NR_FILE_MAPPED);
+	__inc_zone_page_state(page, NR_ANON_PAGES);
 }
 
 /**
@@ -531,7 +531,8 @@ void page_remove_rmap(struct page *page)
 		 */
 		if (page_test_and_clear_dirty(page))
 			set_page_dirty(page);
-		__dec_zone_page_state(page, NR_FILE_MAPPED);
+		__dec_zone_page_state(page,
+			PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
 	}
 }
 
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -742,7 +742,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		 * how much memory
 		 * is mapped.
 		 */
-		mapped_ratio = (global_page_state(NR_FILE_MAPPED) * 100) /
+		mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
+				global_page_state(NR_ANON_PAGES)) * 100) /
 					vm_total_pages;
 
 		/*
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -395,6 +395,7 @@ struct seq_operations fragmentation_op = {
 
 static char *vmstat_text[] = {
 	/* Zoned VM counters */
+	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",