Commit c720c50a authored by Andrew Morton, committed by Jaroslav Kysela

[PATCH] vm accounting fixes and addition

- /proc/vmstat:pageoutrun and /proc/vmstat:allocstall are always
  identical.  Rework this so that

  - "allocstall" is the number of times a page allocator ran diect reclaim

  - "pageoutrun" is the number of times kswapd ran page reclaim

- Add a new stat: "pgrotated".  The number of pages which were
  rotated to the tail of the LRU for immediate reclaim by
  rotate_reclaimable_page().

- Document things a bit.
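
As a rough illustration (not part of the patch), the reworked counters can be read back from /proc/vmstat. The sketch below assumes the usual layout of one "name value" pair per line and only picks out the three counters this change touches.

/*
 * Sketch only, not part of the patch: read /proc/vmstat and print the
 * counters touched by this change.  Assumes one "name value" pair per line.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[64];
	unsigned long value;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fscanf(f, "%63s %lu", name, &value) == 2) {
		if (!strcmp(name, "allocstall") ||	/* direct reclaim runs */
		    !strcmp(name, "pageoutrun") ||	/* kswapd reclaim runs */
		    !strcmp(name, "pgrotated"))		/* pages rotated to LRU tail */
			printf("%s %lu\n", name, value);
	}
	fclose(f);
	return 0;
}

Running it while the system is under memory pressure should show "allocstall" and "pageoutrun" diverging, which is the point of the rework.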
parent 54cbdcfd
@@ -79,35 +79,36 @@
  * allowed.
  */
 struct page_state {
-	unsigned long nr_dirty;
-	unsigned long nr_writeback;
-	unsigned long nr_pagecache;
-	unsigned long nr_page_table_pages;
-	unsigned long nr_reverse_maps;
-	unsigned long nr_mapped;
-	unsigned long nr_slab;
+	unsigned long nr_dirty;		/* Dirty writeable pages */
+	unsigned long nr_writeback;	/* Pages under writeback */
+	unsigned long nr_pagecache;	/* Pages in pagecache */
+	unsigned long nr_page_table_pages;/* Pages used for pagetables */
+	unsigned long nr_reverse_maps;	/* includes PageDirect */
+	unsigned long nr_mapped;	/* mapped into pagetables */
+	unsigned long nr_slab;		/* In slab */
 #define GET_PAGE_STATE_LAST nr_slab
 	/*
 	 * The below are zeroed by get_page_state(). Use get_full_page_state()
 	 * to add up all these.
 	 */
-	unsigned long pgpgin;
-	unsigned long pgpgout;
-	unsigned long pswpin;
-	unsigned long pswpout;
-	unsigned long pgalloc;
-	unsigned long pgfree;
-	unsigned long pgactivate;
-	unsigned long pgdeactivate;
-	unsigned long pgfault;
-	unsigned long pgmajfault;
-	unsigned long pgscan;
-	unsigned long pgrefill;
-	unsigned long pgsteal;
-	unsigned long kswapd_steal;
-	unsigned long pageoutrun;
-	unsigned long allocstall;
+	unsigned long pgpgin;		/* Disk reads */
+	unsigned long pgpgout;		/* Disk writes */
+	unsigned long pswpin;		/* swap reads */
+	unsigned long pswpout;		/* swap writes */
+	unsigned long pgalloc;		/* page allocations */
+	unsigned long pgfree;		/* page freeings */
+	unsigned long pgactivate;	/* pages moved inactive->active */
+	unsigned long pgdeactivate;	/* pages moved active->inactive */
+	unsigned long pgfault;		/* faults (major+minor) */
+	unsigned long pgmajfault;	/* faults (major only) */
+	unsigned long pgscan;		/* pages scanned by page reclaim */
+	unsigned long pgrefill;		/* inspected in refill_inactive_zone */
+	unsigned long pgsteal;		/* total pages reclaimed */
+	unsigned long kswapd_steal;	/* pages reclaimed by kswapd */
+	unsigned long pageoutrun;	/* kswapd's calls to page reclaim */
+	unsigned long allocstall;	/* direct reclaim calls */
+	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
 } ____cacheline_aligned;
 DECLARE_PER_CPU(struct page_state, page_states);
...
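
The struct above is instantiated per CPU (DECLARE_PER_CPU), and the in-code comment notes that the event counters are zeroed by get_page_state() and summed by get_full_page_state(). As a rough, self-contained model of that per-CPU counting pattern (not the kernel code; NCPUS, cpu_counters, inc_counter() and sum_counters() are invented names for illustration):

/*
 * Illustrative model only: each CPU owns its own counter structure and
 * bumps it locally; a reader sums across all CPUs, much like
 * get_full_page_state() adds up the per-CPU page_states.
 */
#include <stdio.h>

#define NCPUS 4

struct counters {
	unsigned long allocstall;	/* direct reclaim calls */
	unsigned long pageoutrun;	/* kswapd reclaim runs */
	unsigned long pgrotated;	/* pages rotated to LRU tail */
};

static struct counters cpu_counters[NCPUS];

/* Bump one member on the CPU doing the work (here chosen by the caller). */
#define inc_counter(cpu, member)	(cpu_counters[cpu].member++)

/* Sum a member across all CPUs, as a reader of the stats would. */
#define sum_counters(member) ({					\
	unsigned long __total = 0;				\
	for (int __i = 0; __i < NCPUS; __i++)			\
		__total += cpu_counters[__i].member;		\
	__total; })

int main(void)
{
	inc_counter(0, allocstall);
	inc_counter(1, allocstall);
	inc_counter(2, pgrotated);
	printf("allocstall %lu\n", sum_counters(allocstall));
	printf("pgrotated  %lu\n", sum_counters(pgrotated));
	return 0;
}
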
@@ -513,7 +513,6 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
 	if (!wait)
 		goto nopage;
-	inc_page_state(allocstall);
 	current->flags |= PF_MEMALLOC;
 	try_to_free_pages(classzone, gfp_mask, order);
 	current->flags &= ~PF_MEMALLOC;

@@ -1354,6 +1353,7 @@ static char *vmstat_text[] = {
 	"kswapd_steal",
 	"pageoutrun",
 	"allocstall",
+	"pgrotated",
 };
 static void *vmstat_start(struct seq_file *m, loff_t *pos)
...
@@ -61,6 +61,7 @@ int rotate_reclaimable_page(struct page *page)
 	if (PageLRU(page) && !PageActive(page)) {
 		list_del(&page->lru);
 		list_add_tail(&page->lru, &zone->inactive_list);
+		inc_page_state(pgrotated);
 	}
 	if (!TestClearPageWriteback(page))
 		BUG();
...
@@ -797,7 +797,7 @@ try_to_free_pages(struct zone *classzone,
 	const int nr_pages = SWAP_CLUSTER_MAX;
 	int nr_reclaimed = 0;
-	inc_page_state(pageoutrun);
+	inc_page_state(allocstall);
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
 		int total_scanned = 0;

@@ -853,6 +853,8 @@ static int balance_pgdat(pg_data_t *pgdat, int nr_pages, struct page_state *ps)
 	int priority;
 	int i;
+	inc_page_state(pageoutrun);
+
 	for (priority = DEF_PRIORITY; priority; priority--) {
 		int all_zones_ok = 1;
...