Commit b29422e3 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] add stats for page reclaim via inode freeing

pagecache can be reclaimed via the page LRU and via prune_icache.  We
currently don't know how much reclaim is happening via each.

The patch adds instrumentation to display the number of pages which were
freed via prune_icache.  This is displayed in /proc/vmstat:pginodesteal and
/proc/vmstat:kswapd_inodesteal.

Turns out that under some workloads (well, dbench at least), fully half of
page reclaim is via the unused inode list.  Which seems quite OK to me.
parent f5585f5d
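
(Not part of the patch: for anyone wanting to eyeball the new counters, a minimal userspace sketch such as the one below will do. It simply scans /proc/vmstat for the two field names which this patch adds; the program is illustrative only.)

/* illustrative helper, not part of this patch: print the two new counters */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long val;

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fscanf(f, "%63s %lu", name, &val) == 2) {
		if (!strcmp(name, "pginodesteal") ||
		    !strcmp(name, "kswapd_inodesteal"))
			printf("%s %lu\n", name, val);
	}
	fclose(f);
	return 0;
}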
@@ -16,6 +16,7 @@
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/hash.h>
+#include <linux/swap.h>
#include <linux/security.h>

/*
@@ -392,6 +393,7 @@ static void prune_icache(int nr_to_scan)
	LIST_HEAD(freeable);
	int nr_pruned = 0;
	int nr_scanned;
+	unsigned long reap = 0;

	spin_lock(&inode_lock);
	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
@@ -410,7 +412,7 @@ static void prune_icache(int nr_to_scan)
			__iget(inode);
			spin_unlock(&inode_lock);
			if (remove_inode_buffers(inode))
-				invalidate_inode_pages(&inode->i_data);
+				reap += invalidate_inode_pages(&inode->i_data);
			iput(inode);
			spin_lock(&inode_lock);
@@ -428,6 +430,10 @@ static void prune_icache(int nr_to_scan)
	inodes_stat.nr_unused -= nr_pruned;
	spin_unlock(&inode_lock);

	dispose_list(&freeable);
+	if (current_is_kswapd())
+		mod_page_state(kswapd_inodesteal, reap);
+	else
+		mod_page_state(pginodesteal, reap);
}
/*
......
@@ -1102,9 +1102,9 @@ extern int full_check_disk_change(struct block_device *);
extern int __check_disk_change(dev_t);
extern int invalidate_inodes(struct super_block *);
extern int invalidate_device(kdev_t, int);
-extern void invalidate_mapping_pages(struct address_space *mapping,
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end);
-extern void invalidate_inode_pages(struct address_space *mapping);
+unsigned long invalidate_inode_pages(struct address_space *mapping);
extern void invalidate_inode_pages2(struct address_space *mapping);
extern void write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
......
@@ -97,15 +97,20 @@ struct page_state {
	unsigned long pswpin;		/* swap reads */
	unsigned long pswpout;		/* swap writes */
	unsigned long pgalloc;		/* page allocations */
	unsigned long pgfree;		/* page freeings */
	unsigned long pgactivate;	/* pages moved inactive->active */
	unsigned long pgdeactivate;	/* pages moved active->inactive */
	unsigned long pgfault;		/* faults (major+minor) */
	unsigned long pgmajfault;	/* faults (major only) */
	unsigned long pgscan;		/* pages scanned by page reclaim */
	unsigned long pgrefill;		/* inspected in refill_inactive_zone */
	unsigned long pgsteal;		/* total pages reclaimed */
	unsigned long pginodesteal;	/* pages reclaimed via inode freeing */
	unsigned long kswapd_steal;	/* pages reclaimed by kswapd */
	unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
	unsigned long pageoutrun;	/* kswapd's calls to page reclaim */
	unsigned long allocstall;	/* direct reclaim calls */
	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
......
@@ -1379,15 +1379,20 @@ static char *vmstat_text[] = {
	"pswpin",
	"pswpout",
	"pgalloc",
	"pgfree",
	"pgactivate",
	"pgdeactivate",
	"pgfault",
	"pgmajfault",
	"pgscan",
	"pgrefill",
	"pgsteal",
	"pginodesteal",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",
	"pgrotated",
......
@@ -64,24 +64,25 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 * ->page_lock.  That provides exclusion against the __set_page_dirty
 * functions.
 */
-static void
+static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
-		return;
+		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
-		return;
+		return 0;

	write_lock(&mapping->page_lock);
	if (PageDirty(page)) {
		write_unlock(&mapping->page_lock);
-	} else {
-		__remove_from_page_cache(page);
-		write_unlock(&mapping->page_lock);
-		ClearPageUptodate(page);
-		page_cache_release(page);	/* pagecache ref */
+		return 0;
	}
+	__remove_from_page_cache(page);
+	write_unlock(&mapping->page_lock);
+	ClearPageUptodate(page);
+	page_cache_release(page);	/* pagecache ref */
+	return 1;
}
/**
@@ -189,11 +190,12 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
-void invalidate_mapping_pages(struct address_space *mapping,
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
+	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
@@ -213,18 +215,19 @@ void invalidate_mapping_pages(struct address_space *mapping,
				goto unlock;
			if (page_mapped(page))
				goto unlock;
-			invalidate_complete_page(mapping, page);
+			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
+	return ret;
}

-void invalidate_inode_pages(struct address_space *mapping)
+unsigned long invalidate_inode_pages(struct address_space *mapping)
{
-	invalidate_mapping_pages(mapping, 0, ~0UL);
+	return invalidate_mapping_pages(mapping, 0, ~0UL);
}
/**
......