Commit 228c3d15 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] lru_add_active(): for starting pages on the active list

This is the first in a series of patches which tune up 2.5
performance under heavy swap loads.

Throughput on stupid swapstormy tests is increased by 1.5x to 3x.
Still about 20% behind 2.4 with multithreaded tests.  That is not
easily fixable: 2.4's virtual scan tends to apply a form of load
control - particular processes are heavily swapped out so the others can
get ahead.  With 2.5 all processes make very even progress and much
more swapping is needed.  It's on par with 2.4 for single-process
swapstorms.


In this patch:

The code which tries to start mapped pages out on the active list
doesn't work very well.  It uses an "is it mapped into pagetables?"
test, which doesn't work for, say, swap readahead pages: they are not
yet mapped into pagetables at the time they are spilled onto the LRU.

So create a new `lru_cache_add_active()' function for deferred addition
of pages to their zone's active list.
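
For illustration, here is how a caller might use the new hook; the
surrounding allocation code is a hypothetical sketch, not part of this
patch.  A fault handler that knows it is about to map a page can now
start it on the active list explicitly, rather than relying on
__pagevec_lru_add() guessing from page_mapped():

	struct page *page = alloc_page(GFP_HIGHUSER);	/* hypothetical caller */

	if (page) {
		/* ...fill the page; it is about to be mapped... */
		lru_cache_add_active(page);	/* was: lru_cache_add(page) */
	}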

Also move mark_page_accessed() from filemap.c to swap.c, where all the
similar functions live.  And teach it not to try to move pages which
are still sitting in the deferred-addition pagevecs onto the active
list.  That cannot work, and the old code was bogusly clearing
PageReferenced in that case.
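
Spelling out the failure mode (an illustrative timeline, not code from
this diff): a page parked in a per-CPU pagevec does not yet have
PageLRU set, so activate_page() has nothing to move.

	lru_cache_add(page);		/* parked in the per-CPU pagevec;
					 * PageLRU is still clear */
	mark_page_accessed(page);	/* sets PageReferenced */
	mark_page_accessed(page);	/* old code: calls activate_page()
					 * (a no-op while !PageLRU) and
					 * bogusly clears PageReferenced;
					 * new code: leaves the page alone */
	lru_add_drain();		/* pagevec spills, page reaches the LRU */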

The deferred-addition lists are a pest.  But lru_cache_add() used to be
really expensive in some workloads on some machines, so they must persist.
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -20,6 +20,7 @@ void __pagevec_release(struct pagevec *pvec);
 void __pagevec_release_nonlru(struct pagevec *pvec);
 void __pagevec_free(struct pagevec *pvec);
 void __pagevec_lru_add(struct pagevec *pvec);
+void __pagevec_lru_add_active(struct pagevec *pvec);
 void lru_add_drain(void);
 void pagevec_deactivate_inactive(struct pagevec *pvec);
 void pagevec_strip(struct pagevec *pvec);
......
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -156,7 +156,7 @@ extern int FASTCALL(page_over_rsslimit(struct page *));
 
 /* linux/mm/swap.c */
 extern void FASTCALL(lru_cache_add(struct page *));
+extern void FASTCALL(lru_cache_add_active(struct page *));
 extern void FASTCALL(activate_page(struct page *));
 extern void swap_setup(void);
......
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -549,24 +549,6 @@ grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 	return page;
 }
 
-/*
- * Mark a page as having seen activity.
- *
- * inactive,unreferenced	->	inactive,referenced
- * inactive,referenced		->	active,unreferenced
- * active,unreferenced		->	active,referenced
- */
-void mark_page_accessed(struct page *page)
-{
-	if (!PageActive(page) && PageReferenced(page)) {
-		activate_page(page);
-		ClearPageReferenced(page);
-		return;
-	} else if (!PageReferenced(page)) {
-		SetPageReferenced(page);
-	}
-}
-
 /*
  * This is a generic file read routine, and uses the
  * inode->i_op->readpage() function for the actual low-level
......
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -22,6 +22,7 @@
 #include <linux/mm_inline.h>
 #include <linux/buffer_head.h>
 #include <linux/prefetch.h>
+#include <linux/percpu.h>
 
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
@@ -43,15 +44,33 @@ void activate_page(struct page *page)
 	spin_unlock_irq(&zone->lru_lock);
 }
 
+/*
+ * Mark a page as having seen activity.
+ *
+ * inactive,unreferenced	->	inactive,referenced
+ * inactive,referenced		->	active,unreferenced
+ * active,unreferenced		->	active,referenced
+ */
+void mark_page_accessed(struct page *page)
+{
+	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
+		activate_page(page);
+		ClearPageReferenced(page);
+	} else if (!PageReferenced(page)) {
+		SetPageReferenced(page);
+	}
+}
+
 /**
  * lru_cache_add: add a page to the page lists
  * @page: the page to add
  */
-static struct pagevec lru_add_pvecs[NR_CPUS];
+static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
+static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };
 
 void lru_cache_add(struct page *page)
 {
-	struct pagevec *pvec = &lru_add_pvecs[get_cpu()];
+	struct pagevec *pvec = &per_cpu(lru_add_pvecs, get_cpu());
 
 	page_cache_get(page);
 	if (!pagevec_add(pvec, page))
@@ -59,12 +78,26 @@ void lru_cache_add(struct page *page)
 	put_cpu();
 }
 
+void lru_cache_add_active(struct page *page)
+{
+	struct pagevec *pvec = &per_cpu(lru_add_active_pvecs, get_cpu());
+
+	page_cache_get(page);
+	if (!pagevec_add(pvec, page))
+		__pagevec_lru_add_active(pvec);
+	put_cpu();
+}
+
 void lru_add_drain(void)
 {
-	struct pagevec *pvec = &lru_add_pvecs[get_cpu()];
+	int cpu = get_cpu();
+	struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
 
 	if (pagevec_count(pvec))
 		__pagevec_lru_add(pvec);
+	pvec = &per_cpu(lru_add_active_pvecs, cpu);
+	if (pagevec_count(pvec))
+		__pagevec_lru_add_active(pvec);
 	put_cpu();
 }
@@ -198,8 +231,6 @@ void pagevec_deactivate_inactive(struct pagevec *pvec)
 /*
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them.  Reinitialises the caller's pagevec.
- *
- * Mapped pages go onto the active list.
  */
 void __pagevec_lru_add(struct pagevec *pvec)
 {
@@ -218,13 +249,33 @@ void __pagevec_lru_add(struct pagevec *pvec)
 		}
 		if (TestSetPageLRU(page))
 			BUG();
-		if (page_mapped(page)) {
-			if (TestSetPageActive(page))
-				BUG();
-			add_page_to_active_list(zone, page);
-		} else {
-			add_page_to_inactive_list(zone, page);
-		}
+		add_page_to_inactive_list(zone, page);
 	}
 	if (zone)
 		spin_unlock_irq(&zone->lru_lock);
 	pagevec_release(pvec);
 }
 
+void __pagevec_lru_add_active(struct pagevec *pvec)
+{
+	int i;
+	struct zone *zone = NULL;
+
+	for (i = 0; i < pagevec_count(pvec); i++) {
+		struct page *page = pvec->pages[i];
+		struct zone *pagezone = page_zone(page);
+
+		if (pagezone != zone) {
+			if (zone)
+				spin_unlock_irq(&zone->lru_lock);
+			zone = pagezone;
+			spin_lock_irq(&zone->lru_lock);
+		}
+		if (TestSetPageLRU(page))
+			BUG();
+		if (TestSetPageActive(page))
+			BUG();
+		add_page_to_active_list(zone, page);
+	}
+	if (zone)
+		spin_unlock_irq(&zone->lru_lock);
......