Commit 8419c318 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] SwapMig: CONFIG_MIGRATION fixes

Move move_to_lru(), putback_lru_pages() and isolate_lru_page() into the section
guarded by CONFIG_MIGRATION, saving some code size for single-processor kernels.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 39743889
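
For context, the mechanics of the change are plain conditional compilation: code inside an #ifdef CONFIG_MIGRATION block is simply absent from kernels configured without migration. Before this patch, move_to_lru(), putback_lru_pages() and isolate_lru_page() sat outside such a block in mm/vmscan.c, so every configuration paid for them. A minimal userspace sketch of the effect (CONFIG_MIGRATION here is an ordinary -D macro standing in for the Kconfig-generated one, not the kernel's build machinery):

/* sketch.c - build with and without -DCONFIG_MIGRATION and compare
 * the binaries: the guarded function exists only in the first build.
 */
#include <stdio.h>

#ifdef CONFIG_MIGRATION
/* compiled, linked, and counted against text size only when enabled */
static int migration_helper(void)
{
        return 42;
}
#endif

int main(void)
{
#ifdef CONFIG_MIGRATION
        printf("migration helpers built in: %d\n", migration_helper());
#else
        printf("migration helpers compiled out\n");
#endif
        return 0;
}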
include/linux/swap.h

@@ -175,10 +175,9 @@ extern int try_to_free_pages(struct zone **, gfp_t);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
+#ifdef CONFIG_MIGRATION
 extern int isolate_lru_page(struct page *p);
 extern int putback_lru_pages(struct list_head *l);
-
-#ifdef CONFIG_MIGRATION
 extern int migrate_pages(struct list_head *l, struct list_head *t);
 #endif
 
...
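
With the prototypes now inside the guard, a caller compiled without CONFIG_MIGRATION loses the declarations entirely, so callers are expected to sit under the same guard. A common alternative, shown below purely as a hypothetical sketch (this patch does not add it), is to provide no-op fallbacks in the header so callers need no #ifdef of their own:

/* hypothetical_stubs.h - NOT part of this patch; illustrates the
 * stub-fallback pattern as an alternative to guarding every caller.
 */
#ifndef HYPOTHETICAL_STUBS_H
#define HYPOTHETICAL_STUBS_H

#include <errno.h>

struct page;            /* opaque here; the real one lives in <linux/mm.h> */
struct list_head;

#ifdef CONFIG_MIGRATION
extern int isolate_lru_page(struct page *p);
extern int putback_lru_pages(struct list_head *l);
#else
/* degrade gracefully when migration is configured out */
static inline int isolate_lru_page(struct page *p)
{
        return -ENOSYS;
}
static inline int putback_lru_pages(struct list_head *l)
{
        return 0;       /* nothing was isolated, nothing to put back */
}
#endif

#endif /* HYPOTHETICAL_STUBS_H */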
mm/vmscan.c

@@ -569,6 +569,40 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 }
 
 #ifdef CONFIG_MIGRATION
+static inline void move_to_lru(struct page *page)
+{
+	list_del(&page->lru);
+	if (PageActive(page)) {
+		/*
+		 * lru_cache_add_active checks that
+		 * the PG_active bit is off.
+		 */
+		ClearPageActive(page);
+		lru_cache_add_active(page);
+	} else {
+		lru_cache_add(page);
+	}
+	put_page(page);
+}
+
+/*
+ * Add isolated pages on the list back to the LRU
+ *
+ * returns the number of pages put back.
+ */
+int putback_lru_pages(struct list_head *l)
+{
+	struct page *page;
+	struct page *page2;
+	int count = 0;
+
+	list_for_each_entry_safe(page, page2, l, lru) {
+		move_to_lru(page);
+		count++;
+	}
+	return count;
+}
+
 /*
  * swapout a single page
  * page is locked upon entry, unlocked on exit
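
One detail worth noting in putback_lru_pages() above: it walks the list with list_for_each_entry_safe() because move_to_lru() starts by list_del()-ing the current entry, which would break a plain traversal. The standalone sketch below uses simplified stand-ins for the <linux/list.h> primitives (the page ids are hypothetical) to show what the "safe" variant expands to: a lookahead cursor, the role page2 plays above.

/* safe_iter.c - why deleting during traversal needs a lookahead cursor */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct page { int id; struct list_head lru; };

static void list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

int main(void)
{
        struct page pages[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
        struct list_head head;
        struct list_head *pos, *next;
        int i, count = 0;

        /* link head and the three pages into a circular list */
        head.next = &pages[0].lru;
        head.prev = &pages[2].lru;
        for (i = 0; i < 3; i++) {
                pages[i].lru.next = (i < 2) ? &pages[i + 1].lru : &head;
                pages[i].lru.prev = (i > 0) ? &pages[i - 1].lru : &head;
        }

        /*
         * This is what list_for_each_entry_safe() expands to: "next" is
         * fetched before list_del() can invalidate pos->next.
         */
        for (pos = head.next, next = pos->next; pos != &head;
             pos = next, next = pos->next) {
                struct page *page = container_of(pos, struct page, lru);

                list_del(&page->lru);   /* the first thing move_to_lru() does */
                printf("put back page %d\n", page->id);
                count++;
        }
        printf("%d pages put back\n", count);
        return 0;
}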
@@ -709,6 +743,48 @@ int migrate_pages(struct list_head *l, struct list_head *t)
 	return nr_failed + retry;
 }
 
+static void lru_add_drain_per_cpu(void *dummy)
+{
+	lru_add_drain();
+}
+
+/*
+ * Isolate one page from the LRU lists and put it on the
+ * indicated list. Do necessary cache draining if the
+ * page is not on the LRU lists yet.
+ *
+ * Result:
+ *  0 = page not on LRU list
+ *  1 = page removed from LRU list and added to the specified list.
+ * -ENOENT = page is being freed elsewhere.
+ */
+int isolate_lru_page(struct page *page)
+{
+	int rc = 0;
+	struct zone *zone = page_zone(page);
+
+redo:
+	spin_lock_irq(&zone->lru_lock);
+	rc = __isolate_lru_page(page);
+	if (rc == 1) {
+		if (PageActive(page))
+			del_page_from_active_list(zone, page);
+		else
+			del_page_from_inactive_list(zone, page);
+	}
+	spin_unlock_irq(&zone->lru_lock);
+	if (rc == 0) {
+		/*
+		 * Maybe this page is still waiting for a cpu to drain it
+		 * from one of the lru lists?
+		 */
+		rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+		if (rc == 0 && PageLRU(page))
+			goto redo;
+	}
+	return rc;
+}
+
 #endif
 
 /*
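
The retry in isolate_lru_page() above handles a window where a page has been handed to a per-cpu pagevec but not yet spliced onto the LRU: draining every cpu's pagevecs can make it appear, hence the goto redo. Below is a hedged userspace model of that state machine; the enum, struct and function names are illustrative stand-ins, not kernel API.

/* isolate_model.c - toy model of isolate_lru_page()'s return contract */
#include <stdio.h>
#include <errno.h>

/* a page is on the LRU, parked in a per-cpu pagevec, or being freed */
enum page_state { ON_LRU, IN_PAGEVEC, FREEING, ISOLATED };

struct page { enum page_state state; };

/* stand-in for __isolate_lru_page() under zone->lru_lock */
static int try_isolate(struct page *page)
{
        switch (page->state) {
        case ON_LRU:
                page->state = ISOLATED; /* removed from LRU, listed */
                return 1;
        case FREEING:
                return -ENOENT;         /* being freed elsewhere */
        default:
                return 0;               /* not on the LRU (yet) */
        }
}

/* stand-in for schedule_on_each_cpu(lru_add_drain_per_cpu, NULL) */
static int drain_all_pagevecs(struct page *page)
{
        if (page->state == IN_PAGEVEC)
                page->state = ON_LRU;   /* draining moves it onto the LRU */
        return 0;                       /* 0 = the drain itself succeeded */
}

static int isolate_lru_page(struct page *page)
{
        int rc;
redo:
        rc = try_isolate(page);
        if (rc == 0) {
                /* maybe a cpu still holds the page in a pagevec */
                rc = drain_all_pagevecs(page);
                if (rc == 0 && page->state == ON_LRU)
                        goto redo;
        }
        return rc;
}

int main(void)
{
        struct page p = { IN_PAGEVEC };

        printf("isolate -> %d (expect 1 after drain + retry)\n",
               isolate_lru_page(&p));
        return 0;
}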
@@ -758,48 +834,6 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 	return nr_taken;
 }
 
-static void lru_add_drain_per_cpu(void *dummy)
-{
-	lru_add_drain();
-}
-
-/*
- * Isolate one page from the LRU lists and put it on the
- * indicated list. Do necessary cache draining if the
- * page is not on the LRU lists yet.
- *
- * Result:
- *  0 = page not on LRU list
- *  1 = page removed from LRU list and added to the specified list.
- * -ENOENT = page is being freed elsewhere.
- */
-int isolate_lru_page(struct page *page)
-{
-	int rc = 0;
-	struct zone *zone = page_zone(page);
-
-redo:
-	spin_lock_irq(&zone->lru_lock);
-	rc = __isolate_lru_page(page);
-	if (rc == 1) {
-		if (PageActive(page))
-			del_page_from_active_list(zone, page);
-		else
-			del_page_from_inactive_list(zone, page);
-	}
-	spin_unlock_irq(&zone->lru_lock);
-	if (rc == 0) {
-		/*
-		 * Maybe this page is still waiting for a cpu to drain it
-		 * from one of the lru lists?
-		 */
-		rc = schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
-		if (rc == 0 && PageLRU(page))
-			goto redo;
-	}
-	return rc;
-}
-
 /*
  * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
  */
@@ -865,40 +899,6 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
 	pagevec_release(&pvec);
 }
 
-static inline void move_to_lru(struct page *page)
-{
-	list_del(&page->lru);
-	if (PageActive(page)) {
-		/*
-		 * lru_cache_add_active checks that
-		 * the PG_active bit is off.
-		 */
-		ClearPageActive(page);
-		lru_cache_add_active(page);
-	} else {
-		lru_cache_add(page);
-	}
-	put_page(page);
-}
-
-/*
- * Add isolated pages on the list back to the LRU
- *
- * returns the number of pages put back.
- */
-int putback_lru_pages(struct list_head *l)
-{
-	struct page *page;
-	struct page *page2;
-	int count = 0;
-
-	list_for_each_entry_safe(page, page2, l, lru) {
-		move_to_lru(page);
-		count++;
-	}
-	return count;
-}
-
 /*
  * This moves pages from the active list to the inactive list.
  *
...