Commit 7cbe34cf authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] Swap Migration V5: Add CONFIG_MIGRATION for page migration support

Include page migration if the system is NUMA or having a memory model that
allows distinct areas of memory (SPARSEMEM, DISCONTIGMEM).

And:
- Only include lru_add_drain_per_cpu if building for an SMP system.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 49d2e9cc
...@@ -178,7 +178,9 @@ extern int vm_swappiness; ...@@ -178,7 +178,9 @@ extern int vm_swappiness;
extern int isolate_lru_page(struct page *p); extern int isolate_lru_page(struct page *p);
extern int putback_lru_pages(struct list_head *l); extern int putback_lru_pages(struct list_head *l);
#ifdef CONFIG_MIGRATION
extern int migrate_pages(struct list_head *l, struct list_head *t); extern int migrate_pages(struct list_head *l, struct list_head *t);
#endif
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
/* linux/mm/shmem.c */ /* linux/mm/shmem.c */
......
...@@ -132,3 +132,10 @@ config SPLIT_PTLOCK_CPUS ...@@ -132,3 +132,10 @@ config SPLIT_PTLOCK_CPUS
default "4096" if ARM && !CPU_CACHE_VIPT default "4096" if ARM && !CPU_CACHE_VIPT
default "4096" if PARISC && !PA20 default "4096" if PARISC && !PA20
default "4" default "4"
#
# support for page migration
#
config MIGRATION
def_bool y if NUMA || SPARSEMEM || DISCONTIGMEM
depends on SWAP
...@@ -568,6 +568,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc) ...@@ -568,6 +568,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
return reclaimed; return reclaimed;
} }
#ifdef CONFIG_MIGRATION
/* /*
* swapout a single page * swapout a single page
* page is locked upon entry, unlocked on exit * page is locked upon entry, unlocked on exit
...@@ -656,8 +657,9 @@ int migrate_pages(struct list_head *l, struct list_head *t) ...@@ -656,8 +657,9 @@ int migrate_pages(struct list_head *l, struct list_head *t)
/* /*
* Skip locked pages during the first two passes to give the * Skip locked pages during the first two passes to give the
* functions holding the lock time to release the page. Later we use * functions holding the lock time to release the page. Later we
* lock_page to have a higher chance of acquiring the lock. * use lock_page() to have a higher chance of acquiring the
* lock.
*/ */
if (pass > 2) if (pass > 2)
lock_page(page); lock_page(page);
...@@ -669,15 +671,15 @@ int migrate_pages(struct list_head *l, struct list_head *t) ...@@ -669,15 +671,15 @@ int migrate_pages(struct list_head *l, struct list_head *t)
* Only wait on writeback if we have already done a pass where * Only wait on writeback if we have already done a pass where
* we we may have triggered writeouts for lots of pages. * we we may have triggered writeouts for lots of pages.
*/ */
if (pass > 0) if (pass > 0) {
wait_on_page_writeback(page); wait_on_page_writeback(page);
else } else {
if (PageWriteback(page)) { if (PageWriteback(page)) {
unlock_page(page); unlock_page(page);
goto retry_later; goto retry_later;
} }
}
#ifdef CONFIG_SWAP
if (PageAnon(page) && !PageSwapCache(page)) { if (PageAnon(page) && !PageSwapCache(page)) {
if (!add_to_swap(page)) { if (!add_to_swap(page)) {
unlock_page(page); unlock_page(page);
...@@ -686,16 +688,15 @@ int migrate_pages(struct list_head *l, struct list_head *t) ...@@ -686,16 +688,15 @@ int migrate_pages(struct list_head *l, struct list_head *t)
continue; continue;
} }
} }
#endif /* CONFIG_SWAP */
/* /*
* Page is properly locked and writeback is complete. * Page is properly locked and writeback is complete.
* Try to migrate the page. * Try to migrate the page.
*/ */
if (swap_page(page)) { if (!swap_page(page))
continue;
retry_later: retry_later:
retry++; retry++;
}
} }
if (retry && pass++ < 10) if (retry && pass++ < 10)
goto redo; goto redo;
...@@ -708,6 +709,7 @@ int migrate_pages(struct list_head *l, struct list_head *t) ...@@ -708,6 +709,7 @@ int migrate_pages(struct list_head *l, struct list_head *t)
return nr_failed + retry; return nr_failed + retry;
} }
#endif
/* /*
* zone->lru_lock is heavily contended. Some of the functions that * zone->lru_lock is heavily contended. Some of the functions that
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment