Commit c3fcf8a5 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] page migration cleanup: extract try_to_unmap from migration functions

Extract try_to_unmap and rename remove_references -> move_mapping

try_to_unmap() may significantly change the page state, for example by setting
the dirty bit.  It is therefore best to unmap in migrate_pages() before
calling any migration functions.

migrate_page_remove_references() will then only move the new page in place of
the old page in the mapping.  Rename the function to
migrate_page_move_mapping().

This allows us to get rid of the special unmapping for the fallback path.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 5b5c7120
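
To make the control-flow change concrete, here is a minimal userspace sketch of the ordering this patch establishes in migrate_pages(). None of these names come from the kernel: struct page here, unmap_page(), move_mapping() and migrate_one() are simplified stand-ins for try_to_unmap(), a per-filesystem migration function, and the migrate_pages() loop body.

#include <stdio.h>
#include <stdbool.h>

struct page { bool mapped; bool dirty; };

/* Stand-in for try_to_unmap(): tears down the ptes and, as a side
 * effect, may change page state, e.g. transferring the dirty bit. */
static bool unmap_page(struct page *p)
{
        p->mapped = false;
        p->dirty = true;
        return true;
}

/* Stand-in for a migration function such as migrate_page(): with the
 * page already unmapped, it only has to move the page in the mapping. */
static int move_mapping(struct page *newpage, struct page *page)
{
        *newpage = *page;
        return 0;
}

static int migrate_one(struct page *newpage, struct page *page)
{
        if (!unmap_page(page))
                return -1;      /* kernel: -EPERM, a vma has VM_LOCKED set */
        if (page->mapped)
                return -2;      /* kernel: -EAGAIN, mappings remain */
        return move_mapping(newpage, page);
}

int main(void)
{
        struct page oldp = { .mapped = true, .dirty = false }, newp = { 0 };
        printf("rc=%d, dirty carried over: %d\n",
               migrate_one(&newp, &oldp), newp.dirty);
        return 0;
}

The point of the reordering is visible in migrate_one(): by the time any migration function runs, the dirty bit has already been pulled back from the ptes, so the fallback path no longer needs an unmap step of its own.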
@@ -166,15 +166,14 @@ static int swap_page(struct page *page)
 }
 
 /*
- * Remove references for a page and establish the new page with the correct
- * basic settings to be able to stop accesses to the page.
+ * Replace the page in the mapping.
  *
  * The number of remaining references must be:
  * 1 for anonymous pages without a mapping
  * 2 for pages with a mapping
  * 3 for pages with a mapping and PagePrivate set.
  */
-static int migrate_page_remove_references(struct page *newpage,
+static int migrate_page_move_mapping(struct page *newpage,
 		struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
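
As a side note, the reference-count rule in the comment above can be restated as a tiny helper. expected_refs() below is hypothetical, not part of the patch; it only spells out the three cases:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical helper, not in the patch: the number of references that
 * must remain on the old page for the move to be allowed to proceed. */
static int expected_refs(bool has_mapping, bool page_private)
{
        if (!has_mapping)
                return 1;               /* anonymous page without a mapping */
        return page_private ? 3 : 2;    /* mapping, +1 if PagePrivate is set */
}

int main(void)
{
        printf("anon=%d file=%d file+private=%d\n",
               expected_refs(false, false),
               expected_refs(true, false),
               expected_refs(true, true));
        return 0;
}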
@@ -183,35 +182,6 @@ static int migrate_page_remove_references(struct page *newpage,
 	if (!mapping)
 		return -EAGAIN;
 
-	/*
-	 * Establish swap ptes for anonymous pages or destroy pte
-	 * maps for files.
-	 *
-	 * In order to reestablish file backed mappings the fault handlers
-	 * will take the radix tree_lock which may then be used to stop
-	 * processes from accessing this page until the new page is ready.
-	 *
-	 * A process accessing via a swap pte (an anonymous page) will take a
-	 * page_lock on the old page which will block the process until the
-	 * migration attempt is complete. At that time the PageSwapCache bit
-	 * will be examined. If the page was migrated then the PageSwapCache
-	 * bit will be clear and the operation to retrieve the page will be
-	 * retried which will find the new page in the radix tree. Then a new
-	 * direct mapping may be generated based on the radix tree contents.
-	 *
-	 * If the page was not migrated then the PageSwapCache bit
-	 * is still set and the operation may continue.
-	 */
-	if (try_to_unmap(page, 1) == SWAP_FAIL)
-		/* A vma has VM_LOCKED set -> permanent failure */
-		return -EPERM;
-
-	/*
-	 * Give up if we were unable to remove all mappings.
-	 */
-	if (page_mapcount(page))
-		return -EAGAIN;
-
 	write_lock_irq(&mapping->tree_lock);
 
 	radix_pointer = (struct page **)radix_tree_lookup_slot(
@@ -310,7 +280,7 @@ int migrate_page(struct page *newpage, struct page *page)
 
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
-	rc = migrate_page_remove_references(newpage, page);
+	rc = migrate_page_move_mapping(newpage, page);
 	if (rc)
 		return rc;
@@ -349,7 +319,7 @@ int buffer_migrate_page(struct page *newpage, struct page *page)
 
 	head = page_buffers(page);
 
-	rc = migrate_page_remove_references(newpage, page);
+	rc = migrate_page_move_mapping(newpage, page);
 	if (rc)
 		return rc;
@@ -481,6 +451,33 @@ int migrate_pages(struct list_head *from, struct list_head *to,
 		newpage = lru_to_page(to);
 		lock_page(newpage);
 
+		/*
+		 * Establish swap ptes for anonymous pages or destroy pte
+		 * maps for files.
+		 *
+		 * In order to reestablish file backed mappings the fault handlers
+		 * will take the radix tree_lock which may then be used to stop
+		 * processes from accessing this page until the new page is ready.
+		 *
+		 * A process accessing via a swap pte (an anonymous page) will take a
+		 * page_lock on the old page which will block the process until the
+		 * migration attempt is complete. At that time the PageSwapCache bit
+		 * will be examined. If the page was migrated then the PageSwapCache
+		 * bit will be clear and the operation to retrieve the page will be
+		 * retried which will find the new page in the radix tree. Then a new
+		 * direct mapping may be generated based on the radix tree contents.
+		 *
+		 * If the page was not migrated then the PageSwapCache bit
+		 * is still set and the operation may continue.
+		 */
+		rc = -EPERM;
+		if (try_to_unmap(page, 1) == SWAP_FAIL)
+			/* A vma has VM_LOCKED set -> permanent failure */
+			goto unlock_both;
+
+		rc = -EAGAIN;
+		if (page_mapped(page))
+			goto unlock_both;
+
 		/*
 		 * Pages are properly locked and writeback is complete.
 		 * Try to migrate the page.
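
The comment block added above is the heart of the locking protocol. As a rough single-threaded model (all names hypothetical; migration_completes() stands in for the race a real task would sleep through in lock_page()), the fault-side retry looks like this:

#include <stdio.h>
#include <stdbool.h>

struct page { bool swapcache; };

static struct page old_page = { .swapcache = true };
static struct page new_page = { .swapcache = false };
static struct page *radix_slot = &old_page;	/* the mapping's tree slot */

/* Stand-in for migration completing while the faulting task slept in
 * lock_page(): the new page replaces the old one in the radix tree and
 * the old page's PageSwapCache bit is cleared. */
static void migration_completes(void)
{
        radix_slot = &new_page;
        old_page.swapcache = false;
}

/* Model of the fault side: look the page up, "lock" it (a real task
 * would block here until migration finishes), then re-check the
 * PageSwapCache bit; a cleared bit means the lookup must be retried. */
static struct page *fault_lookup(void)
{
        for (;;) {
                struct page *p = radix_slot;
                migration_completes();	/* pretend migration raced with us */
                if (p == &old_page && !p->swapcache)
                        continue;	/* page was migrated away: retry */
                return p;		/* page is still valid, map it */
        }
}

int main(void)
{
        printf("fault resolved to the %s page\n",
               fault_lookup() == &new_page ? "new" : "old");
        return 0;
}

Because migration clears PageSwapCache on the old page, a faulting task cannot keep using a stale page: the cleared bit sends it back through the radix tree, where it finds the new page.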
@@ -501,17 +498,6 @@ int migrate_pages(struct list_head *from, struct list_head *to,
 			goto unlock_both;
 		}
 
-		/* Make sure the dirty bit is up to date */
-		if (try_to_unmap(page, 1) == SWAP_FAIL) {
-			rc = -EPERM;
-			goto unlock_both;
-		}
-
-		if (page_mapcount(page)) {
-			rc = -EAGAIN;
-			goto unlock_both;
-		}
-
 		/*
 		 * Default handling if a filesystem does not provide
 		 * a migration function. We can only migrate clean
...