Commit 7db7671f authored by Hugh Dickins, committed by Linus Torvalds

mm: page migration trylock newpage at same level as oldpage

Clean up page migration a little by moving the trylock of newpage from
move_to_new_page() into __unmap_and_move(), where the old page has been
locked.  Adjust unmap_and_move_huge_page() and balloon_page_migrate()
accordingly.
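
With the trylock moved up, both pages are locked and unlocked at the
same level in __unmap_and_move().  Condensed from mm/migrate.c as
patched, a simplified sketch of the resulting flow (the balloon,
page-cache corner-case and anon_vma handling shown in the diff below
are elided):

	static int __unmap_and_move(struct page *page, struct page *newpage,
				    int force, enum migrate_mode mode)
	{
		int rc = -EAGAIN;
		int page_was_mapped = 0;

		if (!trylock_page(page))		/* old page first */
			goto out;
		if (unlikely(!trylock_page(newpage)))	/* newpage at same level */
			goto out_unlock;		/* -EAGAIN now, not BUG() */

		if (page_mapped(page)) {		/* establish migration ptes */
			try_to_unmap(page,
				TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
			page_was_mapped = 1;
		}
		if (!page_mapped(page))
			rc = move_to_new_page(newpage, page, page_was_mapped, mode);
		if (rc && page_was_mapped)
			remove_migration_ptes(page, page);

		unlock_page(newpage);			/* reverse order on exit */
	out_unlock:
		unlock_page(page);
	out:
		return rc;
	}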

But make one kind-of-functional change on the way: whereas trylock of
newpage used to BUG() if it failed, now simply return -EAGAIN if so.
Cutting out BUG()s is good, right?  But, to be honest, this is really to
extend the usefulness of the custom put_new_page feature, allowing a pool
of new pages to be shared perhaps with racing uses.
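
In-tree, compaction already works this way: compaction_alloc() hands
out pages from its compact_control freepages list, and
compaction_free() puts a failed target back on it.  A sketch of such a
get_new_page/put_new_page pair (struct page_pool and the function
names are illustrative, not kernel API; the callback signatures are
those of new_page_t and free_page_t in include/linux/migrate.h):

	#include <linux/list.h>
	#include <linux/migrate.h>
	#include <linux/mm.h>

	struct page_pool {			/* hypothetical caller-owned pool */
		struct list_head freepages;
	};

	static struct page *pool_get_new_page(struct page *page,
					      unsigned long private, int **result)
	{
		struct page_pool *pool = (struct page_pool *)private;
		struct page *newpage;

		newpage = list_first_entry_or_null(&pool->freepages,
						   struct page, lru);
		if (newpage)
			list_del(&newpage->lru);
		return newpage;
	}

	static void pool_put_new_page(struct page *page, unsigned long private)
	{
		struct page_pool *pool = (struct page_pool *)private;

		/* target unused (e.g. -EAGAIN from the trylock): recycle it */
		list_add(&page->lru, &pool->freepages);
	}

Passed as the get_new_page/put_new_page arguments of migrate_pages(),
a new page that could not be used (now including the trylock-failure
case) is handed back to the pool rather than freed.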

Use an "else" instead of that "skip_unmap" label.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rafael Aquini <aquini@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2def7424
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -199,23 +199,17 @@ int balloon_page_migrate(struct page *newpage,
 	struct balloon_dev_info *balloon = balloon_page_device(page);
 	int rc = -EAGAIN;
 
-	/*
-	 * Block others from accessing the 'newpage' when we get around to
-	 * establishing additional references. We should be the only one
-	 * holding a reference to the 'newpage' at this point.
-	 */
-	BUG_ON(!trylock_page(newpage));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
 
 	if (WARN_ON(!__is_movable_balloon_page(page))) {
 		dump_page(page, "not movable balloon page");
-		unlock_page(newpage);
 		return rc;
 	}
 
 	if (balloon && balloon->migratepage)
 		rc = balloon->migratepage(balloon, newpage, page, mode);
 
-	unlock_page(newpage);
 	return rc;
 }
 #endif /* CONFIG_BALLOON_COMPACTION */
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -727,13 +727,8 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 	struct address_space *mapping;
 	int rc;
 
-	/*
-	 * Block others from accessing the page when we get around to
-	 * establishing additional references. We are the only one
-	 * holding a reference to the new page at this point.
-	 */
-	if (!trylock_page(newpage))
-		BUG();
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
 
 	/* Prepare mapping for the new page.*/
 	newpage->index = page->index;
@@ -774,9 +769,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		remove_migration_ptes(page, newpage);
 		page->mapping = NULL;
 	}
-
-	unlock_page(newpage);
-
 	return rc;
 }
@@ -861,6 +853,17 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		}
 	}
 
+	/*
+	 * Block others from accessing the new page when we get around to
+	 * establishing additional references. We are usually the only one
+	 * holding a reference to newpage at this point. We used to have a BUG
+	 * here if trylock_page(newpage) fails, but would like to allow for
+	 * cases where there might be a race with the previous use of newpage.
+	 * This is much like races on refcount of oldpage: just don't BUG().
+	 */
+	if (unlikely(!trylock_page(newpage)))
+		goto out_unlock;
+
 	if (unlikely(isolated_balloon_page(page))) {
 		/*
 		 * A ballooned page does not need any special attention from
@@ -870,7 +873,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		 * the page migration right away (proteced by page lock).
 		 */
 		rc = balloon_page_migrate(newpage, page, mode);
-		goto out_unlock;
+		goto out_unlock_both;
 	}
 
 	/*
@@ -889,30 +892,27 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		VM_BUG_ON_PAGE(PageAnon(page), page);
 		if (page_has_private(page)) {
 			try_to_free_buffers(page);
-			goto out_unlock;
+			goto out_unlock_both;
 		}
-		goto skip_unmap;
-	}
-
-	/* Establish migration ptes or remove ptes */
-	if (page_mapped(page)) {
+	} else if (page_mapped(page)) {
+		/* Establish migration ptes */
 		try_to_unmap(page,
 			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 		page_was_mapped = 1;
 	}
 
-skip_unmap:
 	if (!page_mapped(page))
 		rc = move_to_new_page(newpage, page, page_was_mapped, mode);
 
 	if (rc && page_was_mapped)
 		remove_migration_ptes(page, page);
 
+out_unlock_both:
+	unlock_page(newpage);
+out_unlock:
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma)
 		put_anon_vma(anon_vma);
-
-out_unlock:
 	unlock_page(page);
 out:
 	return rc;
@@ -1056,6 +1056,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (PageAnon(hpage))
 		anon_vma = page_get_anon_vma(hpage);
 
+	if (unlikely(!trylock_page(new_hpage)))
+		goto put_anon;
+
 	if (page_mapped(hpage)) {
 		try_to_unmap(hpage,
 			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
@@ -1068,6 +1071,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (rc != MIGRATEPAGE_SUCCESS && page_was_mapped)
 		remove_migration_ptes(hpage, hpage);
 
+	unlock_page(new_hpage);
+
+put_anon:
 	if (anon_vma)
 		put_anon_vma(anon_vma);