Commit c33db292 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

migrate: convert unmap_and_move_huge_page() to use folios

Saves several calls to compound_head() and removes a couple of uses of
page->lru.

Link: https://lkml.kernel.org/r/20220902194653.1739778-52-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 682a71a1
...@@ -1263,7 +1263,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, ...@@ -1263,7 +1263,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
if (!hugepage_migration_supported(page_hstate(hpage))) if (!hugepage_migration_supported(page_hstate(hpage)))
return -ENOSYS; return -ENOSYS;
if (page_count(hpage) == 1) { if (folio_ref_count(src) == 1) {
/* page was freed from under us. So we are done. */ /* page was freed from under us. So we are done. */
putback_active_hugepage(hpage); putback_active_hugepage(hpage);
return MIGRATEPAGE_SUCCESS; return MIGRATEPAGE_SUCCESS;
...@@ -1274,7 +1274,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, ...@@ -1274,7 +1274,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
return -ENOMEM; return -ENOMEM;
dst = page_folio(new_hpage); dst = page_folio(new_hpage);
if (!trylock_page(hpage)) { if (!folio_trylock(src)) {
if (!force) if (!force)
goto out; goto out;
switch (mode) { switch (mode) {
...@@ -1284,29 +1284,29 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, ...@@ -1284,29 +1284,29 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
default: default:
goto out; goto out;
} }
lock_page(hpage); folio_lock(src);
} }
/* /*
* Check for pages which are in the process of being freed. Without * Check for pages which are in the process of being freed. Without
* page_mapping() set, hugetlbfs specific move page routine will not * folio_mapping() set, hugetlbfs specific move page routine will not
* be called and we could leak usage counts for subpools. * be called and we could leak usage counts for subpools.
*/ */
if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) { if (hugetlb_page_subpool(hpage) && !folio_mapping(src)) {
rc = -EBUSY; rc = -EBUSY;
goto out_unlock; goto out_unlock;
} }
if (PageAnon(hpage)) if (folio_test_anon(src))
anon_vma = page_get_anon_vma(hpage); anon_vma = page_get_anon_vma(&src->page);
if (unlikely(!trylock_page(new_hpage))) if (unlikely(!folio_trylock(dst)))
goto put_anon; goto put_anon;
if (page_mapped(hpage)) { if (folio_mapped(src)) {
enum ttu_flags ttu = 0; enum ttu_flags ttu = 0;
if (!PageAnon(hpage)) { if (!folio_test_anon(src)) {
/* /*
* In shared mappings, try_to_unmap could potentially * In shared mappings, try_to_unmap could potentially
* call huge_pmd_unshare. Because of this, take * call huge_pmd_unshare. Because of this, take
...@@ -1327,7 +1327,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, ...@@ -1327,7 +1327,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
i_mmap_unlock_write(mapping); i_mmap_unlock_write(mapping);
} }
if (!page_mapped(hpage)) if (!folio_mapped(src))
rc = move_to_new_folio(dst, src, mode); rc = move_to_new_folio(dst, src, mode);
if (page_was_mapped) if (page_was_mapped)
...@@ -1335,7 +1335,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, ...@@ -1335,7 +1335,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
rc == MIGRATEPAGE_SUCCESS ? dst : src, false); rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
unlock_put_anon: unlock_put_anon:
unlock_page(new_hpage); folio_unlock(dst);
put_anon: put_anon:
if (anon_vma) if (anon_vma)
...@@ -1347,12 +1347,12 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, ...@@ -1347,12 +1347,12 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
} }
out_unlock: out_unlock:
unlock_page(hpage); folio_unlock(src);
out: out:
if (rc == MIGRATEPAGE_SUCCESS) if (rc == MIGRATEPAGE_SUCCESS)
putback_active_hugepage(hpage); putback_active_hugepage(hpage);
else if (rc != -EAGAIN) else if (rc != -EAGAIN)
list_move_tail(&hpage->lru, ret); list_move_tail(&src->lru, ret);
/* /*
* If migration was not successful and there's a freeing callback, use * If migration was not successful and there's a freeing callback, use
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment