Commit 5b205c7f authored by David Hildenbrand, committed by Andrew Morton

mm/migrate_device: page_remove_rmap() -> folio_remove_rmap_pte()

Let's convert migrate_vma_collect_pmd().  While at it, perform more folio
conversion.
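
A condensed sketch of the pattern the diff applies (simplified, not a
verbatim excerpt): resolve the folio once via page_folio(), then use the
folio-based reference, lock and rmap calls in place of their page-based
counterparts:

	struct folio *folio = page_folio(page);

	folio_get(folio);
	if (folio_trylock(folio)) {
		/* ... replace the present pte by a migration entry ... */
		folio_remove_rmap_pte(folio, page, vma);
		folio_put(folio);	/* drop the reference taken above */
	} else {
		/* Best effort only: back off if the folio lock is contended. */
		folio_put(folio);
	}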

Link: https://lkml.kernel.org/r/20231220224504.646757-30-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c4626503
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -107,6 +107,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 	for (; addr < end; addr += PAGE_SIZE, ptep++) {
 		unsigned long mpfn = 0, pfn;
+		struct folio *folio;
 		struct page *page;
 		swp_entry_t entry;
 		pte_t pte;
@@ -168,41 +169,43 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 		}
 
 		/*
-		 * By getting a reference on the page we pin it and that blocks
+		 * By getting a reference on the folio we pin it and that blocks
 		 * any kind of migration. Side effect is that it "freezes" the
 		 * pte.
 		 *
-		 * We drop this reference after isolating the page from the lru
-		 * for non device page (device page are not on the lru and thus
+		 * We drop this reference after isolating the folio from the lru
+		 * for non device folio (device folio are not on the lru and thus
 		 * can't be dropped from it).
 		 */
-		get_page(page);
+		folio = page_folio(page);
+		folio_get(folio);
 
 		/*
-		 * We rely on trylock_page() to avoid deadlock between
+		 * We rely on folio_trylock() to avoid deadlock between
 		 * concurrent migrations where each is waiting on the others
-		 * page lock. If we can't immediately lock the page we fail this
+		 * folio lock. If we can't immediately lock the folio we fail this
 		 * migration as it is only best effort anyway.
 		 *
-		 * If we can lock the page it's safe to set up a migration entry
-		 * now. In the common case where the page is mapped once in a
+		 * If we can lock the folio it's safe to set up a migration entry
+		 * now. In the common case where the folio is mapped once in a
 		 * single process setting up the migration entry now is an
 		 * optimisation to avoid walking the rmap later with
 		 * try_to_migrate().
 		 */
-		if (trylock_page(page)) {
+		if (folio_trylock(folio)) {
 			bool anon_exclusive;
 			pte_t swp_pte;
 
 			flush_cache_page(vma, addr, pte_pfn(pte));
-			anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
+			anon_exclusive = folio_test_anon(folio) &&
+					 PageAnonExclusive(page);
 			if (anon_exclusive) {
 				pte = ptep_clear_flush(vma, addr, ptep);
 
 				if (page_try_share_anon_rmap(page)) {
 					set_pte_at(mm, addr, ptep, pte);
-					unlock_page(page);
-					put_page(page);
+					folio_unlock(folio);
+					folio_put(folio);
 					mpfn = 0;
 					goto next;
 				}
@@ -214,7 +217,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 
 			/* Set the dirty flag on the folio now the pte is gone. */
 			if (pte_dirty(pte))
-				folio_mark_dirty(page_folio(page));
+				folio_mark_dirty(folio);
 
 			/* Setup special migration page table entry */
 			if (mpfn & MIGRATE_PFN_WRITE)
@@ -248,16 +251,16 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 			/*
 			 * This is like regular unmap: we remove the rmap and
-			 * drop page refcount. Page won't be freed, as we took
-			 * a reference just above.
+			 * drop the folio refcount. The folio won't be freed, as
+			 * we took a reference just above.
 			 */
-			page_remove_rmap(page, vma, false);
-			put_page(page);
+			folio_remove_rmap_pte(folio, page, vma);
+			folio_put(folio);
 
 			if (pte_present(pte))
 				unmapped++;
 		} else {
-			put_page(page);
+			folio_put(folio);
 			mpfn = 0;
 		}