Commit 8c3328f1 authored by Jérôme Glisse, committed by Linus Torvalds

mm/migrate: migrate_vma() unmap page from vma while collecting pages

The common case for migration of a virtual address range is that pages are
mapped only once, inside the vma in which the migration is taking place.
Because we already walk the CPU page table for that range, we can do the
unmap there directly and set up the special migration swap entry.
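
In outline, the collection walk now tries to take the page lock on the spot; when that
succeeds it replaces the present PTE with a migration entry and drops the rmap
immediately, so the later unmap step has nothing left to do for that page. The
kernel-style sketch below condenses that flow. The helper name
collect_and_unmap_one() and its argument list are hypothetical, invented only to frame
the excerpt; the calls inside it mirror the hunks in the diff further down, with error
paths and huge-page cases omitted.

#include <linux/mm.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/swapops.h>

/*
 * Hypothetical helper condensing the per-PTE unmap that this patch adds to
 * migrate_vma_collect_pmd() (see the diff below).
 */
static void collect_and_unmap_one(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, pte_t pte, struct page *page,
				  unsigned long *mpfn, unsigned long *unmapped)
{
	swp_entry_t entry;
	pte_t swp_pte;

	/* Only handle the common, uncontended case: back off if the lock is busy. */
	if (!trylock_page(page))
		return;

	*mpfn |= MIGRATE_PFN_LOCKED;

	/* Clear the present PTE while the PTE lock is held ... */
	ptep_get_and_clear(mm, addr, ptep);

	/* ... and install a special migration entry in its place. */
	entry = make_migration_entry(page, pte_write(pte));
	swp_pte = swp_entry_to_pte(entry);
	if (pte_soft_dirty(pte))
		swp_pte = pte_swp_mksoft_dirty(swp_pte);
	set_pte_at(mm, addr, ptep, swp_pte);

	/*
	 * Like a regular unmap: drop the rmap and the mapping reference.
	 * The reference taken earlier during collection keeps the page alive.
	 */
	page_remove_rmap(page, false);
	put_page(page);
	(*unmapped)++;
}

As in the patch, the caller only needs to flush_tlb_range() for the walked range when
*unmapped ends up non-zero, i.e. when at least one entry was actually modified.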

Link: http://lkml.kernel.org/r/20170817000548.32038-16-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: Evgeny Baskakov <ebaskakov@nvidia.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Mark Hairgrove <mhairgrove@nvidia.com>
Signed-off-by: Sherry Cheung <SCheung@nvidia.com>
Signed-off-by: Subhash Gutti <sgutti@nvidia.com>
Cc: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Nellans <dnellans@nvidia.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Bob Liu <liubo95@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8763cb45
mm/migrate.c

@@ -2149,7 +2149,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 	struct migrate_vma *migrate = walk->private;
 	struct vm_area_struct *vma = walk->vma;
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long addr = start;
+	unsigned long addr = start, unmapped = 0;
 	spinlock_t *ptl;
 	pte_t *ptep;
 
@@ -2194,9 +2194,12 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 		return migrate_vma_collect_hole(start, end, walk);
 
 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+	arch_enter_lazy_mmu_mode();
+
 	for (; addr < end; addr += PAGE_SIZE, ptep++) {
 		unsigned long mpfn, pfn;
 		struct page *page;
+		swp_entry_t entry;
 		pte_t pte;
 
 		pte = *ptep;
@@ -2228,11 +2231,44 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 		mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
 		mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
 
+		/*
+		 * Optimize for the common case where page is only mapped once
+		 * in one process. If we can lock the page, then we can safely
+		 * set up a special migration page table entry now.
+		 */
+		if (trylock_page(page)) {
+			pte_t swp_pte;
+
+			mpfn |= MIGRATE_PFN_LOCKED;
+			ptep_get_and_clear(mm, addr, ptep);
+
+			/* Setup special migration page table entry */
+			entry = make_migration_entry(page, pte_write(pte));
+			swp_pte = swp_entry_to_pte(entry);
+			if (pte_soft_dirty(pte))
+				swp_pte = pte_swp_mksoft_dirty(swp_pte);
+			set_pte_at(mm, addr, ptep, swp_pte);
+
+			/*
+			 * This is like regular unmap: we remove the rmap and
+			 * drop page refcount. Page won't be freed, as we took
+			 * a reference just above.
+			 */
+			page_remove_rmap(page, false);
+			put_page(page);
+			unmapped++;
+		}
+
 next:
 		migrate->src[migrate->npages++] = mpfn;
 	}
+	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(ptep - 1, ptl);
 
+	/* Only flush the TLB if we actually modified any entries */
+	if (unmapped)
+		flush_tlb_range(walk->vma, start, end);
+
 	return 0;
 }
 
@@ -2257,7 +2293,13 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
 	mm_walk.mm = migrate->vma->vm_mm;
 	mm_walk.private = migrate;
 
+	mmu_notifier_invalidate_range_start(mm_walk.mm,
+					    migrate->start,
+					    migrate->end);
 	walk_page_range(migrate->start, migrate->end, &mm_walk);
+	mmu_notifier_invalidate_range_end(mm_walk.mm,
+					  migrate->start,
+					  migrate->end);
 
 	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
 }
@@ -2305,32 +2347,37 @@ static bool migrate_vma_check_page(struct page *page)
 static void migrate_vma_prepare(struct migrate_vma *migrate)
 {
 	const unsigned long npages = migrate->npages;
+	const unsigned long start = migrate->start;
+	unsigned long addr, i, restore = 0;
 	bool allow_drain = true;
-	unsigned long i;
 
 	lru_add_drain();
 
 	for (i = 0; (i < npages) && migrate->cpages; i++) {
 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
+		bool remap = true;
 
 		if (!page)
 			continue;
 
-		/*
-		 * Because we are migrating several pages there can be
-		 * a deadlock between 2 concurrent migration where each
-		 * are waiting on each other page lock.
-		 *
-		 * Make migrate_vma() a best effort thing and backoff
-		 * for any page we can not lock right away.
-		 */
-		if (!trylock_page(page)) {
-			migrate->src[i] = 0;
-			migrate->cpages--;
-			put_page(page);
-			continue;
+		if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
+			/*
+			 * Because we are migrating several pages there can be
+			 * a deadlock between 2 concurrent migration where each
+			 * are waiting on each other page lock.
+			 *
+			 * Make migrate_vma() a best effort thing and backoff
+			 * for any page we can not lock right away.
+			 */
+			if (!trylock_page(page)) {
+				migrate->src[i] = 0;
+				migrate->cpages--;
+				put_page(page);
+				continue;
+			}
+			remap = false;
+			migrate->src[i] |= MIGRATE_PFN_LOCKED;
 		}
-		migrate->src[i] |= MIGRATE_PFN_LOCKED;
 
 		if (!PageLRU(page) && allow_drain) {
 			/* Drain CPU's pagevec */
@@ -2339,21 +2386,50 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
 		}
 
 		if (isolate_lru_page(page)) {
-			migrate->src[i] = 0;
-			unlock_page(page);
-			migrate->cpages--;
-			put_page(page);
+			if (remap) {
+				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+				migrate->cpages--;
+				restore++;
+			} else {
+				migrate->src[i] = 0;
+				unlock_page(page);
+				migrate->cpages--;
+				put_page(page);
+			}
 			continue;
 		}
 
 		if (!migrate_vma_check_page(page)) {
-			migrate->src[i] = 0;
-			unlock_page(page);
-			migrate->cpages--;
-
-			putback_lru_page(page);
+			if (remap) {
+				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+				migrate->cpages--;
+				restore++;
+
+				get_page(page);
+				putback_lru_page(page);
+			} else {
+				migrate->src[i] = 0;
+				unlock_page(page);
+				migrate->cpages--;
+
+				putback_lru_page(page);
+			}
 		}
 	}
+
+	for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
+		struct page *page = migrate_pfn_to_page(migrate->src[i]);
+
+		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
+			continue;
+
+		remove_migration_pte(page, migrate->vma, addr, page);
+
+		migrate->src[i] = 0;
+		unlock_page(page);
+		put_page(page);
+		restore--;
+	}
 }
 
 /*
@@ -2380,12 +2456,19 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
 		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
 			continue;
 
-		try_to_unmap(page, flags);
-		if (page_mapped(page) || !migrate_vma_check_page(page)) {
-			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-			migrate->cpages--;
-			restore++;
+		if (page_mapped(page)) {
+			try_to_unmap(page, flags);
+			if (page_mapped(page))
+				goto restore;
 		}
+
+		if (migrate_vma_check_page(page))
+			continue;
+
+restore:
+		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+		migrate->cpages--;
+		restore++;
 	}
 
 	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {