Commit ab09243a authored by Alistair Popple, committed by Linus Torvalds

mm/migrate.c: remove MIGRATE_PFN_LOCKED

MIGRATE_PFN_LOCKED is used to indicate to migrate_vma_prepare() that a
source page was already locked during migrate_vma_collect().  If it
wasn't, then a second attempt is made to lock the page.  However, if
the first attempt failed it's unlikely a second attempt will succeed,
and the retry adds complexity.  So clean this up by removing the retry
and MIGRATE_PFN_LOCKED flag.

Destination pages are also meant to have the MIGRATE_PFN_LOCKED flag
set, but nothing actually checks that.
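
With the flag gone, a driver's destination setup reduces to allocating and
locking the destination page and storing the bare migrate_pfn() value.  A
minimal sketch of that per-page step, assuming the migrate_vma flow described
in Documentation/vm/hmm.rst (the helper name example_setup_dst() is invented
for illustration and is not part of this patch):

    #include <linux/migrate.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Illustrative only: fill one dst[] entry for a page collected in src[]. */
    static int example_setup_dst(struct migrate_vma *args, unsigned long i)
    {
            struct page *dpage;

            if (!(args->src[i] & MIGRATE_PFN_MIGRATE))
                    return 0;       /* core mm could not unmap this page */

            dpage = alloc_page(GFP_HIGHUSER);
            if (!dpage)
                    return -ENOMEM;

            lock_page(dpage);       /* destination page must still be locked */
            args->dst[i] = migrate_pfn(page_to_pfn(dpage));
            if (args->src[i] & MIGRATE_PFN_WRITE)
                    args->dst[i] |= MIGRATE_PFN_WRITE;

            return 0;
    }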

Link: https://lkml.kernel.org/r/20211025041608.289017-1-apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0ef02462
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -360,7 +360,7 @@ between device driver specific code and shared common code:
    system memory page, locks the page with ``lock_page()``, and fills in the
    ``dst`` array entry with::
 
-     dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+     dst[i] = migrate_pfn(page_to_pfn(dpage));
 
    Now that the driver knows that this page is being migrated, it can
    invalidate device private MMU mappings and copy device private memory
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -560,7 +560,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
                                   gpa, 0, page_shift);
 
         if (ret == U_SUCCESS)
-                *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
+                *mig.dst = migrate_pfn(pfn);
         else {
                 unlock_page(dpage);
                 __free_page(dpage);
@@ -774,7 +774,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
                 }
         }
 
-        *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+        *mig.dst = migrate_pfn(page_to_pfn(dpage));
         migrate_vma_pages(&mig);
 out_finalize:
         migrate_vma_finalize(&mig);
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -317,7 +317,6 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                 migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
                 svm_migrate_get_vram_page(prange, migrate->dst[i]);
                 migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-                migrate->dst[i] |= MIGRATE_PFN_LOCKED;
                 src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
                                       DMA_TO_DEVICE);
                 r = dma_mapping_error(dev, src[i]);
@@ -610,7 +609,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
                               dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
 
                 migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
-                migrate->dst[i] |= MIGRATE_PFN_LOCKED;
                 j++;
         }
 
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -166,7 +166,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
                 goto error_dma_unmap;
         mutex_unlock(&svmm->mutex);
 
-        args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+        args->dst[0] = migrate_pfn(page_to_pfn(dpage));
         return 0;
 
 error_dma_unmap:
@@ -602,7 +602,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
                 ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
         if (src & MIGRATE_PFN_WRITE)
                 *pfn |= NVIF_VMM_PFNMAP_V0_W;
-        return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+        return migrate_pfn(page_to_pfn(dpage));
 
 out_dma_unmap:
         dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -110,7 +110,6 @@ static inline int migrate_misplaced_page(struct page *page,
  */
 #define MIGRATE_PFN_VALID      (1UL << 0)
 #define MIGRATE_PFN_MIGRATE    (1UL << 1)
-#define MIGRATE_PFN_LOCKED     (1UL << 2)
 #define MIGRATE_PFN_WRITE      (1UL << 3)
 #define MIGRATE_PFN_SHIFT      6
 
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -613,8 +613,7 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
                  */
                 rpage->zone_device_data = dmirror;
 
-                *dst = migrate_pfn(page_to_pfn(dpage)) |
-                            MIGRATE_PFN_LOCKED;
+                *dst = migrate_pfn(page_to_pfn(dpage));
                 if ((*src & MIGRATE_PFN_WRITE) ||
                     (!spage && args->vma->vm_flags & VM_WRITE))
                         *dst |= MIGRATE_PFN_WRITE;
@@ -1137,7 +1136,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
                 lock_page(dpage);
                 xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
                 copy_highpage(dpage, spage);
-                *dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+                *dst = migrate_pfn(page_to_pfn(dpage));
                 if (*src & MIGRATE_PFN_WRITE)
                         *dst |= MIGRATE_PFN_WRITE;
         }
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2362,7 +2362,6 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
                  * can't be dropped from it).
                  */
                 get_page(page);
-                migrate->cpages++;
 
                 /*
                  * Optimize for the common case where page is only mapped once
@@ -2372,7 +2371,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
                 if (trylock_page(page)) {
                         pte_t swp_pte;
 
-                        mpfn |= MIGRATE_PFN_LOCKED;
+                        migrate->cpages++;
                         ptep_get_and_clear(mm, addr, ptep);
 
                         /* Setup special migration page table entry */
@@ -2406,6 +2405,9 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
 
                         if (pte_present(pte))
                                 unmapped++;
+                } else {
+                        put_page(page);
+                        mpfn = 0;
                 }
 
 next:
@@ -2510,15 +2512,17 @@ static bool migrate_vma_check_page(struct page *page)
 }
 
 /*
- * migrate_vma_prepare() - lock pages and isolate them from the lru
+ * migrate_vma_unmap() - replace page mapping with special migration pte entry
  * @migrate: migrate struct containing all migration information
  *
- * This locks pages that have been collected by migrate_vma_collect(). Once each
- * page is locked it is isolated from the lru (for non-device pages). Finally,
- * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
- * migrated by concurrent kernel threads.
+ * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
+ * special migration pte entry and check if it has been pinned. Pinned pages are
+ * restored because we cannot migrate them.
+ *
+ * This is the last step before we call the device driver callback to allocate
+ * destination memory and copy contents of original page over to new page.
  */
-static void migrate_vma_prepare(struct migrate_vma *migrate)
+static void migrate_vma_unmap(struct migrate_vma *migrate)
 {
         const unsigned long npages = migrate->npages;
         const unsigned long start = migrate->start;
@@ -2527,32 +2531,12 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
 
         lru_add_drain();
 
-        for (i = 0; (i < npages) && migrate->cpages; i++) {
+        for (i = 0; i < npages; i++) {
                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
-                bool remap = true;
 
                 if (!page)
                         continue;
 
-                if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
-                        /*
-                         * Because we are migrating several pages there can be
-                         * a deadlock between 2 concurrent migration where each
-                         * are waiting on each other page lock.
-                         *
-                         * Make migrate_vma() a best effort thing and backoff
-                         * for any page we can not lock right away.
-                         */
-                        if (!trylock_page(page)) {
-                                migrate->src[i] = 0;
-                                migrate->cpages--;
-                                put_page(page);
-                                continue;
-                        }
-                        remap = false;
-                        migrate->src[i] |= MIGRATE_PFN_LOCKED;
-                }
-
                 /* ZONE_DEVICE pages are not on LRU */
                 if (!is_zone_device_page(page)) {
                         if (!PageLRU(page) && allow_drain) {
@@ -2562,16 +2546,9 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
                         }
 
                         if (isolate_lru_page(page)) {
-                                if (remap) {
-                                        migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-                                        migrate->cpages--;
-                                        restore++;
-                                } else {
-                                        migrate->src[i] = 0;
-                                        unlock_page(page);
-                                        migrate->cpages--;
-                                        put_page(page);
-                                }
+                                migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+                                migrate->cpages--;
+                                restore++;
                                 continue;
                         }
 
@@ -2579,80 +2556,20 @@ static void migrate_vma_prepare(struct migrate_vma *migrate)
                         put_page(page);
                 }
 
-                if (!migrate_vma_check_page(page)) {
-                        if (remap) {
-                                migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-                                migrate->cpages--;
-                                restore++;
-
-                                if (!is_zone_device_page(page)) {
-                                        get_page(page);
-                                        putback_lru_page(page);
-                                }
-                        } else {
-                                migrate->src[i] = 0;
-                                unlock_page(page);
-                                migrate->cpages--;
-
-                                if (!is_zone_device_page(page))
-                                        putback_lru_page(page);
-                                else
-                                        put_page(page);
-                        }
-                }
-        }
-
-        for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
-                struct page *page = migrate_pfn_to_page(migrate->src[i]);
-
-                if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
-                        continue;
-
-                remove_migration_pte(page, migrate->vma, addr, page);
-
-                migrate->src[i] = 0;
-                unlock_page(page);
-                put_page(page);
-                restore--;
-        }
-}
-
-/*
- * migrate_vma_unmap() - replace page mapping with special migration pte entry
- * @migrate: migrate struct containing all migration information
- *
- * Replace page mapping (CPU page table pte) with a special migration pte entry
- * and check again if it has been pinned. Pinned pages are restored because we
- * cannot migrate them.
- *
- * This is the last step before we call the device driver callback to allocate
- * destination memory and copy contents of original page over to new page.
- */
-static void migrate_vma_unmap(struct migrate_vma *migrate)
-{
-        const unsigned long npages = migrate->npages;
-        const unsigned long start = migrate->start;
-        unsigned long addr, i, restore = 0;
-
-        for (i = 0; i < npages; i++) {
-                struct page *page = migrate_pfn_to_page(migrate->src[i]);
-
-                if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
-                        continue;
-
-                if (page_mapped(page)) {
-                        try_to_migrate(page, 0);
-                        if (page_mapped(page))
-                                goto restore;
-                }
+                if (page_mapped(page))
+                        try_to_migrate(page, 0);
 
-                if (migrate_vma_check_page(page))
-                        continue;
+                if (page_mapped(page) || !migrate_vma_check_page(page)) {
+                        if (!is_zone_device_page(page)) {
+                                get_page(page);
+                                putback_lru_page(page);
+                        }
 
-restore:
-                migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
-                migrate->cpages--;
-                restore++;
+                        migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
+                        migrate->cpages--;
+                        restore++;
+                        continue;
+                }
         }
 
         for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
@@ -2665,12 +2582,8 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
 
                 migrate->src[i] = 0;
                 unlock_page(page);
+                put_page(page);
                 restore--;
-
-                if (is_zone_device_page(page))
-                        put_page(page);
-                else
-                        putback_lru_page(page);
         }
 }
 
@@ -2693,8 +2606,8 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
  * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
  * flag set). Once these are allocated and copied, the caller must update each
  * corresponding entry in the dst array with the pfn value of the destination
- * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
- * (destination pages must have their struct pages locked, via lock_page()).
+ * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
+ * lock_page().
  *
  * Note that the caller does not have to migrate all the pages that are marked
  * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
@@ -2763,8 +2676,6 @@ int migrate_vma_setup(struct migrate_vma *args)
 
         migrate_vma_collect(args);
 
-        if (args->cpages)
-                migrate_vma_prepare(args);
         if (args->cpages)
                 migrate_vma_unmap(args);
 
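
The mm/migrate.c documentation hunk above spells out the caller contract for
migrate_vma_setup() after this change.  Sketched end to end under the same
caveats as the snippet in the commit message (example_migrate_range() is an
invented name, the 16-page window and the MIGRATE_VMA_SELECT_SYSTEM selection
are just one possible choice, error handling trimmed):

    #include <linux/migrate.h>
    #include <linux/mm.h>

    /* Illustrative only: one pass of the migrate_vma flow after this patch. */
    static int example_migrate_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end)
    {
            unsigned long src[16] = { 0 }, dst[16] = { 0 }; /* assumes <= 16 pages */
            struct migrate_vma args = {
                    .vma    = vma,
                    .start  = start,
                    .end    = end,
                    .src    = src,
                    .dst    = dst,
                    .flags  = MIGRATE_VMA_SELECT_SYSTEM,
            };
            unsigned long i;
            int ret;

            ret = migrate_vma_setup(&args); /* collect + unmap, no separate prepare step */
            if (ret)
                    return ret;

            for (i = 0; i < args.npages; i++) {
                    if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
                            continue;
                    /*
                     * A real driver allocates and locks a destination page
                     * here, copies the data, and sets
                     * dst[i] = migrate_pfn(page_to_pfn(dpage)), as in the
                     * sketch in the commit message.
                     */
            }

            migrate_vma_pages(&args);
            migrate_vma_finalize(&args);
            return 0;
    }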