Commit a010d98a authored by Alex Sierra, committed by Alex Deucher

drm/amdkfd: set owner ref to svm range prefault

svm_range_prefault() is called right before migration to VRAM to make
sure pages are resident in system memory before the migration. With
partial migrations, the owner reference passed here lets
amdgpu_hmm_range_get_pages() avoid migrating pages that are already in
the same VRAM domain.
Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 8c21fc49
@@ -471,7 +471,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
 		 prange->start, prange->last, best_loc);
 
 	/* FIXME: workaround for page locking bug with invalid pages */
-	svm_range_prefault(prange, mm);
+	svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));
 
 	start = prange->start << PAGE_SHIFT;
 	end = (prange->last + 1) << PAGE_SHIFT;

@@ -2717,7 +2717,8 @@ svm_range_best_prefetch_location(struct svm_range *prange)
 /* FIXME: This is a workaround for page locking bug when some pages are
  * invalid during migration to VRAM
  */
-void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm)
+void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
+			void *owner)
 {
 	struct hmm_range *hmm_range;
 	int r;

@@ -2728,7 +2729,7 @@ void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm)
 	r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
 				       prange->start << PAGE_SHIFT,
 				       prange->npages, &hmm_range,
-				       false, true, NULL);
+				       false, true, owner);
 	if (!r) {
 		amdgpu_hmm_range_get_pages_done(hmm_range);
 		prange->validated_once = true;

@@ -176,7 +176,8 @@ void schedule_deferred_list_work(struct svm_range_list *svms);
 void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
 			 unsigned long offset, unsigned long npages);
 void svm_range_free_dma_mappings(struct svm_range *prange);
-void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm);
+void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
+			void *owner);
 struct kfd_process_device *
 svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev);
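
For context, here is a minimal sketch (assumed helper name, not code from
this patch) of how the owner pointer is consumed: amdgpu_hmm_range_get_pages()
stores it in the dev_private_owner field of struct hmm_range, and
hmm_range_fault() then reports device-private pages whose pagemap owner
matches as valid results instead of first migrating them back to system
memory. SVM_ADEV_PGMAP_OWNER(adev) supplies that per-device (or per-XGMI-hive)
owner pointer. mmap locking and the notifier retry loop are omitted here.

/*
 * Illustrative sketch only; prefault_sketch and its arguments are
 * assumptions, not part of this patch. hmm_range_fault() leaves
 * device-private pages in place when their pgmap owner matches
 * dev_private_owner, which is what lets partial migration skip
 * pages already resident in the same VRAM domain.
 */
#include <linux/hmm.h>
#include <linux/mmu_notifier.h>

static int prefault_sketch(struct mmu_interval_notifier *notifier,
			   unsigned long start, unsigned long npages,
			   unsigned long *hmm_pfns, void *owner)
{
	struct hmm_range range = {
		.notifier = notifier,
		.notifier_seq = mmu_interval_read_begin(notifier),
		.start = start,
		.end = start + (npages << PAGE_SHIFT),
		.hmm_pfns = hmm_pfns,
		.default_flags = HMM_PFN_REQ_FAULT,
		/* Matching device-private pages are returned in place. */
		.dev_private_owner = owner,
	};

	/* Real callers hold mmap_read_lock() and retry on
	 * mmu_interval_read_retry(); omitted for brevity. */
	return hmm_range_fault(&range);
}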