Commit 1fc160cf authored by Alex Sierra, committed by Alex Deucher

drm/amdgpu: get owner ref in validate and map

Get the proper owner reference for the amdgpu_hmm_range_get_pages function.
This is useful for partial migrations: it avoids migrating VRAM pages that
are accessible by all devices in the same memory domain back to system
memory. E.g. multiple devices in the same hive.
Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent a010d98a
...@@ -1337,6 +1337,17 @@ static void svm_range_unreserve_bos(struct svm_validate_context *ctx) ...@@ -1337,6 +1337,17 @@ static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list); ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
} }
/*
 * Return the HMM page-map owner token for the GPU at @gpuidx, used to
 * identify device-private pages that belong to the same memory domain
 * (e.g. all GPUs in one hive) so they need not be migrated back to
 * system memory when faulted from a peer device.
 *
 * Returns NULL if @gpuidx does not map to a process device; callers
 * treat a NULL owner as "no common owner".
 */
static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
{
	struct kfd_process_device *pdd;
	struct amdgpu_device *adev;

	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
	/* Guard against an invalid/unbound gpuidx to avoid a NULL deref */
	if (!pdd)
		return NULL;
	adev = (struct amdgpu_device *)pdd->dev->kgd;

	return SVM_ADEV_PGMAP_OWNER(adev);
}
/* /*
* Validation+GPU mapping with concurrent invalidation (MMU notifiers) * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
* *
...@@ -1367,6 +1378,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm, ...@@ -1367,6 +1378,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
{ {
struct svm_validate_context ctx; struct svm_validate_context ctx;
struct hmm_range *hmm_range; struct hmm_range *hmm_range;
struct kfd_process *p;
void *owner;
int32_t idx;
int r = 0; int r = 0;
ctx.process = container_of(prange->svms, struct kfd_process, svms); ctx.process = container_of(prange->svms, struct kfd_process, svms);
...@@ -1413,10 +1427,19 @@ static int svm_range_validate_and_map(struct mm_struct *mm, ...@@ -1413,10 +1427,19 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
svm_range_reserve_bos(&ctx); svm_range_reserve_bos(&ctx);
if (!prange->actual_loc) { if (!prange->actual_loc) {
p = container_of(prange->svms, struct kfd_process, svms);
owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
MAX_GPU_INSTANCE));
for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
if (kfd_svm_page_owner(p, idx) != owner) {
owner = NULL;
break;
}
}
r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL, r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
prange->start << PAGE_SHIFT, prange->start << PAGE_SHIFT,
prange->npages, &hmm_range, prange->npages, &hmm_range,
false, true, NULL); false, true, owner);
if (r) { if (r) {
pr_debug("failed %d to get svm range pages\n", r); pr_debug("failed %d to get svm range pages\n", r);
goto unreserve_out; goto unreserve_out;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment