Commit 915d3eec authored by Philip Yang, committed by Alex Deucher

drm/amdgpu: replace get_user_pages with HMM mirror helpers

Use the HMM helper hmm_vma_fault() to get the physical pages backing a
userptr and to start tracking CPU page table updates on those pages. Then
use hmm_vma_range_done() to check whether those pages were updated before
amdgpu_cs_submit for gfx, or before user queues are resumed for kfd.
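
In code, the pairing looks roughly like this (a condensed sketch, not part of
the patch; the two helpers are the ones this commit introduces, and the GPU
mapping setup is elided to a comment; userptr_populate() is a hypothetical
caller):

	/* Sketch: how a caller pairs the two helpers added by this commit. */
	static int userptr_populate(struct ttm_tt *ttm, struct page **pages)
	{
		int r;

		/* fault pages in and start HMM tracking of CPU PT updates */
		r = amdgpu_ttm_tt_get_user_pages(ttm, pages);
		if (r)
			return r;

		/* ... set up the GPU mapping: GART bind, SG table, VM PTEs ... */

		/* stop tracking; false means the range was invalidated */
		if (!amdgpu_ttm_tt_get_user_pages_done(ttm))
			return -EAGAIN;	/* caller restarts from scratch */

		return 0;
	}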

If the userptr pages were updated, amdgpu_cs_ioctl restarts from scratch
for gfx; for kfd, the restore worker is rescheduled to retry.
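
For gfx the restart is driven from user space: libdrm's drmIoctl() reissues
an ioctl whenever the kernel returns EINTR or EAGAIN, so the resubmission is
transparent to the application. Roughly (an illustrative equivalent of that
retry loop, not libdrm's exact source):

	#include <sys/ioctl.h>
	#include <errno.h>

	static int drm_ioctl_restart(int fd, unsigned long request, void *arg)
	{
		int ret;

		do {
			ret = ioctl(fd, request, arg);
		} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

		return ret;
	}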

HMM simplifies the check for concurrent CPU page table updates, so the
guptasklock, guptasks, mmu_invalidations, and last_set_pages fields are
removed from struct amdgpu_ttm_tt.

HMM does not pin pages (it does not raise their refcount), so the related
operations release_pages(), put_page(), and mark_page_dirty() are removed.
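
The cleanup path therefore shrinks from dropping one reference per page to
freeing a single array (condensed from the diff below, not new code):

	/* before: get_user_pages() took a reference on every page */
	release_pages(pages, ttm->num_pages);

	/* after: HMM only recorded pfns, so ending the tracking is a free */
	kvfree(gtt->range.pfns);
	gtt->range.pfns = NULL;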

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 8dd69e69
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -61,7 +61,6 @@ struct kgd_mem {
 	atomic_t invalid;
 	struct amdkfd_process_info *process_info;
-	struct page **user_pages;
 	struct amdgpu_sync sync;

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -499,28 +499,12 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
 		goto out;
 	}
 
-	/* If no restore worker is running concurrently, user_pages
-	 * should not be allocated
-	 */
-	WARN(mem->user_pages, "Leaking user_pages array");
-
-	mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
-					 sizeof(struct page *),
-					 GFP_KERNEL | __GFP_ZERO);
-	if (!mem->user_pages) {
-		pr_err("%s: Failed to allocate pages array\n", __func__);
-		ret = -ENOMEM;
-		goto unregister_out;
-	}
-
-	ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
+	ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages);
 	if (ret) {
 		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
-		goto free_out;
+		goto unregister_out;
 	}
 
-	amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);
-
 	ret = amdgpu_bo_reserve(bo, true);
 	if (ret) {
 		pr_err("%s: Failed to reserve BO\n", __func__);
@@ -533,11 +517,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
 	amdgpu_bo_unreserve(bo);
 
 release_out:
-	if (ret)
-		release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
-free_out:
-	kvfree(mem->user_pages);
-	mem->user_pages = NULL;
+	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
 unregister_out:
 	if (ret)
 		amdgpu_mn_unregister(bo);
@@ -596,7 +576,6 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
 	ctx->kfd_bo.priority = 0;
 	ctx->kfd_bo.tv.bo = &bo->tbo;
 	ctx->kfd_bo.tv.num_shared = 1;
-	ctx->kfd_bo.user_pages = NULL;
 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
 
 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
@@ -660,7 +639,6 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 	ctx->kfd_bo.priority = 0;
 	ctx->kfd_bo.tv.bo = &bo->tbo;
 	ctx->kfd_bo.tv.num_shared = 1;
-	ctx->kfd_bo.user_pages = NULL;
 	list_add(&ctx->kfd_bo.tv.head, &ctx->list);
 
 	i = 0;
@@ -1275,15 +1253,6 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	list_del(&bo_list_entry->head);
 	mutex_unlock(&process_info->lock);
 
-	/* Free user pages if necessary */
-	if (mem->user_pages) {
-		pr_debug("%s: Freeing user_pages array\n", __func__);
-		if (mem->user_pages[0])
-			release_pages(mem->user_pages,
-				      mem->bo->tbo.ttm->num_pages);
-		kvfree(mem->user_pages);
-	}
-
 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
 	if (unlikely(ret))
 		return ret;
@@ -1757,25 +1726,11 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 		bo = mem->bo;
 
-		if (!mem->user_pages) {
-			mem->user_pages =
-				kvmalloc_array(bo->tbo.ttm->num_pages,
-					       sizeof(struct page *),
-					       GFP_KERNEL | __GFP_ZERO);
-			if (!mem->user_pages) {
-				pr_err("%s: Failed to allocate pages array\n",
-				       __func__);
-				return -ENOMEM;
-			}
-		} else if (mem->user_pages[0]) {
-			release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
-		}
-
 		/* Get updated user pages */
 		ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
-						   mem->user_pages);
+						   bo->tbo.ttm->pages);
 		if (ret) {
-			mem->user_pages[0] = NULL;
+			bo->tbo.ttm->pages[0] = NULL;
 			pr_info("%s: Failed to get user pages: %d\n",
 				__func__, ret);
 			/* Pretend it succeeded. It will fail later
@@ -1784,12 +1739,6 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 			 * stalled user mode queues.
 			 */
 		}
-
-		/* Mark the BO as valid unless it was invalidated
-		 * again concurrently
-		 */
-		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
-			return -EAGAIN;
 	}
 
 	return 0;
@@ -1819,7 +1768,8 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 				     GFP_KERNEL);
 	if (!pd_bo_list_entries) {
 		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_no_mem;
 	}
 
 	INIT_LIST_HEAD(&resv_list);
@@ -1843,7 +1793,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
 	if (ret)
-		goto out;
+		goto out_free;
 
 	amdgpu_sync_create(&sync);
@@ -1859,10 +1809,8 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 		bo = mem->bo;
 
-		/* Copy pages array and validate the BO if we got user pages */
-		if (mem->user_pages[0]) {
-			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
-						     mem->user_pages);
+		/* Validate the BO if we got user pages */
+		if (bo->tbo.ttm->pages[0]) {
 			amdgpu_bo_placement_from_domain(bo, mem->domain);
 			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 			if (ret) {
@@ -1871,16 +1819,16 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 			}
 		}
 
-		/* Validate succeeded, now the BO owns the pages, free
-		 * our copy of the pointer array. Put this BO back on
-		 * the userptr_valid_list. If we need to revalidate
-		 * it, we need to start from scratch.
-		 */
-		kvfree(mem->user_pages);
-		mem->user_pages = NULL;
-
 		list_move_tail(&mem->validate_list.head,
 			       &process_info->userptr_valid_list);
 
+		/* Stop HMM from tracking the userptr updates. We don't check
+		 * the return value for concurrent CPU page table updates,
+		 * because we will reschedule the restore worker if
+		 * process_info->evicted_bos is updated.
+		 */
+		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+
 		/* Update mapping. If the BO was not validated
 		 * (because we couldn't get user pages), this will
 		 * clear the page table entries, which will result in
@@ -1910,8 +1858,15 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 	ttm_eu_backoff_reservation(&ticket, &resv_list);
 	amdgpu_sync_wait(&sync, false);
 	amdgpu_sync_free(&sync);
-out:
+out_free:
 	kfree(pd_bo_list_entries);
+out_no_mem:
+	list_for_each_entry_safe(mem, tmp_mem,
+				 &process_info->userptr_inval_list,
+				 validate_list.head) {
+		bo = mem->bo;
+		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+	}
 
 	return ret;
 }
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -36,7 +36,7 @@ struct amdgpu_bo_list_entry {
 	struct amdgpu_bo_va	*bo_va;
 	uint32_t		priority;
 	struct page		**user_pages;
-	int			user_invalidated;
+	bool			user_invalidated;
 };
 
 struct amdgpu_bo_list {
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -52,7 +52,6 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 	p->uf_entry.tv.bo = &bo->tbo;
 	/* One for TTM and one for the CS job */
 	p->uf_entry.tv.num_shared = 2;
-	p->uf_entry.user_pages = NULL;
 
 	drm_gem_object_put_unlocked(gobj);
@@ -540,14 +539,14 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 		if (usermm && usermm != current->mm)
 			return -EPERM;
 
-		/* Check if we have user pages and nobody bound the BO already */
-		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
-		    lobj->user_pages) {
+		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
+		    lobj->user_invalidated && lobj->user_pages) {
 			amdgpu_bo_placement_from_domain(bo,
 							AMDGPU_GEM_DOMAIN_CPU);
 			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 			if (r)
 				return r;
+
 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
 						     lobj->user_pages);
 			binding_userptr = true;
@@ -578,7 +577,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	struct amdgpu_bo *gds;
 	struct amdgpu_bo *gws;
 	struct amdgpu_bo *oa;
-	unsigned tries = 10;
 	int r;
 
 	INIT_LIST_HEAD(&p->validated);
@@ -614,79 +612,45 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
 		list_add(&p->uf_entry.tv.head, &p->validated);
 
-	while (1) {
-		struct list_head need_pages;
-
-		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
-					   &duplicates);
-		if (unlikely(r != 0)) {
-			if (r != -ERESTARTSYS)
-				DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
-			goto error_free_pages;
-		}
-
-		INIT_LIST_HEAD(&need_pages);
-		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
-			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-
-			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
-				 &e->user_invalidated) && e->user_pages) {
-
-				/* We acquired a page array, but somebody
-				 * invalidated it. Free it and try again
-				 */
-				release_pages(e->user_pages,
-					      bo->tbo.ttm->num_pages);
-				kvfree(e->user_pages);
-				e->user_pages = NULL;
-			}
-
-			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
-			    !e->user_pages) {
-				list_del(&e->tv.head);
-				list_add(&e->tv.head, &need_pages);
-
-				amdgpu_bo_unreserve(bo);
-			}
-		}
-
-		if (list_empty(&need_pages))
-			break;
-
-		/* Unreserve everything again. */
-		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
-
-		/* We tried too many times, just abort */
-		if (!--tries) {
-			r = -EDEADLK;
-			DRM_ERROR("deadlock in %s\n", __func__);
-			goto error_free_pages;
-		}
-
-		/* Fill the page arrays for all userptrs. */
-		list_for_each_entry(e, &need_pages, tv.head) {
-			struct ttm_tt *ttm = e->tv.bo->ttm;
-
-			e->user_pages = kvmalloc_array(ttm->num_pages,
-						       sizeof(struct page *),
-						       GFP_KERNEL | __GFP_ZERO);
-			if (!e->user_pages) {
-				r = -ENOMEM;
-				DRM_ERROR("calloc failure in %s\n", __func__);
-				goto error_free_pages;
-			}
-
-			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
-			if (r) {
-				DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
-				kvfree(e->user_pages);
-				e->user_pages = NULL;
-				goto error_free_pages;
-			}
-		}
-
-		/* And try again. */
-		list_splice(&need_pages, &p->validated);
+	/* Get the userptr backing pages. If the pages were updated after being
+	 * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
+	 * will do amdgpu_ttm_backend_bind() to flush and invalidate new pages.
+	 */
+	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+		bool userpage_invalidated = false;
+		int i;
+
+		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
+					sizeof(struct page *),
+					GFP_KERNEL | __GFP_ZERO);
+		if (!e->user_pages) {
+			DRM_ERROR("calloc failure\n");
+			return -ENOMEM;
+		}
+
+		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, e->user_pages);
+		if (r) {
+			kvfree(e->user_pages);
+			e->user_pages = NULL;
+			return r;
+		}
+
+		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
+			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
+				userpage_invalidated = true;
+				break;
+			}
+		}
+		e->user_invalidated = userpage_invalidated;
+	}
+
+	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
+				   &duplicates);
+	if (unlikely(r != 0)) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
+		goto out;
 	}
 
 	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
@@ -755,17 +719,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 error_validate:
 	if (r)
 		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
-
-error_free_pages:
-
-	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
-		if (!e->user_pages)
-			continue;
-
-		release_pages(e->user_pages, e->tv.bo->ttm->num_pages);
-		kvfree(e->user_pages);
-	}
-
+out:
 	return r;
 }
@@ -1224,7 +1178,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	struct amdgpu_bo_list_entry *e;
 	struct amdgpu_job *job;
 	uint64_t seq;
-
 	int r;
 
 	job = p->job;
@@ -1234,15 +1187,23 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	if (r)
 		goto error_unlock;
 
-	/* No memory allocation is allowed while holding the mn lock */
+	/* No memory allocation is allowed while holding the mn lock.
+	 * p->mn is held until amdgpu_cs_submit is finished and the fence
+	 * is added to the BOs.
+	 */
 	amdgpu_mn_lock(p->mn);
+
+	/* If userptrs are invalidated after amdgpu_cs_parser_bos(), return
+	 * -EAGAIN; drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
+	 */
 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 
-		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
-			r = -ERESTARTSYS;
-			goto error_abort;
-		}
+		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+	}
+	if (r) {
+		r = -EAGAIN;
+		goto error_abort;
 	}
 
 	job->owner = p->filp;
@@ -1338,6 +1299,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 out:
 	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
+
 	return r;
 }

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -329,26 +329,24 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 
 		r = amdgpu_bo_reserve(bo, true);
 		if (r)
-			goto free_pages;
+			goto user_pages_done;
 
 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		amdgpu_bo_unreserve(bo);
 		if (r)
-			goto free_pages;
+			goto user_pages_done;
 	}
 
 	r = drm_gem_handle_create(filp, gobj, &handle);
-	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_put_unlocked(gobj);
 	if (r)
-		return r;
+		goto user_pages_done;
 
 	args->handle = handle;
-	return 0;
 
-free_pages:
-	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
+user_pages_done:
+	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
+		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
 
 release_object:
 	drm_gem_object_put_unlocked(gobj);

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -220,8 +220,6 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 			true, false, MAX_SCHEDULE_TIMEOUT);
 		if (r <= 0)
 			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
-		amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
 	}
 }
@@ -502,3 +500,26 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 	mutex_unlock(&adev->mn_lock);
 }
 
+/* flags used internally by HMM, not related to CPU/GPU PTE flags */
+static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
+	(1 << 0), /* HMM_PFN_VALID */
+	(1 << 1), /* HMM_PFN_WRITE */
+	0 /* HMM_PFN_DEVICE_PRIVATE */
+};
+
+static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
+	0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
+	0, /* HMM_PFN_NONE */
+	0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
+};
+
+void amdgpu_hmm_init_range(struct hmm_range *range)
+{
+	if (range) {
+		range->flags = hmm_range_flags;
+		range->values = hmm_range_values;
+		range->pfn_shift = PAGE_SHIFT;
+		range->pfns = NULL;
+		INIT_LIST_HEAD(&range->list);
+	}
+}
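
Each entry of range->pfns thus both names a page and carries per-page state,
encoded through the driver-supplied tables above; callers test bits via the
flags array rather than hard-coded masks. A hypothetical check (sketch only,
against the HMM API of this kernel generation; the helper name is invented):

	/* Sketch: is a populated range entry valid and writable? */
	static bool amdgpu_hmm_entry_writable(struct hmm_range *range,
					      uint64_t entry)
	{
		return (entry & range->flags[HMM_PFN_VALID]) &&
		       (entry & range->flags[HMM_PFN_WRITE]);
	}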
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -25,9 +25,10 @@
 #define __AMDGPU_MN_H__
 
 /*
- * MMU Notifier
+ * HMM mirror
  */
 struct amdgpu_mn;
+struct hmm_range;
 
 enum amdgpu_mn_type {
 	AMDGPU_MN_TYPE_GFX,
@@ -41,6 +42,7 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
 			     enum amdgpu_mn_type type);
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
 void amdgpu_mn_unregister(struct amdgpu_bo *bo);
+void amdgpu_hmm_init_range(struct hmm_range *range);
 #else
 static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
 static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}

--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -43,6 +43,7 @@
 #include <linux/pagemap.h>
 #include <linux/debugfs.h>
 #include <linux/iommu.h>
+#include <linux/hmm.h>
 #include "amdgpu.h"
 #include "amdgpu_object.h"
 #include "amdgpu_trace.h"
@@ -705,98 +706,102 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 /*
  * TTM backend functions.
  */
-struct amdgpu_ttm_gup_task_list {
-	struct list_head	list;
-	struct task_struct	*task;
-};
-
 struct amdgpu_ttm_tt {
 	struct ttm_dma_tt	ttm;
 	u64			offset;
 	uint64_t		userptr;
 	struct task_struct	*usertask;
 	uint32_t		userflags;
-	spinlock_t		guptasklock;
-	struct list_head	guptasks;
-	atomic_t		mmu_invalidations;
-	uint32_t		last_set_pages;
+	struct hmm_range	range;
 };
 
 /**
- * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to by a USERPTR
- * pointer to memory
+ * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
+ * memory and start HMM tracking of CPU page table updates
  *
- * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
- * This provides a wrapper around the get_user_pages() call to provide
- * device accessible pages that back user memory.
+ * The calling function must call amdgpu_ttm_tt_get_user_pages_done() once
+ * and only once afterwards to stop HMM tracking
  */
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	struct mm_struct *mm = gtt->usertask->mm;
-	unsigned int flags = 0;
-	unsigned pinned = 0;
-	int r;
+	unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+	struct hmm_range *range = &gtt->range;
+	int r = 0, i;
 
 	if (!mm) /* Happens during process shutdown */
 		return -ESRCH;
 
-	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
-		flags |= FOLL_WRITE;
+	amdgpu_hmm_init_range(range);
 
 	down_read(&mm->mmap_sem);
 
-	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
-		/*
-		 * check that we only use anonymous memory to prevent problems
-		 * with writeback
-		 */
-		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
-		struct vm_area_struct *vma;
-
-		vma = find_vma(mm, gtt->userptr);
-		if (!vma || vma->vm_file || vma->vm_end < end) {
-			up_read(&mm->mmap_sem);
-			return -EPERM;
-		}
-	}
+	range->vma = find_vma(mm, gtt->userptr);
+	if (!range_in_vma(range->vma, gtt->userptr, end))
+		r = -EFAULT;
+	else if ((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
+		 range->vma->vm_file)
+		r = -EPERM;
+	if (r)
+		goto out;
 
-	/* loop enough times using contiguous pages of memory */
-	do {
-		unsigned num_pages = ttm->num_pages - pinned;
-		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
-		struct page **p = pages + pinned;
-		struct amdgpu_ttm_gup_task_list guptask;
+	range->pfns = kvmalloc_array(ttm->num_pages, sizeof(uint64_t),
+				     GFP_KERNEL);
+	if (range->pfns == NULL) {
+		r = -ENOMEM;
+		goto out;
+	}
+	range->start = gtt->userptr;
+	range->end = end;
 
-		guptask.task = current;
-		spin_lock(&gtt->guptasklock);
-		list_add(&guptask.list, &gtt->guptasks);
-		spin_unlock(&gtt->guptasklock);
+	range->pfns[0] = range->flags[HMM_PFN_VALID];
+	range->pfns[0] |= amdgpu_ttm_tt_is_readonly(ttm) ?
+				0 : range->flags[HMM_PFN_WRITE];
+	for (i = 1; i < ttm->num_pages; i++)
+		range->pfns[i] = range->pfns[0];
 
-		if (mm == current->mm)
-			r = get_user_pages(userptr, num_pages, flags, p, NULL);
-		else
-			r = get_user_pages_remote(gtt->usertask,
-					mm, userptr, num_pages,
-					flags, p, NULL, NULL);
+	/* This may trigger page table update */
+	r = hmm_vma_fault(range, true);
+	if (r)
+		goto out_free_pfns;
 
-		spin_lock(&gtt->guptasklock);
-		list_del(&guptask.list);
-		spin_unlock(&gtt->guptasklock);
+	up_read(&mm->mmap_sem);
 
-		if (r < 0)
-			goto release_pages;
+	for (i = 0; i < ttm->num_pages; i++)
+		pages[i] = hmm_pfn_to_page(range, range->pfns[i]);
 
-		pinned += r;
+	return 0;
 
-	} while (pinned < ttm->num_pages);
+out_free_pfns:
+	kvfree(range->pfns);
+	range->pfns = NULL;
+out:
+	up_read(&mm->mmap_sem);
+	return r;
+}
 
-	up_read(&mm->mmap_sem);
-	return 0;
+/**
+ * amdgpu_ttm_tt_get_user_pages_done - stop HMM from tracking CPU page table
+ * changes and check whether the pages backing this ttm range were invalidated
+ *
+ * Returns: true if the pages are still valid
+ */
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	bool r = false;
 
-release_pages:
-	release_pages(pages, pinned);
-	up_read(&mm->mmap_sem);
+	if (!gtt || !gtt->userptr)
+		return false;
+
+	WARN_ONCE(!gtt->range.pfns, "No user pages to check\n");
+	if (gtt->range.pfns) {
+		r = hmm_vma_range_done(&gtt->range);
+		kvfree(gtt->range.pfns);
+		gtt->range.pfns = NULL;
+	}
+
 	return r;
 }
@@ -809,16 +814,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
  */
 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
-	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	unsigned i;
 
-	gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
-	for (i = 0; i < ttm->num_pages; ++i) {
-		if (ttm->pages[i])
-			put_page(ttm->pages[i]);
-
+	for (i = 0; i < ttm->num_pages; ++i)
 		ttm->pages[i] = pages ? pages[i] : NULL;
-	}
 }
/** /**
...@@ -903,10 +902,11 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) ...@@ -903,10 +902,11 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
/* unmap the pages mapped to the device */ /* unmap the pages mapped to the device */
dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
/* mark the pages as dirty */
amdgpu_ttm_tt_mark_user_pages(ttm);
sg_free_table(ttm->sg); sg_free_table(ttm->sg);
if (gtt->range.pfns &&
ttm->pages[0] == hmm_pfn_to_page(&gtt->range, gtt->range.pfns[0]))
WARN_ONCE(1, "Missing get_user_page_done\n");
} }
int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
...@@ -1256,11 +1256,6 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, ...@@ -1256,11 +1256,6 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
gtt->usertask = current->group_leader; gtt->usertask = current->group_leader;
get_task_struct(gtt->usertask); get_task_struct(gtt->usertask);
spin_lock_init(&gtt->guptasklock);
INIT_LIST_HEAD(&gtt->guptasks);
atomic_set(&gtt->mmu_invalidations, 0);
gtt->last_set_pages = 0;
return 0; return 0;
} }
@@ -1289,7 +1284,6 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 				  unsigned long end)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	struct amdgpu_ttm_gup_task_list *entry;
 	unsigned long size;
 
 	if (gtt == NULL || !gtt->userptr)
@@ -1302,48 +1296,20 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 	if (gtt->userptr > end || gtt->userptr + size <= start)
 		return false;
 
-	/* Search the lists of tasks that hold this mapping and see
-	 * if current is one of them. If it is return false.
-	 */
-	spin_lock(&gtt->guptasklock);
-	list_for_each_entry(entry, &gtt->guptasks, list) {
-		if (entry->task == current) {
-			spin_unlock(&gtt->guptasklock);
-			return false;
-		}
-	}
-	spin_unlock(&gtt->guptasklock);
-
-	atomic_inc(&gtt->mmu_invalidations);
-
 	return true;
 }
 
 /**
- * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated?
+ * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
  */
-bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
-				       int *last_invalidated)
-{
-	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	int prev_invalidated = *last_invalidated;
-
-	*last_invalidated = atomic_read(&gtt->mmu_invalidations);
-	return prev_invalidated != *last_invalidated;
-}
-
-/**
- * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt
- * object been invalidated since the last time they've been set?
- */
-bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
+bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
 	if (gtt == NULL || !gtt->userptr)
 		return false;
 
-	return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
+	return true;
 }
 
 /**
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -102,6 +102,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
 
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
 void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
@@ -112,7 +113,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 				  unsigned long end);
 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 				       int *last_invalidated);
-bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,