Commit e43b5ec0 authored by Jerome Glisse, committed by Alex Deucher

drm/radeon: fence virtual address and free it once idle v4

Virtual addresses need to be fenced so we know when we can safely remove
them. This patch also properly clears the pagetable; previously it was
seriously broken.

Kernels 3.5/3.4 need a similar patch, adapted for the differences in mutex
locking.

v2: Force a pagetable update when unbinding a bo (don't bail out if
    bo_va->valid is true).
v3: Add kernel 3.5/3.4 comment.
v4: Fix compilation warnings.
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent 69b62ad8
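
The idea behind the patch, as a standalone sketch: every virtual-address
mapping keeps a reference on the fence of the last command submission that
used it, and teardown waits on that fence before clearing the pagetable and
freeing the mapping. Below is a minimal user-space model of that lifecycle;
all types and helpers in it are hypothetical stand-ins, not the driver's
actual API.

/* Minimal user-space model of the fence-before-free idea. All types and
 * helpers are hypothetical illustrations, not the radeon driver's API. */
#include <stdio.h>
#include <stdlib.h>

struct fence {
	int refcount;
};

struct bo_va {
	unsigned long soffset, eoffset;
	struct fence *fence;	/* last submission that used this mapping */
};

static struct fence *fence_ref(struct fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

static void fence_unref(struct fence **f)
{
	if (*f && --(*f)->refcount == 0)
		free(*f);
	*f = NULL;
}

static void fence_wait(struct fence *f)
{
	/* real code would block until the GPU signals the fence */
	(void)f;
}

/* On command submission: remember the fence protecting this mapping. */
static void va_fence(struct bo_va *va, struct fence *submit_fence)
{
	struct fence *old = va->fence;

	va->fence = fence_ref(submit_fence);	/* take the new reference first */
	fence_unref(&old);			/* then drop the old one */
}

/* On unmap: wait for the last user before the VA range can be reused. */
static void va_remove(struct bo_va *va)
{
	if (va->fence)
		fence_wait(va->fence);
	fence_unref(&va->fence);
	/* now safe to clear the pagetable entries and free the mapping */
}

int main(void)
{
	struct fence *f = calloc(1, sizeof(*f));
	struct bo_va va = { .soffset = 0x1000, .eoffset = 0x2000 };

	f->refcount = 1;	/* the submission's own reference */
	va_fence(&va, f);	/* the mapping now pins the fence */
	fence_unref(&f);	/* the submission drops its reference */
	va_remove(&va);		/* waits, then releases the last reference */
	printf("va [0x%lx, 0x%lx] torn down safely\n", va.soffset, va.eoffset);
	return 0;
}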
@@ -300,6 +300,7 @@ struct radeon_bo_va {
 	uint64_t		soffset;
 	uint64_t		eoffset;
 	uint32_t		flags;
+	struct radeon_fence	*fence;
 	bool			valid;
 };
...
@@ -278,6 +278,30 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	return 0;
 }
 
+static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
+				  struct radeon_fence *fence)
+{
+	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+	struct radeon_vm *vm = &fpriv->vm;
+	struct radeon_bo_list *lobj;
+
+	if (parser->chunk_ib_idx == -1) {
+		return;
+	}
+	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
+		return;
+	}
+
+	list_for_each_entry(lobj, &parser->validated, tv.head) {
+		struct radeon_bo_va *bo_va;
+		struct radeon_bo *rbo = lobj->bo;
+
+		bo_va = radeon_bo_va(rbo, vm);
+		radeon_fence_unref(&bo_va->fence);
+		bo_va->fence = radeon_fence_ref(fence);
+	}
+}
+
 /**
  * cs_parser_fini() - clean parser states
  * @parser:	parser structure holding parsing context.
@@ -290,11 +314,14 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
 	unsigned i;
 
-	if (!error)
+	if (!error) {
+		/* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */
+		radeon_bo_vm_fence_va(parser, parser->ib.fence);
 		ttm_eu_fence_buffer_objects(&parser->validated,
 					    parser->ib.fence);
-	else
+	} else {
 		ttm_eu_backoff_reservation(&parser->validated);
+	}
 
 	if (parser->relocs != NULL) {
 		for (i = 0; i < parser->nrelocs; i++) {
@@ -388,7 +415,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	if (parser->chunk_ib_idx == -1)
 		return 0;
-
 	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
 		return 0;
...
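
One detail worth calling out in the cs_parser_fini() hunk above:
radeon_bo_vm_fence_va() must run before ttm_eu_fence_buffer_objects(),
because the latter drops the buffers' reservations, and bo_va->fence may
only be touched while the buffer is still reserved (that is what the
in-diff comment is about). A toy model of the rule, with hypothetical
names, for illustration only:

/* Toy model: a mapping's fence may only be updated while its buffer
 * object is reserved. Hypothetical names, not the driver's API. */
#include <assert.h>
#include <stdio.h>

struct toy_bo {
	int reserved;		/* 1 while the CS ioctl holds the reservation */
	int va_fence;		/* stand-in for bo_va->fence */
};

static void fence_va(struct toy_bo *bo, int fence)
{
	assert(bo->reserved);	/* must run before the reservation is dropped */
	bo->va_fence = fence;
}

static void fence_buffer_objects(struct toy_bo *bo)
{
	/* like ttm_eu_fence_buffer_objects(): attaches the fence to the
	 * buffer and drops the reservation */
	bo->reserved = 0;
}

int main(void)
{
	struct toy_bo bo = { .reserved = 1 };

	fence_va(&bo, 42);		/* ok: still reserved */
	fence_buffer_objects(&bo);	/* reservation gone */
	/* calling fence_va(&bo, 43) here would trip the assert */
	printf("fenced the va, then released the reservation\n");
	return 0;
}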
@@ -814,7 +814,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		return -EINVAL;
 	}
 
-	if (bo_va->valid)
+	if (bo_va->valid && mem)
 		return 0;
 
 	ngpu_pages = radeon_bo_ngpu_pages(bo);
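
This one-word change is what makes unbinding actually clear the pagetable
(the v2 note in the changelog): radeon_vm_bo_rmv() calls
radeon_vm_bo_update_pte() with mem == NULL to mean "unbind", and with the
old `if (bo_va->valid)` guard a still-valid mapping returned before its
entries were cleared. A simplified sketch of the guard's intent, with a
hypothetical signature:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for the early-out in radeon_vm_bo_update_pte():
 * mem == NULL means "unbind", so the entries must be (re)written even
 * when the mapping is still marked valid. */
static int update_pte_sketch(int valid, const void *mem)
{
	if (valid && mem)
		return 0;	/* already bound and unchanged: nothing to do */
	return 1;		/* write, or clear when mem == NULL, the entries */
}

int main(void)
{
	int placement = 1;

	printf("bind unbound mapping:  %d\n", update_pte_sketch(0, &placement));
	printf("rebind valid mapping:  %d\n", update_pte_sketch(1, &placement));
	/* the old guard returned 0 here, leaving stale pagetable entries */
	printf("unbind valid mapping:  %d\n", update_pte_sketch(1, NULL));
	return 0;
}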
@@ -859,11 +859,27 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
 		     struct radeon_bo *bo)
 {
 	struct radeon_bo_va *bo_va;
+	int r;
 
 	bo_va = radeon_bo_va(bo, vm);
 	if (bo_va == NULL)
 		return 0;
 
+	/* wait for va use to end */
+	while (bo_va->fence) {
+		r = radeon_fence_wait(bo_va->fence, false);
+		if (r) {
+			DRM_ERROR("error while waiting for fence: %d\n", r);
+		}
+		if (r == -EDEADLK) {
+			r = radeon_gpu_reset(rdev);
+			if (!r)
+				continue;
+		}
+		break;
+	}
+	radeon_fence_unref(&bo_va->fence);
+
 	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&vm->mutex);
 	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
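
The wait loop added above follows a retry pattern: if waiting on the fence
reports -EDEADLK (the GPU is hung), reset the GPU and wait again; on
success or any other error, stop, then drop the fence reference and clear
the pagetable. A user-space model of that control flow, with scripted
stand-ins for the wait and the reset:

#include <stdio.h>

#define TOY_EDEADLK 35	/* stand-in for the kernel's -EDEADLK */

static int wait_calls;

/* Scripted stand-ins: the first wait "deadlocks", the reset succeeds,
 * and the second wait completes normally. */
static int toy_fence_wait(void)
{
	return (++wait_calls == 1) ? -TOY_EDEADLK : 0;
}

static int toy_gpu_reset(void)
{
	return 0;	/* pretend the reset always succeeds */
}

int main(void)
{
	int r;

	/* same shape as the wait loop in radeon_vm_bo_rmv() */
	for (;;) {
		r = toy_fence_wait();
		if (r)
			fprintf(stderr, "error while waiting for fence: %d\n", r);
		if (r == -TOY_EDEADLK) {
			r = toy_gpu_reset();
			if (!r)
				continue;	/* reset worked: wait again */
		}
		break;	/* success, or an unrecoverable error */
	}
	printf("fence signaled after %d wait(s)\n", wait_calls);
	return 0;
}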
@@ -952,12 +968,15 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	radeon_vm_unbind_locked(rdev, vm);
 	mutex_unlock(&rdev->vm_manager.lock);
 
-	/* remove all bo */
+	/* remove all bo; at this point none are busy any more because unbind
+	 * waited for the last vm fence to signal
+	 */
 	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 	if (!r) {
 		bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
 		list_del_init(&bo_va->bo_list);
 		list_del_init(&bo_va->vm_list);
+		radeon_fence_unref(&bo_va->fence);
 		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 		kfree(bo_va);
 	}
@@ -969,6 +988,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 		r = radeon_bo_reserve(bo_va->bo, false);
 		if (!r) {
 			list_del_init(&bo_va->bo_list);
+			radeon_fence_unref(&bo_va->fence);
 			radeon_bo_unreserve(bo_va->bo);
 			kfree(bo_va);
 		}
...
@@ -134,25 +134,16 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
 	struct radeon_device *rdev = rbo->rdev;
 	struct radeon_fpriv *fpriv = file_priv->driver_priv;
 	struct radeon_vm *vm = &fpriv->vm;
-	struct radeon_bo_va *bo_va, *tmp;
 
 	if (rdev->family < CHIP_CAYMAN) {
 		return;
 	}
 
 	if (radeon_bo_reserve(rbo, false)) {
+		dev_err(rdev->dev, "leaking bo va because we failed to reserve the bo\n");
 		return;
 	}
-	list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
-		if (bo_va->vm == vm) {
-			/* remove from this vm address space */
-			mutex_lock(&vm->mutex);
-			list_del(&bo_va->vm_list);
-			mutex_unlock(&vm->mutex);
-			list_del(&bo_va->bo_list);
-			kfree(bo_va);
-		}
-	}
+	radeon_vm_bo_rmv(rdev, vm, rbo);
 	radeon_bo_unreserve(rbo);
 }
...
@@ -52,11 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo)
 	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
 		/* remove from all vm address space */
-		mutex_lock(&bo_va->vm->mutex);
-		list_del(&bo_va->vm_list);
-		mutex_unlock(&bo_va->vm->mutex);
-		list_del(&bo_va->bo_list);
-		kfree(bo_va);
+		radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo);
 	}
 }
...