Commit 1f727182 authored by Thomas Hellström, committed by Rodrigo Vivi

drm/xe: Convert remaining instances of ttm_eu_reserve_buffers to drm_exec

The VM_BIND functionality and vma destruction were locking
potentially multiple dma_resv objects using the
ttm_eu_reserve_buffers() function. Rework those paths to use the
drm_exec helper, taking care that any call to xe_bo_validate() ends
up inside an unsealed locking transaction.
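
All converted paths follow the same drm_exec locking-loop shape: open a
transaction, lock every needed dma_resv, restart the whole transaction on
contention, and only then do work while the locks are held. A minimal sketch
of that shape (the xe_vm_prepare_vma() call and its fence-count argument are
taken from this patch; the surrounding snippet is illustrative, not verbatim
driver code):

    struct drm_exec exec;
    int err;

    drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
    drm_exec_until_all_locked(&exec) {
            /* Lock the VM's and the VMA's BO dma_resv objects. */
            err = xe_vm_prepare_vma(&exec, vma, 1);
            drm_exec_retry_on_contention(&exec);
            if (err)
                    break;
            /* Any xe_bo_validate() call happens here, while the
             * locking transaction is still open ("unsealed"). */
    }
    drm_exec_fini(&exec);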

v4:
- Remove an unbalanced xe_bo_put() (igt and Matthew Brost)
v5:
- Rebase conflict
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230908091716.36984-7-thomas.hellstrom@linux.intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 2714d509
@@ -1129,29 +1129,20 @@ int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
 {
-        struct ttm_validate_buffer tv[2];
-        struct ww_acquire_ctx ww;
-        struct xe_bo *bo = xe_vma_bo(vma);
-        LIST_HEAD(objs);
-        LIST_HEAD(dups);
+        struct drm_exec exec;
         int err;
 
-        memset(tv, 0, sizeof(tv));
-        tv[0].bo = xe_vm_ttm_bo(xe_vma_vm(vma));
-        list_add(&tv[0].head, &objs);
-        if (bo) {
-                tv[1].bo = &xe_bo_get(bo)->ttm;
-                list_add(&tv[1].head, &objs);
+        drm_exec_init(&exec, 0);
+        drm_exec_until_all_locked(&exec) {
+                err = xe_vm_prepare_vma(&exec, vma, 0);
+                drm_exec_retry_on_contention(&exec);
+                if (XE_WARN_ON(err))
+                        break;
         }
-        err = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
-        XE_WARN_ON(err);
 
         xe_vma_destroy(vma, NULL);
 
-        ttm_eu_backoff_reservation(&ww, &objs);
-        if (bo)
-                xe_bo_put(bo);
+        drm_exec_fini(&exec);
 }
 
 struct xe_vma *
@@ -2142,21 +2133,6 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
 #define VM_BIND_OP(op) (op & 0xffff)
 
-struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm)
-{
-        int idx = vm->flags & XE_VM_FLAG_MIGRATION ?
-                XE_VM_FLAG_TILE_ID(vm->flags) : 0;
-
-        /* Safe to use index 0 as all BO in the VM share a single dma-resv lock */
-        return &vm->pt_root[idx]->bo->ttm;
-}
-
-static void xe_vm_tv_populate(struct xe_vm *vm, struct ttm_validate_buffer *tv)
-{
-        tv->num_shared = 1;
-        tv->bo = xe_vm_ttm_bo(vm);
-}
-
 static void vm_set_async_error(struct xe_vm *vm, int err)
 {
         lockdep_assert_held(&vm->lock);
@@ -2668,42 +2644,16 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
         return err;
 }
 
-static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
-                               struct xe_vma_op *op)
+static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
+                      struct xe_vma *vma, struct xe_vma_op *op)
 {
-        LIST_HEAD(objs);
-        LIST_HEAD(dups);
-        struct ttm_validate_buffer tv_bo, tv_vm;
-        struct ww_acquire_ctx ww;
-        struct xe_bo *vbo;
         int err;
 
         lockdep_assert_held_write(&vm->lock);
 
-        xe_vm_tv_populate(vm, &tv_vm);
-        list_add_tail(&tv_vm.head, &objs);
-        vbo = xe_vma_bo(vma);
-        if (vbo) {
-                /*
-                 * An unbind can drop the last reference to the BO and
-                 * the BO is needed for ttm_eu_backoff_reservation so
-                 * take a reference here.
-                 */
-                xe_bo_get(vbo);
-
-                if (!vbo->vm) {
-                        tv_bo.bo = &vbo->ttm;
-                        tv_bo.num_shared = 1;
-                        list_add(&tv_bo.head, &objs);
-                }
-        }
-
-again:
-        err = ttm_eu_reserve_buffers(&ww, &objs, true, &dups);
-        if (err) {
-                xe_bo_put(vbo);
+        err = xe_vm_prepare_vma(exec, vma, 1);
+        if (err)
                 return err;
-        }
 
         xe_vm_assert_held(vm);
         xe_bo_assert_held(xe_vma_bo(vma));
@@ -2782,17 +2732,36 @@ static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
                 XE_WARN_ON("NOT POSSIBLE");
         }
 
-        ttm_eu_backoff_reservation(&ww, &objs);
+        if (err)
+                trace_xe_vma_fail(vma);
+
+        return err;
+}
+
+static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
+                               struct xe_vma_op *op)
+{
+        struct drm_exec exec;
+        int err;
+
+retry_userptr:
+        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+        drm_exec_until_all_locked(&exec) {
+                err = op_execute(&exec, vm, vma, op);
+                drm_exec_retry_on_contention(&exec);
+                if (err)
+                        break;
+        }
+        drm_exec_fini(&exec);
+
         if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
                 lockdep_assert_held_write(&vm->lock);
                 err = xe_vma_userptr_pin_pages(vma);
                 if (!err)
-                        goto again;
-        }
-        xe_bo_put(vbo);
+                        goto retry_userptr;
 
-        if (err)
                 trace_xe_vma_fail(vma);
+        }
 
         return err;
 }
@@ -180,8 +180,6 @@ int xe_vm_async_fence_wait_start(struct dma_fence *fence);
 extern struct ttm_device_funcs xe_ttm_funcs;
 
-struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm);
-
 static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
 {
         XE_WARN_ON(!xe_vm_in_compute_mode(vm));