Commit a00e7e3f authored by Thomas Hellström, committed by Lucas De Marchi

drm/xe: Rework rebinding

Instead of handling the vm's rebind fence separately,
which is error prone if the fences are not strictly ordered,
attach rebind fences as kernel fences to the vm's resv.

Fixes: dd08ebf6 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <stable@vger.kernel.org> # v6.8+
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240327091136.3271-3-thomas.hellstrom@linux.intel.com
(cherry picked from commit 5a091aff)
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
parent 3c88b8f4
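
As a rough illustration of the approach (a minimal sketch, not the driver code itself; the helper names publish_rebind_fence() and order_job_behind_rebinds() are invented for this example), the rework boils down to publishing rebind fences on the VM's reservation object with DMA_RESV_USAGE_KERNEL and letting consumers pick them up through the resv, instead of tracking a single vm->rebind_fence and wiring it into each job by hand:

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

/*
 * Producer side: attach a rebind fence to the reservation object as a
 * kernel fence. The caller is assumed to hold the resv lock and to have
 * reserved a fence slot (dma_resv_reserve_fences()) beforehand.
 */
static void publish_rebind_fence(struct dma_resv *resv, struct dma_fence *fence)
{
	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_KERNEL);
}

/*
 * Consumer side: a scheduler job that depends on all kernel fences of the
 * resv automatically waits for every published rebind, with no per-VM
 * rebind_fence bookkeeping and no ordering assumptions between fences.
 */
static int order_job_behind_rebinds(struct drm_sched_job *job, struct dma_resv *resv)
{
	return drm_sched_job_add_resv_dependencies(job, resv,
						   DMA_RESV_USAGE_KERNEL);
}

A synchronous waiter, like the preempt rebind worker in the diff below, gets the same ordering by waiting on DMA_RESV_USAGE_KERNEL fences via dma_resv_wait_timeout().
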
@@ -152,7 +152,6 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	struct drm_exec *exec = &vm_exec.exec;
 	u32 i, num_syncs = 0, num_ufence = 0;
 	struct xe_sched_job *job;
-	struct dma_fence *rebind_fence;
 	struct xe_vm *vm;
 	bool write_locked, skip_retry = false;
 	ktime_t end = 0;
@@ -294,35 +293,11 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	 * Rebind any invalidated userptr or evicted BOs in the VM, non-compute
 	 * VM mode only.
 	 */
-	rebind_fence = xe_vm_rebind(vm, false);
-	if (IS_ERR(rebind_fence)) {
-		err = PTR_ERR(rebind_fence);
+	err = xe_vm_rebind(vm, false);
+	if (err)
 		goto err_put_job;
-	}
-
-	/*
-	 * We store the rebind_fence in the VM so subsequent execs don't get
-	 * scheduled before the rebinds of userptrs / evicted BOs is complete.
-	 */
-	if (rebind_fence) {
-		dma_fence_put(vm->rebind_fence);
-		vm->rebind_fence = rebind_fence;
-	}
-	if (vm->rebind_fence) {
-		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-			     &vm->rebind_fence->flags)) {
-			dma_fence_put(vm->rebind_fence);
-			vm->rebind_fence = NULL;
-		} else {
-			dma_fence_get(vm->rebind_fence);
-			err = drm_sched_job_add_dependency(&job->drm,
-							   vm->rebind_fence);
-			if (err)
-				goto err_put_job;
-		}
-	}
 
-	/* Wait behind munmap style rebinds */
+	/* Wait behind rebinds */
 	if (!xe_vm_in_lr_mode(vm)) {
 		err = drm_sched_job_add_resv_dependencies(&job->drm,
							  xe_vm_resv(vm),
...
@@ -1299,7 +1299,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
 	}
 
 	/* add shared fence now for pagetable delayed destroy */
-	dma_resv_add_fence(xe_vm_resv(vm), fence, !rebind &&
+	dma_resv_add_fence(xe_vm_resv(vm), fence, rebind ||
			   last_munmap_rebind ?
			   DMA_RESV_USAGE_KERNEL :
			   DMA_RESV_USAGE_BOOKKEEP);
...
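
The condition change in __xe_pt_bind_vma above is the core of the rework: previously only the final munmap-style rebind fence was added with kernel usage, now every rebind fence is. Spelled out as standalone helpers (a paraphrase of the diff for illustration, not driver code; old_usage()/new_usage() are invented names):

#include <linux/dma-resv.h>

/* Old behaviour: kernel usage only for the last munmap-style rebind. */
static enum dma_resv_usage old_usage(bool rebind, bool last_munmap_rebind)
{
	return (!rebind && last_munmap_rebind) ?
		DMA_RESV_USAGE_KERNEL : DMA_RESV_USAGE_BOOKKEEP;
}

/* New behaviour: any rebind, or the last munmap-style rebind, uses kernel usage. */
static enum dma_resv_usage new_usage(bool rebind, bool last_munmap_rebind)
{
	return (rebind || last_munmap_rebind) ?
		DMA_RESV_USAGE_KERNEL : DMA_RESV_USAGE_BOOKKEEP;
}
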
@@ -522,7 +522,6 @@ static void preempt_rebind_work_func(struct work_struct *w)
 {
 	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
 	struct drm_exec exec;
-	struct dma_fence *rebind_fence;
 	unsigned int fence_count = 0;
 	LIST_HEAD(preempt_fences);
 	ktime_t end = 0;
@@ -568,18 +567,11 @@ static void preempt_rebind_work_func(struct work_struct *w)
 	if (err)
 		goto out_unlock;
 
-	rebind_fence = xe_vm_rebind(vm, true);
-	if (IS_ERR(rebind_fence)) {
-		err = PTR_ERR(rebind_fence);
+	err = xe_vm_rebind(vm, true);
+	if (err)
 		goto out_unlock;
-	}
-
-	if (rebind_fence) {
-		dma_fence_wait(rebind_fence, false);
-		dma_fence_put(rebind_fence);
-	}
 
-	/* Wait on munmap style VM unbinds */
+	/* Wait on rebinds and munmap style VM unbinds */
 	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
				     DMA_RESV_USAGE_KERNEL,
				     false, MAX_SCHEDULE_TIMEOUT);
@@ -773,14 +765,14 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
		       struct xe_sync_entry *syncs, u32 num_syncs,
		       bool first_op, bool last_op);
 
-struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
+int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
 {
-	struct dma_fence *fence = NULL;
+	struct dma_fence *fence;
 	struct xe_vma *vma, *next;
 
 	lockdep_assert_held(&vm->lock);
 	if (xe_vm_in_lr_mode(vm) && !rebind_worker)
-		return NULL;
+		return 0;
 
 	xe_vm_assert_held(vm);
 	list_for_each_entry_safe(vma, next, &vm->rebind_list,
@@ -788,17 +780,17 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
 		xe_assert(vm->xe, vma->tile_present);
 
 		list_del_init(&vma->combined_links.rebind);
-		dma_fence_put(fence);
 		if (rebind_worker)
			trace_xe_vma_rebind_worker(vma);
 		else
			trace_xe_vma_rebind_exec(vma);
 		fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false);
 		if (IS_ERR(fence))
-			return fence;
+			return PTR_ERR(fence);
+		dma_fence_put(fence);
 	}
 
-	return fence;
+	return 0;
 }
 
 static void xe_vma_free(struct xe_vma *vma)
@@ -1589,7 +1581,6 @@ static void vm_destroy_work_func(struct work_struct *w)
 		XE_WARN_ON(vm->pt_root[id]);
 
 	trace_xe_vm_free(vm);
-	dma_fence_put(vm->rebind_fence);
 	kfree(vm);
 }
...
@@ -207,7 +207,7 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
 
 int xe_vm_userptr_check_repin(struct xe_vm *vm);
 
-struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
+int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
 
 int xe_vm_invalidate_vma(struct xe_vma *vma);
...
@@ -177,9 +177,6 @@ struct xe_vm {
	 */
	struct list_head rebind_list;
 
-	/** @rebind_fence: rebind fence from execbuf */
-	struct dma_fence *rebind_fence;
-
	/**
	 * @destroy_work: worker to destroy VM, needed as a dma_fence signaling
	 * from an irq context can be last put and the destroy needs to be able
...