Commit 8e41443e authored by Thomas Hellström's avatar Thomas Hellström Committed by Rodrigo Vivi

drm/xe/vm: Defer vm rebind until next exec if nothing to execute

If all compute engines of a vm in compute mode are idle,
defer a rebind to the next exec to avoid the VM unnecessarily trying
to make memory resident and compete with other VMs for available
memory space.
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 7cba3396
...@@ -364,6 +364,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) ...@@ -364,6 +364,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
&job->drm.s_fence->finished); &job->drm.s_fence->finished);
xe_sched_job_push(job); xe_sched_job_push(job);
xe_vm_reactivate_rebind(vm);
err_repin: err_repin:
if (!xe_vm_no_dma_fences(vm)) if (!xe_vm_no_dma_fences(vm))
......
...@@ -226,6 +226,19 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm) ...@@ -226,6 +226,19 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
return 0; return 0;
} }
static bool xe_vm_is_idle(struct xe_vm *vm)
{
struct xe_engine *e;
xe_vm_assert_held(vm);
list_for_each_entry(e, &vm->preempt.engines, compute.link) {
if (!xe_engine_is_idle(e))
return false;
}
return true;
}
static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list) static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
{ {
struct list_head *link; struct list_head *link;
...@@ -548,6 +561,11 @@ static void preempt_rebind_work_func(struct work_struct *w) ...@@ -548,6 +561,11 @@ static void preempt_rebind_work_func(struct work_struct *w)
if (err) if (err)
goto out_unlock_outer; goto out_unlock_outer;
if (xe_vm_is_idle(vm)) {
vm->preempt.rebind_deactivated = true;
goto out_unlock;
}
/* Fresh preempt fences already installed. Everything is running. */ /* Fresh preempt fences already installed. Everything is running. */
if (!preempt_fences_waiting(vm)) if (!preempt_fences_waiting(vm))
goto out_unlock; goto out_unlock;
......
...@@ -98,6 +98,23 @@ extern struct ttm_device_funcs xe_ttm_funcs; ...@@ -98,6 +98,23 @@ extern struct ttm_device_funcs xe_ttm_funcs;
struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm); struct ttm_buffer_object *xe_vm_ttm_bo(struct xe_vm *vm);
/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was deactivated because there
 * was no work to execute, reactivate it and queue the rebind worker so that
 * pending rebinds are picked up again. This function should be called after
 * submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
if (xe_vm_in_compute_mode(vm) && vm->preempt.rebind_deactivated) {
vm->preempt.rebind_deactivated = false;
queue_work(system_unbound_wq, &vm->preempt.rebind_work);
}
}
static inline bool xe_vma_is_userptr(struct xe_vma *vma) static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{ {
return !vma->bo; return !vma->bo;
......
...@@ -293,6 +293,11 @@ struct xe_vm { ...@@ -293,6 +293,11 @@ struct xe_vm {
struct list_head engines; struct list_head engines;
/** @num_engines: number user engines attached to this VM */ /** @num_engines: number user engines attached to this VM */
int num_engines; int num_engines;
/**
* @rebind_deactivated: Whether rebind has been temporarily deactivated
* due to no work available. Protected by the vm resv.
*/
bool rebind_deactivated;
/** /**
* @rebind_work: worker to rebind invalidated userptrs / evicted * @rebind_work: worker to rebind invalidated userptrs / evicted
* BOs * BOs
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment