Commit 19c02225 authored by Brian Welty, committed by Thomas Hellström

drm/xe: Fix modifying exec_queue priority in xe_migrate_init

After an exec_queue has been created, we cannot simply modify q->priority.
This needs to be done by the backend via q->ops. However, in this case it
would be more efficient to simply pass a flag when creating the exec_queue
and set the desired priority upfront during queue creation.

To that end, a new flag EXEC_QUEUE_FLAG_HIGH_PRIORITY is introduced.
The priority field is moved to sit with the other scheduling properties and
is now exec_queue.sched_props.priority. It is no longer set to an initial
value by the backend, but is now set within __xe_exec_queue_create().
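
In short (a condensed sketch assembled from the diff below, not a new API):
the creation site requests the priority via the new flag, and
__xe_exec_queue_create() translates it into sched_props.priority, so no
post-creation q->ops call is needed for the migrate queue:

	/* creation site (xe_migrate_init) asks for high priority up front */
	m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
				    EXEC_QUEUE_FLAG_KERNEL |
				    EXEC_QUEUE_FLAG_PERMANENT |
				    EXEC_QUEUE_FLAG_HIGH_PRIORITY);

	/* __xe_exec_queue_create() maps the flag onto the scheduling props */
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;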

Fixes: b4eecedc ("drm/xe: Fix potential deadlock handling page faults")
Signed-off-by: Brian Welty <brian.welty@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
(cherry picked from commit a8004af3)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
parent fef257eb
@@ -67,6 +67,11 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
 	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
 	q->sched_props.preempt_timeout_us =
 				hwe->eclass->sched_props.preempt_timeout_us;
+	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
+	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
+		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
+	else
+		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

 	if (xe_exec_queue_is_parallel(q)) {
 		q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
...
@@ -52,8 +52,6 @@ struct xe_exec_queue {
 	struct xe_vm *vm;
 	/** @class: class of this exec queue */
 	enum xe_engine_class class;
-	/** @priority: priority of this exec queue */
-	enum xe_exec_queue_priority priority;
 	/**
 	 * @logical_mask: logical mask of where job submitted to exec queue can run
 	 */
@@ -84,6 +82,8 @@ struct xe_exec_queue {
 #define EXEC_QUEUE_FLAG_VM BIT(4)
 /* child of VM queue for multi-tile VM jobs */
 #define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(5)
+/* kernel exec_queue only, set priority to highest level */
+#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(6)

 	/**
 	 * @flags: flags for this exec queue, should statically setup aside from ban
@@ -142,6 +142,8 @@ struct xe_exec_queue {
 		u32 timeslice_us;
 		/** @preempt_timeout_us: preemption timeout in micro-seconds */
 		u32 preempt_timeout_us;
+		/** @priority: priority of this exec queue */
+		enum xe_exec_queue_priority priority;
 	} sched_props;

 	/** @compute: compute exec queue state */
...
@@ -421,7 +421,7 @@ static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
 {
 	struct exec_queue_policy policy;
 	struct xe_device *xe = guc_to_xe(guc);
-	enum xe_exec_queue_priority prio = q->priority;
+	enum xe_exec_queue_priority prio = q->sched_props.priority;
 	u32 timeslice_us = q->sched_props.timeslice_us;
 	u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
@@ -1231,7 +1231,6 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
 	err = xe_sched_entity_init(&ge->entity, sched);
 	if (err)
 		goto err_sched;
-	q->priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

 	if (xe_exec_queue_is_lr(q))
 		INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
@@ -1301,14 +1300,14 @@ static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
 {
 	struct xe_sched_msg *msg;

-	if (q->priority == priority || exec_queue_killed_or_banned(q))
+	if (q->sched_props.priority == priority || exec_queue_killed_or_banned(q))
 		return 0;

 	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
 	if (!msg)
 		return -ENOMEM;

-	q->priority = priority;
+	q->sched_props.priority = priority;
 	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);

 	return 0;
...
@@ -344,7 +344,8 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)

 		m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
					    EXEC_QUEUE_FLAG_KERNEL |
-					    EXEC_QUEUE_FLAG_PERMANENT);
+					    EXEC_QUEUE_FLAG_PERMANENT |
+					    EXEC_QUEUE_FLAG_HIGH_PRIORITY);
 	} else {
 		m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
						  XE_ENGINE_CLASS_COPY,
@@ -355,8 +356,6 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 		xe_vm_close_and_put(vm);
 		return ERR_CAST(m->q);
 	}
-	if (xe->info.has_usm)
-		m->q->priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;

 	mutex_init(&m->job_mutex);
...