Commit 6e144a7d authored by Brian Welty, committed by Matthew Brost

drm/xe: Refactor __xe_exec_queue_create()

Split __xe_exec_queue_create() into two functions, alloc and init.

We have an issue in that exec_queue_user_extensions are applied too late.
In the case of USM properties, these need to be set prior to xe_lrc_init().
Refactor the logic here so we can resolve this in a follow-on change. We only
need the xe_vm_lock held during __xe_exec_queue_init().
Signed-off-by: Brian Welty <brian.welty@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
parent a109d199
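
For readability, here is the resulting create path condensed from the hunks below into a single listing; the inline comments are added for this summary and are not part of the patch. Allocation and property setup move into __xe_exec_queue_alloc(), and only __xe_exec_queue_init() runs under xe_vm_lock().

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
                                           u32 logical_mask, u16 width,
                                           struct xe_hw_engine *hwe, u32 flags)
{
        struct xe_exec_queue *q;
        int err;

        /* Allocation and property setup; no VM lock is needed here. */
        q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags);
        if (IS_ERR(q))
                return q;

        /* Only the LRC initialization in __xe_exec_queue_init() needs the VM lock. */
        if (vm) {
                err = xe_vm_lock(vm, true);
                if (err)
                        goto err_post_alloc;
        }

        err = __xe_exec_queue_init(q);
        if (vm)
                xe_vm_unlock(vm);
        if (err)
                goto err_post_alloc;

        return q;

err_post_alloc:
        __xe_exec_queue_free(q);
        return ERR_PTR(err);
}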
@@ -30,16 +30,14 @@ enum xe_exec_queue_sched_prop {
         XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
 };

-static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
-                                                    struct xe_vm *vm,
-                                                    u32 logical_mask,
-                                                    u16 width, struct xe_hw_engine *hwe,
-                                                    u32 flags)
+static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
+                                                   struct xe_vm *vm,
+                                                   u32 logical_mask,
+                                                   u16 width, struct xe_hw_engine *hwe,
+                                                   u32 flags)
 {
         struct xe_exec_queue *q;
         struct xe_gt *gt = hwe->gt;
-        int err;
-        int i;

         /* only kernel queues can be permanent */
         XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));
@@ -82,8 +80,23 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
                 q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
         }

-        for (i = 0; i < width; ++i) {
-                err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K);
+        return q;
+}
+
+static void __xe_exec_queue_free(struct xe_exec_queue *q)
+{
+        if (q->vm)
+                xe_vm_put(q->vm);
+        kfree(q);
+}
+
+static int __xe_exec_queue_init(struct xe_exec_queue *q)
+{
+        struct xe_device *xe = gt_to_xe(q->gt);
+        int i, err;
+
+        for (i = 0; i < q->width; ++i) {
+                err = xe_lrc_init(q->lrc + i, q->hwe, q, q->vm, SZ_16K);
                 if (err)
                         goto err_lrc;
         }
@@ -100,16 +113,15 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
          * can perform GuC CT actions when needed. Caller is expected to have
          * already grabbed the rpm ref outside any sensitive locks.
          */
-        if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !vm))
+        if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
                 drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));

-        return q;
+        return 0;

 err_lrc:
         for (i = i - 1; i >= 0; --i)
                 xe_lrc_finish(q->lrc + i);
-        kfree(q);
-        return ERR_PTR(err);
+        return err;
 }

 struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
@@ -119,16 +131,27 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
         struct xe_exec_queue *q;
         int err;

+        q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags);
+        if (IS_ERR(q))
+                return q;
+
         if (vm) {
                 err = xe_vm_lock(vm, true);
                 if (err)
-                        return ERR_PTR(err);
+                        goto err_post_alloc;
         }
-        q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags);
+
+        err = __xe_exec_queue_init(q);
         if (vm)
                 xe_vm_unlock(vm);
+        if (err)
+                goto err_post_alloc;

         return q;
+
+err_post_alloc:
+        __xe_exec_queue_free(q);
+        return ERR_PTR(err);
 }

 struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
@@ -179,10 +202,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
                 xe_lrc_finish(q->lrc + i);
         if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
                 xe_device_mem_access_put(gt_to_xe(q->gt));
-        if (q->vm)
-                xe_vm_put(q->vm);
-
-        kfree(q);
+        __xe_exec_queue_free(q);
 }

 void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
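
Note that with this split, the error path in xe_exec_queue_create() and the teardown in xe_exec_queue_fini() both release the queue through the same helper, __xe_exec_queue_free(), which drops the VM reference and frees the queue in one place.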