Commit cb90d469 authored by Daniele Ceraolo Spurio, committed by Rodrigo Vivi

drm/xe: Add child contexts to the GuC context lookup

The CAT_ERROR message from the GuC provides the guc id of the context
that caused the problem, which can be a child context. We therefore
need to be able to match that id to the exec_queue that owns it, which
we do by adding child contexts to the context lookup.

While at it, fix the error path of the guc id allocation code to
correctly free the ids allocated for parallel queues.

v2: rebase on s/XE_WARN_ON/xe_assert

Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/590
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 0d053475
@@ -247,10 +247,28 @@ int xe_guc_submit_init(struct xe_guc *guc)
return 0;
}
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
{
int i;
lockdep_assert_held(&guc->submission_state.lock);
for (i = 0; i < xa_count; ++i)
xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);
if (xe_exec_queue_is_parallel(q))
bitmap_release_region(guc->submission_state.guc_ids_bitmap,
q->guc->id - GUC_ID_START_MLRC,
order_base_2(q->width));
else
ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id);
}
static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
{
int ret;
void *ptr;
int i;
/*
* Must use GFP_NOWAIT as this lock is in the dma fence signalling path,
@@ -277,30 +295,27 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
if (xe_exec_queue_is_parallel(q))
q->guc->id += GUC_ID_START_MLRC;
ptr = xa_store(&guc->submission_state.exec_queue_lookup,
q->guc->id, q, GFP_NOWAIT);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
goto err_release;
for (i = 0; i < q->width; ++i) {
ptr = xa_store(&guc->submission_state.exec_queue_lookup,
q->guc->id + i, q, GFP_NOWAIT);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
goto err_release;
}
}
return 0;
err_release:
ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id);
__release_guc_id(guc, q, i);
return ret;
}
static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
{
mutex_lock(&guc->submission_state.lock);
xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id);
if (xe_exec_queue_is_parallel(q))
bitmap_release_region(guc->submission_state.guc_ids_bitmap,
q->guc->id - GUC_ID_START_MLRC,
order_base_2(q->width));
else
ida_simple_remove(&guc->submission_state.guc_ids, q->guc->id);
__release_guc_id(guc, q, q->width);
mutex_unlock(&guc->submission_state.lock);
}
@@ -1489,7 +1504,8 @@ g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
return NULL;
}
xe_assert(xe, q->guc->id == guc_id);
xe_assert(xe, guc_id >= q->guc->id);
xe_assert(xe, guc_id < (q->guc->id + q->width));
return q;
}
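The following standalone userspace sketch (not part of the patch; all names such as toy_queue, toy_register and toy_lookup are hypothetical stand-ins) illustrates the idea behind the change: each exec_queue registers "width" consecutive guc ids in the lookup, so an id reported for a child context still resolves to the owning queue, and a failed registration only rolls back the entries that were actually stored, mirroring the fixed error path.

```c
/*
 * Illustrative sketch only: models the contiguous guc-id block per queue
 * and the partial rollback on error. It does not use kernel APIs.
 */
#include <assert.h>
#include <stdio.h>

#define MAX_GUC_ID 64

struct toy_queue {
	unsigned int guc_id;	/* id of the parent context */
	unsigned int width;	/* parent + children occupy guc_id..guc_id+width-1 */
};

static struct toy_queue *lookup[MAX_GUC_ID];	/* stand-in for the xarray */

/* Mirror of the fixed error path: erase only the entries already stored. */
static void toy_release(struct toy_queue *q, unsigned int count)
{
	for (unsigned int i = 0; i < count; ++i)
		lookup[q->guc_id + i] = NULL;
}

/* Register parent and child ids; roll back on (simulated) store failure. */
static int toy_register(struct toy_queue *q)
{
	for (unsigned int i = 0; i < q->width; ++i) {
		if (q->guc_id + i >= MAX_GUC_ID) {	/* simulated failure */
			toy_release(q, i);
			return -1;
		}
		lookup[q->guc_id + i] = q;
	}
	return 0;
}

/* Mirror of the relaxed lookup asserts: any id in the block maps to q. */
static struct toy_queue *toy_lookup(unsigned int guc_id)
{
	struct toy_queue *q = guc_id < MAX_GUC_ID ? lookup[guc_id] : NULL;

	if (q) {
		assert(guc_id >= q->guc_id);
		assert(guc_id < q->guc_id + q->width);
	}
	return q;
}

int main(void)
{
	struct toy_queue q = { .guc_id = 8, .width = 4 };	/* parallel queue */

	if (toy_register(&q))
		return 1;

	/* A CAT_ERROR-style report naming a child id still finds the owner. */
	printf("id 10 -> queue with parent id %u\n", toy_lookup(10)->guc_id);
	return 0;
}
```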