Commit 50aec966 authored by Matthew Brost

drm/xe: Use ordered WQ for G2H handler

System work queues are shared; use a dedicated work queue for G2H
processing to avoid G2H processing getting blocked behind other system tasks.

Fixes: dd08ebf6 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Cc: <stable@vger.kernel.org>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Francois Dugast <francois.dugast@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240506034758.3697397-1-matthew.brost@intel.com
parent 9fbd0adb
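For context, a minimal sketch of the pattern this patch applies, assuming nothing beyond the standard linux/workqueue.h API; the example_* names are illustrative stand-ins, not driver code. The lifecycle is: allocate a dedicated ordered workqueue at init, queue the G2H worker on it instead of a shared system workqueue, and destroy it on teardown. An ordered workqueue runs at most one work item at a time, in queueing order, and because the queue is driver-owned the work no longer competes with unrelated items on system_unbound_wq.

#include <linux/workqueue.h>
#include <linux/errno.h>

/* Hypothetical stand-ins for ct->g2h_wq and ct->g2h_worker. */
static struct workqueue_struct *example_wq;
static struct work_struct example_worker;

static void example_worker_func(struct work_struct *work)
{
	/* Process pending G2H messages here. */
}

static int example_init(void)
{
	/* Dedicated ordered workqueue: one item at a time, in order. */
	example_wq = alloc_ordered_workqueue("example-g2h-wq", 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_worker, example_worker_func);
	return 0;
}

static void example_irq_handler(void)
{
	/* Queue on the dedicated workqueue, not a shared system one. */
	queue_work(example_wq, &example_worker);
}

static void example_fini(void)
{
	destroy_workqueue(example_wq);
}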
@@ -121,6 +121,7 @@ static void guc_ct_fini(struct drm_device *drm, void *arg)
 {
 	struct xe_guc_ct *ct = arg;
 
+	destroy_workqueue(ct->g2h_wq);
 	xa_destroy(&ct->fence_lookup);
 }
@@ -146,6 +147,10 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
 	xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
 
+	ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0);
+	if (!ct->g2h_wq)
+		return -ENOMEM;
+
 	spin_lock_init(&ct->fast_lock);
 	xa_init(&ct->fence_lookup);
 	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
@@ -34,7 +34,7 @@ static inline void xe_guc_ct_irq_handler(struct xe_guc_ct *ct)
 		return;
 
 	wake_up_all(&ct->wq);
-	queue_work(system_unbound_wq, &ct->g2h_worker);
+	queue_work(ct->g2h_wq, &ct->g2h_worker);
 	xe_guc_ct_fast_path(ct);
 }
@@ -120,6 +120,8 @@ struct xe_guc_ct {
 	wait_queue_head_t wq;
 	/** @g2h_fence_wq: wait queue used for G2H fencing */
 	wait_queue_head_t g2h_fence_wq;
+	/** @g2h_wq: used to process G2H */
+	struct workqueue_struct *g2h_wq;
 	/** @msg: Message buffer */
 	u32 msg[GUC_CTB_MSG_MAX_LEN];
 	/** @fast_msg: Message buffer */