Commit b0d83888 authored by Matthew Brost, committed by John Harrison

drm/i915/guc: Release submit fence from an irq_work

A subsequent patch will flip the locking hierarchy from
ce->guc_state.lock -> sched_engine->lock to sched_engine->lock ->
ce->guc_state.lock. As such we need to release the submit fence for a
request from an irq_work to break a lock inversion - i.e. the fence must
be released while holding ce->guc_state.lock, and releasing the fence can
acquire sched_engine->lock.
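
For illustration only (not part of this patch): the deferral pattern looks
roughly like the sketch below, with hypothetical names (struct foo, foo_cb,
lock_b). Work that needs lock B is queued as an irq_work while holding
lock A, so the callback runs later outside lock A and the inverted
ordering never occurs.

#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

struct foo {
	struct irq_work work;
	spinlock_t lock_b;	/* analogue of sched_engine->lock */
};

static void foo_cb(struct irq_work *wrk)
{
	struct foo *f = container_of(wrk, typeof(*f), work);

	/*
	 * Runs later from IRQ context, outside lock A, so taking
	 * lock B here cannot invert against lock A.
	 */
	spin_lock(&f->lock_b);
	/* ... do the deferred work ... */
	spin_unlock(&f->lock_b);
}

/* Caller holds lock A (analogue of ce->guc_state.lock). */
static void foo_defer_locked(struct foo *f)
{
	init_irq_work(&f->work, foo_cb);
	irq_work_queue(&f->work);	/* foo_cb fires after lock A drops */
}

In the patch itself the irq_work is initialized in guc_request_alloc() and
queued from __guc_signal_context_fence() with ce->guc_state.lock held.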

v2:
 (Daniele)
  - Delete request from list before calling irq_work_queue
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-16-matthew.brost@intel.com
parent ae36b629
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -2049,17 +2049,32 @@ static const struct intel_context_ops guc_context_ops = {
 	.create_virtual = guc_create_virtual,
 };
 
+static void submit_work_cb(struct irq_work *wrk)
+{
+	struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
+
+	might_lock(&rq->engine->sched_engine->lock);
+	i915_sw_fence_complete(&rq->submit);
+}
+
 static void __guc_signal_context_fence(struct intel_context *ce)
 {
-	struct i915_request *rq;
+	struct i915_request *rq, *rn;
 
 	lockdep_assert_held(&ce->guc_state.lock);
 
 	if (!list_empty(&ce->guc_state.fences))
 		trace_intel_context_fence_release(ce);
 
-	list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link)
-		i915_sw_fence_complete(&rq->submit);
+	/*
+	 * Use an IRQ to ensure locking order of sched_engine->lock ->
+	 * ce->guc_state.lock is preserved.
+	 */
+	list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
+				 guc_fence_link) {
+		list_del(&rq->guc_fence_link);
+		irq_work_queue(&rq->submit_work);
+	}
 
 	INIT_LIST_HEAD(&ce->guc_state.fences);
 }
@@ -2169,6 +2184,7 @@ static int guc_request_alloc(struct i915_request *rq)
 	spin_lock_irqsave(&ce->guc_state.lock, flags);
 	if (context_wait_for_deregister_to_register(ce) ||
 	    context_pending_disable(ce)) {
+		init_irq_work(&rq->submit_work, submit_work_cb);
 		i915_sw_fence_await(&rq->submit);
 
 		list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -218,6 +218,11 @@ struct i915_request {
 	};
 	struct llist_head execute_cb;
 	struct i915_sw_fence semaphore;
+	/**
+	 * @submit_work: complete submit fence from an IRQ if needed for
+	 * locking hierarchy reasons.
+	 */
+	struct irq_work submit_work;
 
 	/*
 	 * A list of everyone we wait upon, and everyone who waits upon us.
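
A side note on the might_lock() annotation in submit_work_cb(): it tells
lockdep that the callback may acquire sched_engine->lock, so the new
sched_engine->lock -> ce->guc_state.lock ordering is checked even on runs
where i915_sw_fence_complete() takes a path that never touches that lock.
A minimal usage sketch, with hypothetical names (bar_lock, maybe_touch_bar
are illustrative, not from this patch):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(bar_lock);

static void maybe_touch_bar(bool fast_path)
{
	/* Record the potential lock dependency on every call ... */
	might_lock(&bar_lock);

	/* ... even when the lock is only taken on the slow path. */
	if (!fast_path) {
		mutex_lock(&bar_lock);
		/* ... */
		mutex_unlock(&bar_lock);
	}
}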