Commit 1f5cdb06 authored by Matthew Brost, committed by John Harrison

drm/i915/guc: Extend deregistration fence to schedule disable

Extend the deregistration context fence to also fence when a GuC context
has a schedule disable pending.

v2:
 (John H)
  - Update comment explaining why we check the pin count within the spin lock

Cc: John Harrison <john.c.harrison@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: John Harrison <john.c.harrison@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210721215101.139794-11-matthew.brost@intel.com
parent b8b183ab
@@ -930,7 +930,22 @@ static void guc_context_sched_disable(struct intel_context *ce)
 		goto unpin;
 
 	spin_lock_irqsave(&ce->guc_state.lock, flags);
+
+	/*
+	 * We have to check if the context has been pinned again as another pin
+	 * operation is allowed to pass this function. Checking the pin count,
+	 * within ce->guc_state.lock, synchronizes this function with
+	 * guc_request_alloc ensuring a request doesn't slip through the
+	 * 'context_pending_disable' fence. Checking within the spin lock (can't
+	 * sleep) ensures another process doesn't pin this context and generate
+	 * a request before we set the 'context_pending_disable' flag here.
+	 */
+	if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
+		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+		return;
+	}
 	guc_id = prep_context_pending_disable(ce);
+
 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
 	with_intel_runtime_pm(runtime_pm, wakeref)
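The pin-count check above is easy to misread, so here is a minimal, compilable userspace sketch of the same pattern, with a pthread mutex and C11 atomics standing in for the kernel's spinlock and atomic_t. All names here (struct ctx, struct request, sched_disable(), the flag bits) are invented for illustration and are not the i915 API; the GuC messaging itself is omitted.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum {
	WAIT_DEREG      = 1 << 0,	/* deregister-to-register pending */
	PENDING_DISABLE = 1 << 1,	/* schedule-disable G2H pending */
};

struct request {
	struct request *next;
	bool blocked;			/* stands in for an i915_sw_fence await */
};

struct ctx {
	pthread_mutex_t lock;		/* stands in for ce->guc_state.lock */
	atomic_int pin_count;		/* stands in for ce->pin_count */
	atomic_uint flags;		/* pending-G2H flag bits */
	struct request *fences;		/* stands in for ce->guc_state.fences */
};

/* Returns true if we committed to a schedule disable. */
static bool sched_disable(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);

	/*
	 * Equivalent of atomic_add_unless(&ce->pin_count, -2, 2): drop our
	 * two references unless the count is exactly 2. A higher count means
	 * another pin (and possibly a request) raced in, so skip the disable.
	 */
	int cur = atomic_load(&c->pin_count);
	while (cur != 2) {
		if (atomic_compare_exchange_weak(&c->pin_count, &cur, cur - 2)) {
			pthread_mutex_unlock(&c->lock);
			return false;	/* re-pinned: context stays enabled */
		}
	}

	/* Set inside the lock, so request_alloc() below cannot miss it. */
	atomic_fetch_or(&c->flags, PENDING_DISABLE);
	pthread_mutex_unlock(&c->lock);
	return true;			/* caller would now send the H2G */
}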
@@ -1135,19 +1150,22 @@ static int guc_request_alloc(struct i915_request *rq)
 out:
 	/*
 	 * We block all requests on this context if a G2H is pending for a
-	 * context deregistration as the GuC will fail a context registration
-	 * while this G2H is pending. Once a G2H returns, the fence is released
-	 * that is blocking these requests (see guc_signal_context_fence).
+	 * schedule disable or context deregistration as the GuC will fail a
+	 * schedule enable or context registration if either G2H is pending
+	 * respectively. Once a G2H returns, the fence is released that is
+	 * blocking these requests (see guc_signal_context_fence).
 	 *
-	 * We can safely check the below field outside of the lock as it isn't
-	 * possible for this field to transition from being clear to set but
+	 * We can safely check the below fields outside of the lock as it isn't
+	 * possible for these fields to transition from being clear to set but
 	 * converse is possible, hence the need for the check within the lock.
 	 */
-	if (likely(!context_wait_for_deregister_to_register(ce)))
+	if (likely(!context_wait_for_deregister_to_register(ce) &&
+		   !context_pending_disable(ce)))
 		return 0;
 
 	spin_lock_irqsave(&ce->guc_state.lock, flags);
-	if (context_wait_for_deregister_to_register(ce)) {
+	if (context_wait_for_deregister_to_register(ce) ||
+	    context_pending_disable(ce)) {
 		i915_sw_fence_await(&rq->submit);
 
 		list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
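Continuing the sketch: the double check in guc_request_alloc() works because on this path the flags only ever go from set to clear, never clear to set (the pin-count check above is what rules out clear-to-set while a request is being created), so the unlocked fast path can trust a clear reading. request_alloc() below is again an invented stand-in, not the driver function.

static int request_alloc(struct ctx *c, struct request *rq)
{
	/* Unlocked fast path: a clear flag cannot become set under us. */
	if (!(atomic_load(&c->flags) & (WAIT_DEREG | PENDING_DISABLE)))
		return 0;

	pthread_mutex_lock(&c->lock);
	/* Re-check under the lock: the G2H may have landed meanwhile. */
	if (atomic_load(&c->flags) & (WAIT_DEREG | PENDING_DISABLE)) {
		rq->blocked = true;	/* i915_sw_fence_await(&rq->submit) */
		rq->next = c->fences;	/* list_add_tail() on guc_fence_link */
		c->fences = rq;
	}
	pthread_mutex_unlock(&c->lock);
	return 0;
}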
@@ -1491,10 +1509,18 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
 	if (context_pending_enable(ce)) {
 		clr_context_pending_enable(ce);
 	} else if (context_pending_disable(ce)) {
+		/*
+		 * Unpin must be done before __guc_signal_context_fence,
+		 * otherwise a race exists between the requests getting
+		 * submitted + retired before this unpin completes resulting in
+		 * the pin_count going to zero and the context still being
+		 * enabled.
+		 */
 		intel_context_sched_disable_unpin(ce);
 
 		spin_lock_irqsave(&ce->guc_state.lock, flags);
 		clr_context_pending_disable(ce);
+		__guc_signal_context_fence(ce);
 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 	}
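The new comment in the handler pins down an ordering requirement: unpin first, then signal. If the blocked requests were released first, one could be submitted and retired before the unpin, driving the pin count to zero while scheduling is still enabled. A last piece of the sketch follows, assuming the disable path holds two pin references as above; sched_disable_done() and the main() driver are illustrative only.

static void sched_disable_done(struct ctx *c)
{
	/* Unpin BEFORE releasing requests (see the comment in the diff). */
	atomic_fetch_sub(&c->pin_count, 2);	/* sched_disable_unpin stand-in */

	pthread_mutex_lock(&c->lock);
	atomic_fetch_and(&c->flags, ~PENDING_DISABLE);
	for (struct request *rq = c->fences; rq; rq = rq->next)
		rq->blocked = false;	/* __guc_signal_context_fence() */
	c->fences = NULL;
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct ctx c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.pin_count = 2,		/* context pin + scheduling reference */
	};
	struct request rq = { 0 };

	if (sched_disable(&c)) {		/* sets PENDING_DISABLE */
		request_alloc(&c, &rq);		/* rq blocks on the fence */
		printf("blocked=%d\n", rq.blocked);
		sched_disable_done(&c);		/* the G2H response arrives */
		printf("blocked=%d pin_count=%d\n",
		       rq.blocked, atomic_load(&c.pin_count));
	}
	return 0;
}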