Commit a7ac9d84 authored by Alan Previn, committed by John Harrison

drm/i915/guc: Remove intel_context:number_committed_requests counter

With the introduction of the delayed disable-sched behavior,
we use the GuC's xarray of valid guc_ids to identify whether
new requests have been added to a context while that context
is being checked for closure.

Additionally, that prior change also closes the race where a
new incoming request fails to cancel the pending delayed
disable-sched worker.

With these two complementary checks, there is no longer any
need for intel_context:guc_state:number_committed_requests.
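For illustration, a minimal sketch of the idea, assuming a hypothetical
xarray that maps currently valid guc_ids to their contexts; the type and
helper names below are invented for this example and are not the actual
i915 code:

#include <linux/xarray.h>

/* Hypothetical stand-in for the context type used in this sketch. */
struct example_context {
	unsigned long guc_id;	/* id held while committed requests exist */
};

/*
 * If the context's guc_id is still present in the xarray of valid ids,
 * requests have been committed against it, so a separate
 * number_committed_requests counter is not needed to answer the
 * "does this context still have work?" question.
 */
static bool example_context_has_work(struct xarray *valid_guc_ids,
				     struct example_context *ce)
{
	return xa_load(valid_guc_ids, ce->guc_id) == ce;
}

Closure checking then reduces to consulting the same lookup that request
submission already maintains, rather than a parallel counter.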
Signed-off-by: Alan Previn <alan.previn.teres.alexis@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221006225121.826257-3-alan.previn.teres.alexis@intel.com
parent 83321094
@@ -199,8 +199,6 @@ struct intel_context {
 		 * context's submissions is complete.
 		 */
 		struct i915_sw_fence blocked;
-		/** @number_committed_requests: number of committed requests */
-		int number_committed_requests;
 		/** @requests: list of active requests on this context */
 		struct list_head requests;
 		/** @prio: the context's current guc priority */
@@ -370,25 +370,6 @@ static inline void decr_context_blocked(struct intel_context *ce)
 	ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
 }
 
-static inline bool context_has_committed_requests(struct intel_context *ce)
-{
-	return !!ce->guc_state.number_committed_requests;
-}
-
-static inline void incr_context_committed_requests(struct intel_context *ce)
-{
-	lockdep_assert_held(&ce->guc_state.lock);
-	++ce->guc_state.number_committed_requests;
-	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
-}
-
-static inline void decr_context_committed_requests(struct intel_context *ce)
-{
-	lockdep_assert_held(&ce->guc_state.lock);
-	--ce->guc_state.number_committed_requests;
-	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
-}
-
 static struct intel_context *
 request_to_scheduling_context(struct i915_request *rq)
 {
@@ -3180,7 +3161,6 @@ static void __guc_context_destroy(struct intel_context *ce)
 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
-	GEM_BUG_ON(ce->guc_state.number_committed_requests);
 
 	lrc_fini(ce);
 	intel_context_fini(ce);
@@ -3449,8 +3429,6 @@ static void remove_from_context(struct i915_request *rq)
 
 	guc_prio_fini(rq, ce);
 
-	decr_context_committed_requests(ce);
-
 	spin_unlock_irq(&ce->guc_state.lock);
 
 	atomic_dec(&ce->guc_id.ref);
@@ -3659,7 +3637,6 @@ static int guc_request_alloc(struct i915_request *rq)
 		list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
 	}
 
-	incr_context_committed_requests(ce);
 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
 	return 0;