Commit af5bc9f2 authored by Matthew Brost, committed by John Harrison

drm/i915/guc: Drop guc_active move everything into guc_state

Now that we have the locking hierarchy of sched_engine->lock ->
ce->guc_state.lock, everything from guc_active can be moved into
guc_state and protected by guc_state.lock.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-23-matthew.brost@intel.com
parent 3cb3e343
...@@ -394,9 +394,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) ...@@ -394,9 +394,7 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
spin_lock_init(&ce->guc_state.lock); spin_lock_init(&ce->guc_state.lock);
INIT_LIST_HEAD(&ce->guc_state.fences); INIT_LIST_HEAD(&ce->guc_state.fences);
INIT_LIST_HEAD(&ce->guc_state.requests);
spin_lock_init(&ce->guc_active.lock);
INIT_LIST_HEAD(&ce->guc_active.requests);
ce->guc_id.id = GUC_INVALID_LRC_ID; ce->guc_id.id = GUC_INVALID_LRC_ID;
INIT_LIST_HEAD(&ce->guc_id.link); INIT_LIST_HEAD(&ce->guc_id.link);
...@@ -521,15 +519,15 @@ struct i915_request *intel_context_find_active_request(struct intel_context *ce) ...@@ -521,15 +519,15 @@ struct i915_request *intel_context_find_active_request(struct intel_context *ce)
GEM_BUG_ON(!intel_engine_uses_guc(ce->engine)); GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));
spin_lock_irqsave(&ce->guc_active.lock, flags); spin_lock_irqsave(&ce->guc_state.lock, flags);
list_for_each_entry_reverse(rq, &ce->guc_active.requests, list_for_each_entry_reverse(rq, &ce->guc_state.requests,
sched.link) { sched.link) {
if (i915_request_completed(rq)) if (i915_request_completed(rq))
break; break;
active = rq; active = rq;
} }
spin_unlock_irqrestore(&ce->guc_active.lock, flags); spin_unlock_irqrestore(&ce->guc_state.lock, flags);
return active; return active;
} }
......
...@@ -172,11 +172,6 @@ struct intel_context { ...@@ -172,11 +172,6 @@ struct intel_context {
struct i915_sw_fence blocked; struct i915_sw_fence blocked;
/* GuC committed requests */ /* GuC committed requests */
int number_committed_requests; int number_committed_requests;
} guc_state;
struct {
/** lock: protects everything in guc_active */
spinlock_t lock;
/** requests: active requests on this context */ /** requests: active requests on this context */
struct list_head requests; struct list_head requests;
/* /*
...@@ -184,7 +179,7 @@ struct intel_context { ...@@ -184,7 +179,7 @@ struct intel_context {
*/ */
u8 prio; u8 prio;
u32 prio_count[GUC_CLIENT_PRIORITY_NUM]; u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
} guc_active; } guc_state;
struct { struct {
/* GuC LRC descriptor ID */ /* GuC LRC descriptor ID */
......
...@@ -846,9 +846,9 @@ __unwind_incomplete_requests(struct intel_context *ce) ...@@ -846,9 +846,9 @@ __unwind_incomplete_requests(struct intel_context *ce)
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&sched_engine->lock, flags); spin_lock_irqsave(&sched_engine->lock, flags);
spin_lock(&ce->guc_active.lock); spin_lock(&ce->guc_state.lock);
list_for_each_entry_safe_reverse(rq, rn, list_for_each_entry_safe_reverse(rq, rn,
&ce->guc_active.requests, &ce->guc_state.requests,
sched.link) { sched.link) {
if (i915_request_completed(rq)) if (i915_request_completed(rq))
continue; continue;
...@@ -867,7 +867,7 @@ __unwind_incomplete_requests(struct intel_context *ce) ...@@ -867,7 +867,7 @@ __unwind_incomplete_requests(struct intel_context *ce)
list_add(&rq->sched.link, pl); list_add(&rq->sched.link, pl);
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
} }
spin_unlock(&ce->guc_active.lock); spin_unlock(&ce->guc_state.lock);
spin_unlock_irqrestore(&sched_engine->lock, flags); spin_unlock_irqrestore(&sched_engine->lock, flags);
} }
...@@ -962,10 +962,10 @@ static void guc_cancel_context_requests(struct intel_context *ce) ...@@ -962,10 +962,10 @@ static void guc_cancel_context_requests(struct intel_context *ce)
/* Mark all executing requests as skipped. */ /* Mark all executing requests as skipped. */
spin_lock_irqsave(&sched_engine->lock, flags); spin_lock_irqsave(&sched_engine->lock, flags);
spin_lock(&ce->guc_active.lock); spin_lock(&ce->guc_state.lock);
list_for_each_entry(rq, &ce->guc_active.requests, sched.link) list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
i915_request_put(i915_request_mark_eio(rq)); i915_request_put(i915_request_mark_eio(rq));
spin_unlock(&ce->guc_active.lock); spin_unlock(&ce->guc_state.lock);
spin_unlock_irqrestore(&sched_engine->lock, flags); spin_unlock_irqrestore(&sched_engine->lock, flags);
} }
...@@ -1416,7 +1416,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop) ...@@ -1416,7 +1416,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
desc->engine_submit_mask = adjust_engine_mask(engine->class, desc->engine_submit_mask = adjust_engine_mask(engine->class,
engine->mask); engine->mask);
desc->hw_context_desc = ce->lrc.lrca; desc->hw_context_desc = ce->lrc.lrca;
desc->priority = ce->guc_active.prio; desc->priority = ce->guc_state.prio;
desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD; desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
guc_context_policy_init(engine, desc); guc_context_policy_init(engine, desc);
...@@ -1811,10 +1811,10 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce) ...@@ -1811,10 +1811,10 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
static void __guc_context_destroy(struct intel_context *ce) static void __guc_context_destroy(struct intel_context *ce)
{ {
GEM_BUG_ON(ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] || GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_HIGH] || ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] || ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_NORMAL]); ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
GEM_BUG_ON(ce->guc_state.number_committed_requests); GEM_BUG_ON(ce->guc_state.number_committed_requests);
lrc_fini(ce); lrc_fini(ce);
...@@ -1924,17 +1924,17 @@ static void guc_context_set_prio(struct intel_guc *guc, ...@@ -1924,17 +1924,17 @@ static void guc_context_set_prio(struct intel_guc *guc,
GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH || GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
prio > GUC_CLIENT_PRIORITY_NORMAL); prio > GUC_CLIENT_PRIORITY_NORMAL);
lockdep_assert_held(&ce->guc_active.lock); lockdep_assert_held(&ce->guc_state.lock);
if (ce->guc_active.prio == prio || submission_disabled(guc) || if (ce->guc_state.prio == prio || submission_disabled(guc) ||
!context_registered(ce)) { !context_registered(ce)) {
ce->guc_active.prio = prio; ce->guc_state.prio = prio;
return; return;
} }
guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true); guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
ce->guc_active.prio = prio; ce->guc_state.prio = prio;
trace_intel_context_set_prio(ce); trace_intel_context_set_prio(ce);
} }
...@@ -1953,25 +1953,25 @@ static inline u8 map_i915_prio_to_guc_prio(int prio) ...@@ -1953,25 +1953,25 @@ static inline u8 map_i915_prio_to_guc_prio(int prio)
static inline void add_context_inflight_prio(struct intel_context *ce, static inline void add_context_inflight_prio(struct intel_context *ce,
u8 guc_prio) u8 guc_prio)
{ {
lockdep_assert_held(&ce->guc_active.lock); lockdep_assert_held(&ce->guc_state.lock);
GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_active.prio_count)); GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
++ce->guc_active.prio_count[guc_prio]; ++ce->guc_state.prio_count[guc_prio];
/* Overflow protection */ /* Overflow protection */
GEM_WARN_ON(!ce->guc_active.prio_count[guc_prio]); GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
} }
static inline void sub_context_inflight_prio(struct intel_context *ce, static inline void sub_context_inflight_prio(struct intel_context *ce,
u8 guc_prio) u8 guc_prio)
{ {
lockdep_assert_held(&ce->guc_active.lock); lockdep_assert_held(&ce->guc_state.lock);
GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_active.prio_count)); GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
/* Underflow protection */ /* Underflow protection */
GEM_WARN_ON(!ce->guc_active.prio_count[guc_prio]); GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
--ce->guc_active.prio_count[guc_prio]; --ce->guc_state.prio_count[guc_prio];
} }
static inline void update_context_prio(struct intel_context *ce) static inline void update_context_prio(struct intel_context *ce)
...@@ -1982,10 +1982,10 @@ static inline void update_context_prio(struct intel_context *ce) ...@@ -1982,10 +1982,10 @@ static inline void update_context_prio(struct intel_context *ce)
BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0); BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL); BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
lockdep_assert_held(&ce->guc_active.lock); lockdep_assert_held(&ce->guc_state.lock);
for (i = 0; i < ARRAY_SIZE(ce->guc_active.prio_count); ++i) { for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) {
if (ce->guc_active.prio_count[i]) { if (ce->guc_state.prio_count[i]) {
guc_context_set_prio(guc, ce, i); guc_context_set_prio(guc, ce, i);
break; break;
} }
...@@ -2005,8 +2005,8 @@ static void add_to_context(struct i915_request *rq) ...@@ -2005,8 +2005,8 @@ static void add_to_context(struct i915_request *rq)
GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI); GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
spin_lock(&ce->guc_active.lock); spin_lock(&ce->guc_state.lock);
list_move_tail(&rq->sched.link, &ce->guc_active.requests); list_move_tail(&rq->sched.link, &ce->guc_state.requests);
if (rq->guc_prio == GUC_PRIO_INIT) { if (rq->guc_prio == GUC_PRIO_INIT) {
rq->guc_prio = new_guc_prio; rq->guc_prio = new_guc_prio;
...@@ -2018,12 +2018,12 @@ static void add_to_context(struct i915_request *rq) ...@@ -2018,12 +2018,12 @@ static void add_to_context(struct i915_request *rq)
} }
update_context_prio(ce); update_context_prio(ce);
spin_unlock(&ce->guc_active.lock); spin_unlock(&ce->guc_state.lock);
} }
static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce) static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
{ {
lockdep_assert_held(&ce->guc_active.lock); lockdep_assert_held(&ce->guc_state.lock);
if (rq->guc_prio != GUC_PRIO_INIT && if (rq->guc_prio != GUC_PRIO_INIT &&
rq->guc_prio != GUC_PRIO_FINI) { rq->guc_prio != GUC_PRIO_FINI) {
...@@ -2037,7 +2037,7 @@ static void remove_from_context(struct i915_request *rq) ...@@ -2037,7 +2037,7 @@ static void remove_from_context(struct i915_request *rq)
{ {
struct intel_context *ce = rq->context; struct intel_context *ce = rq->context;
spin_lock_irq(&ce->guc_active.lock); spin_lock_irq(&ce->guc_state.lock);
list_del_init(&rq->sched.link); list_del_init(&rq->sched.link);
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
...@@ -2047,10 +2047,8 @@ static void remove_from_context(struct i915_request *rq) ...@@ -2047,10 +2047,8 @@ static void remove_from_context(struct i915_request *rq)
guc_prio_fini(rq, ce); guc_prio_fini(rq, ce);
spin_unlock_irq(&ce->guc_active.lock);
spin_lock_irq(&ce->guc_state.lock);
decr_context_committed_requests(ce); decr_context_committed_requests(ce);
spin_unlock_irq(&ce->guc_state.lock); spin_unlock_irq(&ce->guc_state.lock);
atomic_dec(&ce->guc_id.ref); atomic_dec(&ce->guc_id.ref);
...@@ -2138,7 +2136,7 @@ static void guc_context_init(struct intel_context *ce) ...@@ -2138,7 +2136,7 @@ static void guc_context_init(struct intel_context *ce)
prio = ctx->sched.priority; prio = ctx->sched.priority;
rcu_read_unlock(); rcu_read_unlock();
ce->guc_active.prio = map_i915_prio_to_guc_prio(prio); ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
set_bit(CONTEXT_GUC_INIT, &ce->flags); set_bit(CONTEXT_GUC_INIT, &ce->flags);
} }
...@@ -2372,7 +2370,7 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq, ...@@ -2372,7 +2370,7 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq,
!new_guc_prio_higher(rq->guc_prio, new_guc_prio))) !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
return; return;
spin_lock(&ce->guc_active.lock); spin_lock(&ce->guc_state.lock);
if (rq->guc_prio != GUC_PRIO_FINI) { if (rq->guc_prio != GUC_PRIO_FINI) {
if (rq->guc_prio != GUC_PRIO_INIT) if (rq->guc_prio != GUC_PRIO_INIT)
sub_context_inflight_prio(ce, rq->guc_prio); sub_context_inflight_prio(ce, rq->guc_prio);
...@@ -2380,16 +2378,16 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq, ...@@ -2380,16 +2378,16 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq,
add_context_inflight_prio(ce, rq->guc_prio); add_context_inflight_prio(ce, rq->guc_prio);
update_context_prio(ce); update_context_prio(ce);
} }
spin_unlock(&ce->guc_active.lock); spin_unlock(&ce->guc_state.lock);
} }
static void guc_retire_inflight_request_prio(struct i915_request *rq) static void guc_retire_inflight_request_prio(struct i915_request *rq)
{ {
struct intel_context *ce = rq->context; struct intel_context *ce = rq->context;
spin_lock(&ce->guc_active.lock); spin_lock(&ce->guc_state.lock);
guc_prio_fini(rq, ce); guc_prio_fini(rq, ce);
spin_unlock(&ce->guc_active.lock); spin_unlock(&ce->guc_state.lock);
} }
static void sanitize_hwsp(struct intel_engine_cs *engine) static void sanitize_hwsp(struct intel_engine_cs *engine)
...@@ -2955,7 +2953,7 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine) ...@@ -2955,7 +2953,7 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
goto next; goto next;
} }
list_for_each_entry(rq, &ce->guc_active.requests, sched.link) { list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE) if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
continue; continue;
...@@ -3005,10 +3003,10 @@ void intel_guc_dump_active_requests(struct intel_engine_cs *engine, ...@@ -3005,10 +3003,10 @@ void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
goto next; goto next;
} }
spin_lock(&ce->guc_active.lock); spin_lock(&ce->guc_state.lock);
intel_engine_dump_active_requests(&ce->guc_active.requests, intel_engine_dump_active_requests(&ce->guc_state.requests,
hung_rq, m); hung_rq, m);
spin_unlock(&ce->guc_active.lock); spin_unlock(&ce->guc_state.lock);
next: next:
intel_context_put(ce); intel_context_put(ce);
...@@ -3052,12 +3050,12 @@ static inline void guc_log_context_priority(struct drm_printer *p, ...@@ -3052,12 +3050,12 @@ static inline void guc_log_context_priority(struct drm_printer *p,
{ {
int i; int i;
drm_printf(p, "\t\tPriority: %d\n", ce->guc_active.prio); drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio);
drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n"); drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
for (i = GUC_CLIENT_PRIORITY_KMD_HIGH; for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
i < GUC_CLIENT_PRIORITY_NUM; ++i) { i < GUC_CLIENT_PRIORITY_NUM; ++i) {
drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n", drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
i, ce->guc_active.prio_count[i]); i, ce->guc_state.prio_count[i]);
} }
drm_printf(p, "\n"); drm_printf(p, "\n");
} }
......
...@@ -910,7 +910,7 @@ DECLARE_EVENT_CLASS(intel_context, ...@@ -910,7 +910,7 @@ DECLARE_EVENT_CLASS(intel_context,
__entry->guc_id = ce->guc_id.id; __entry->guc_id = ce->guc_id.id;
__entry->pin_count = atomic_read(&ce->pin_count); __entry->pin_count = atomic_read(&ce->pin_count);
__entry->sched_state = ce->guc_state.sched_state; __entry->sched_state = ce->guc_state.sched_state;
__entry->guc_prio = ce->guc_active.prio; __entry->guc_prio = ce->guc_state.prio;
), ),
TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u", TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u",
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment