Commit f16d5cb9 authored by Matthew Brost, committed by John Harrison

drm/i915/guc: Don't touch guc_state.sched_state without a lock

Previously we used some clever tricks to avoid taking a lock when
touching guc_state.sched_state in certain cases. Don't do that;
enforce the use of the lock.
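
For illustration only (not part of the patch): a minimal sketch of the
enforcement pattern, with a hypothetical guarded_state structure and
set_state_bits() helper; lockdep_assert_held() is the real kernel
primitive this patch adds to the driver helpers.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical stand-in for ce->guc_state; not driver code. */
struct guarded_state {
	spinlock_t lock;
	u32 sched_state;
};

static void set_state_bits(struct guarded_state *gs, u32 bits)
{
	/*
	 * States the locking rule in a checkable form instead of a
	 * comment: with CONFIG_PROVE_LOCKING enabled, this warns at
	 * runtime if the caller does not hold gs->lock.
	 */
	lockdep_assert_held(&gs->lock);
	gs->sched_state |= bits;
}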

v2:
 (kernel test robot)
  - Add __maybe_unused to sched_state_is_init()

v3:
  - Rebase after the unused code path removal was moved to an earlier
    patch.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reported-by: kernel test robot <lkp@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-14-matthew.brost@intel.com
parent 422cda4f
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -151,11 +151,23 @@ static inline void clr_context_registered(struct intel_context *ce)
 
 static inline void init_sched_state(struct intel_context *ce)
 {
-	/* Only should be called from guc_lrc_desc_pin() */
+	lockdep_assert_held(&ce->guc_state.lock);
 	atomic_set(&ce->guc_sched_state_no_lock, 0);
 	ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
 }
 
+__maybe_unused
+static bool sched_state_is_init(struct intel_context *ce)
+{
+	/*
+	 * XXX: Kernel contexts can have SCHED_STATE_NO_LOCK_REGISTERED after
+	 * suspend.
+	 */
+	return !(atomic_read(&ce->guc_sched_state_no_lock) &
+		 ~SCHED_STATE_NO_LOCK_REGISTERED) &&
+		!(ce->guc_state.sched_state &= ~SCHED_STATE_BLOCKED_MASK);
+}
+
 static inline bool
 context_wait_for_deregister_to_register(struct intel_context *ce)
 {
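
The __maybe_unused annotation (added in v2 after the kernel test robot
report) exists because the helper's only caller is the GEM_BUG_ON()
added to guc_lrc_desc_pin() further down, and that check can compile
away depending on CONFIG_DRM_I915_DEBUG_GEM. A hypothetical sketch of
the shape, not driver code:

struct foo {
	u32 state;
};

/* Only referenced from a debug-only assertion below. */
__maybe_unused
static bool foo_state_is_reset(const struct foo *f)
{
	return f->state == 0;
}

static void foo_reinit(struct foo *f)
{
	/*
	 * GEM_BUG_ON() may compile to nothing, leaving the helper
	 * above otherwise unused; __maybe_unused silences the
	 * resulting unused-function warning.
	 */
	GEM_BUG_ON(!foo_state_is_reset(f));
	f->state = 0;
}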
@@ -166,7 +178,7 @@ context_wait_for_deregister_to_register(struct intel_context *ce)
 static inline void
 set_context_wait_for_deregister_to_register(struct intel_context *ce)
 {
-	/* Only should be called from guc_lrc_desc_pin() without lock */
+	lockdep_assert_held(&ce->guc_state.lock);
 	ce->guc_state.sched_state |=
 		SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
 }
@@ -607,9 +619,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
 	bool pending_disable, pending_enable, deregister, destroyed, banned;
 
 	xa_for_each(&guc->context_lookup, index, ce) {
-		/* Flush context */
 		spin_lock_irqsave(&ce->guc_state.lock, flags);
-		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
 		/*
 		 * Once we are at this point submission_disabled() is guaranteed
@@ -625,6 +635,8 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
 		banned = context_banned(ce);
 		init_sched_state(ce);
 
+		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
 		if (pending_enable || destroyed || deregister) {
 			decr_outstanding_submission_g2h(guc);
 			if (deregister)
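
Taken together, the two hunks above turn what used to be an empty
lock/unlock "flush" into a single critical section: the flags are
sampled and init_sched_state() is called under ce->guc_state.lock, and
the lock is dropped before the G2H handling. A condensed sketch of the
resulting loop body (helper names as in the driver, surrounding
declarations omitted):

	xa_for_each(&guc->context_lookup, index, ce) {
		spin_lock_irqsave(&ce->guc_state.lock, flags);
		pending_enable = context_pending_enable(ce);
		pending_disable = context_pending_disable(ce);
		deregister = context_wait_for_deregister_to_register(ce);
		destroyed = context_destroyed(ce);
		banned = context_banned(ce);
		init_sched_state(ce); /* now satisfies its lockdep assertion */
		spin_unlock_irqrestore(&ce->guc_state.lock, flags);

		/* ... act on the sampled flags without holding the lock ... */
	}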
@@ -1324,6 +1336,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 	int ret = 0;
 
 	GEM_BUG_ON(!engine->mask);
+	GEM_BUG_ON(!sched_state_is_init(ce));
 
 	/*
 	 * Ensure LRC + CT vmas are is same region as write barrier is done
@@ -1352,7 +1365,6 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 	desc->priority = ce->guc_prio;
 	desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
 	guc_context_policy_init(engine, desc);
-	init_sched_state(ce);
 
 	/*
 	 * The context_lookup xarray is used to determine if the hardware