Commit 0f797650 authored by Matthew Brost, committed by John Harrison

drm/i915/guc: Rework and simplify locking

Rework and simplify the locking with GuC submission. Drop
sched_state_no_lock, move all fields under guc_state.sched_state, and
protect all these fields with guc_state.lock. This requires changing
the locking hierarchy from guc_state.lock -> sched_engine.lock to
sched_engine.lock -> guc_state.lock.
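
As an illustration only (not part of this patch), below is a minimal,
self-contained userspace sketch of the new lock nesting, using POSIX
mutexes as stand-ins for the driver's spinlocks; the names
sched_engine_lock, guc_state_lock, add_request() and block_context()
are placeholders, not i915 symbols:

  #include <pthread.h>
  #include <stdio.h>

  /* Stand-ins for sched_engine->lock (outer) and ce->guc_state.lock (inner). */
  static pthread_mutex_t sched_engine_lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t guc_state_lock = PTHREAD_MUTEX_INITIALIZER;

  /* Submission path: take the outer engine lock first, then the
   * per-context state lock, mirroring sched_engine.lock -> guc_state.lock. */
  static void add_request(void)
  {
      pthread_mutex_lock(&sched_engine_lock);  /* outer */
      pthread_mutex_lock(&guc_state_lock);     /* inner */
      /* ... update per-context scheduling state, emit the submission ... */
      pthread_mutex_unlock(&guc_state_lock);
      pthread_mutex_unlock(&sched_engine_lock);
  }

  /* Block/unblock path: with the blocked count kept under guc_state.lock,
   * only the inner lock is needed here; no cross-lock nesting remains. */
  static void block_context(void)
  {
      pthread_mutex_lock(&guc_state_lock);
      /* ... incr_context_blocked()-style bookkeeping ... */
      pthread_mutex_unlock(&guc_state_lock);
  }

  int main(void)
  {
      add_request();
      block_context();
      puts("lock order: sched_engine.lock -> guc_state.lock");
      return 0;
  }

Taking the two locks in this one order everywhere, and never the
reverse, is what the reworked hierarchy guarantees.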

v2:
 (Daniele)
  - Don't check fields outside of the lock during sched disable; check
    fewer fields within the lock as some of the outside checks are no
    longer needed
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210909164744.31249-18-matthew.brost@intel.com
parent 52d66c06
@@ -161,7 +161,7 @@ struct intel_context {
         * sched_state: scheduling state of this context using GuC
         * submission
         */
-        u16 sched_state;
+        u32 sched_state;
        /*
         * fences: maintains of list of requests that have a submit
         * fence related to GuC submission
@@ -178,9 +178,6 @@ struct intel_context {
        struct list_head requests;
    } guc_active;

-   /* GuC scheduling state flags that do not require a lock. */
-   atomic_t guc_sched_state_no_lock;
-
    /* GuC LRC descriptor ID */
    u16 guc_id;
...
@@ -72,87 +72,24 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count);

 #define GUC_REQUEST_SIZE 64 /* bytes */

-/*
- * Below is a set of functions which control the GuC scheduling state which do
- * not require a lock as all state transitions are mutually exclusive. i.e. It
- * is not possible for the context pinning code and submission, for the same
- * context, to be executing simultaneously. We still need an atomic as it is
- * possible for some of the bits to changing at the same time though.
- */
-#define SCHED_STATE_NO_LOCK_ENABLED            BIT(0)
-#define SCHED_STATE_NO_LOCK_PENDING_ENABLE     BIT(1)
-#define SCHED_STATE_NO_LOCK_REGISTERED         BIT(2)
-
-static inline bool context_enabled(struct intel_context *ce)
-{
-   return (atomic_read(&ce->guc_sched_state_no_lock) &
-       SCHED_STATE_NO_LOCK_ENABLED);
-}
-
-static inline void set_context_enabled(struct intel_context *ce)
-{
-   atomic_or(SCHED_STATE_NO_LOCK_ENABLED, &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_enabled(struct intel_context *ce)
-{
-   atomic_and((u32)~SCHED_STATE_NO_LOCK_ENABLED,
-          &ce->guc_sched_state_no_lock);
-}
-
-static inline bool context_pending_enable(struct intel_context *ce)
-{
-   return (atomic_read(&ce->guc_sched_state_no_lock) &
-       SCHED_STATE_NO_LOCK_PENDING_ENABLE);
-}
-
-static inline void set_context_pending_enable(struct intel_context *ce)
-{
-   atomic_or(SCHED_STATE_NO_LOCK_PENDING_ENABLE,
-         &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_pending_enable(struct intel_context *ce)
-{
-   atomic_and((u32)~SCHED_STATE_NO_LOCK_PENDING_ENABLE,
-          &ce->guc_sched_state_no_lock);
-}
-
-static inline bool context_registered(struct intel_context *ce)
-{
-   return (atomic_read(&ce->guc_sched_state_no_lock) &
-       SCHED_STATE_NO_LOCK_REGISTERED);
-}
-
-static inline void set_context_registered(struct intel_context *ce)
-{
-   atomic_or(SCHED_STATE_NO_LOCK_REGISTERED,
-         &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_registered(struct intel_context *ce)
-{
-   atomic_and((u32)~SCHED_STATE_NO_LOCK_REGISTERED,
-          &ce->guc_sched_state_no_lock);
-}
-
 /*
  * Below is a set of functions which control the GuC scheduling state which
- * require a lock, aside from the special case where the functions are called
- * from guc_lrc_desc_pin(). In that case it isn't possible for any other code
- * path to be executing on the context.
+ * require a lock.
  */
 #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER    BIT(0)
 #define SCHED_STATE_DESTROYED                          BIT(1)
 #define SCHED_STATE_PENDING_DISABLE                    BIT(2)
 #define SCHED_STATE_BANNED                             BIT(3)
-#define SCHED_STATE_BLOCKED_SHIFT                      4
+#define SCHED_STATE_ENABLED                            BIT(4)
+#define SCHED_STATE_PENDING_ENABLE                     BIT(5)
+#define SCHED_STATE_REGISTERED                         BIT(6)
+#define SCHED_STATE_BLOCKED_SHIFT                      7
 #define SCHED_STATE_BLOCKED            BIT(SCHED_STATE_BLOCKED_SHIFT)
 #define SCHED_STATE_BLOCKED_MASK       (0xfff << SCHED_STATE_BLOCKED_SHIFT)

 static inline void init_sched_state(struct intel_context *ce)
 {
    lockdep_assert_held(&ce->guc_state.lock);
-   atomic_set(&ce->guc_sched_state_no_lock, 0);
    ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
 }
@@ -163,9 +100,8 @@ static bool sched_state_is_init(struct intel_context *ce)
     * XXX: Kernel contexts can have SCHED_STATE_NO_LOCK_REGISTERED after
     * suspend.
     */
-   return !(atomic_read(&ce->guc_sched_state_no_lock) &
-        ~SCHED_STATE_NO_LOCK_REGISTERED) &&
-       !(ce->guc_state.sched_state &= ~SCHED_STATE_BLOCKED_MASK);
+   return !(ce->guc_state.sched_state &=
+        ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED));
 }

 static inline bool
@@ -238,6 +174,57 @@ static inline void clr_context_banned(struct intel_context *ce)
    ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
 }

+static inline bool context_enabled(struct intel_context *ce)
+{
+   return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
+}
+
+static inline void set_context_enabled(struct intel_context *ce)
+{
+   lockdep_assert_held(&ce->guc_state.lock);
+   ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
+}
+
+static inline void clr_context_enabled(struct intel_context *ce)
+{
+   lockdep_assert_held(&ce->guc_state.lock);
+   ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
+}
+
+static inline bool context_pending_enable(struct intel_context *ce)
+{
+   return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline void set_context_pending_enable(struct intel_context *ce)
+{
+   lockdep_assert_held(&ce->guc_state.lock);
+   ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline void clr_context_pending_enable(struct intel_context *ce)
+{
+   lockdep_assert_held(&ce->guc_state.lock);
+   ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline bool context_registered(struct intel_context *ce)
+{
+   return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
+}
+
+static inline void set_context_registered(struct intel_context *ce)
+{
+   lockdep_assert_held(&ce->guc_state.lock);
+   ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
+}
+
+static inline void clr_context_registered(struct intel_context *ce)
+{
+   lockdep_assert_held(&ce->guc_state.lock);
+   ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
+}
+
 static inline u32 context_blocked(struct intel_context *ce)
 {
    return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
@@ -246,7 +233,6 @@ static inline u32 context_blocked(struct intel_context *ce)

 static inline void incr_context_blocked(struct intel_context *ce)
 {
-   lockdep_assert_held(&ce->engine->sched_engine->lock);
    lockdep_assert_held(&ce->guc_state.lock);

    ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
@@ -256,7 +242,6 @@ static inline void incr_context_blocked(struct intel_context *ce)

 static inline void decr_context_blocked(struct intel_context *ce)
 {
-   lockdep_assert_held(&ce->engine->sched_engine->lock);
    lockdep_assert_held(&ce->guc_state.lock);

    GEM_BUG_ON(!context_blocked(ce));  /* Underflow check */
@@ -452,6 +437,8 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
    u32 g2h_len_dw = 0;
    bool enabled;

+   lockdep_assert_held(&rq->engine->sched_engine->lock);
+
    /*
     * Corner case where requests were sitting in the priority list or a
     * request resubmitted after the context was banned.
@@ -459,7 +446,7 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
    if (unlikely(intel_context_is_banned(ce))) {
        i915_request_put(i915_request_mark_eio(rq));
        intel_engine_signal_breadcrumbs(ce->engine);
-       goto out;
+       return 0;
    }

    GEM_BUG_ON(!atomic_read(&ce->guc_id_ref));
@@ -472,9 +459,11 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
    if (unlikely(!lrc_desc_registered(guc, ce->guc_id))) {
        err = guc_lrc_desc_pin(ce, false);
        if (unlikely(err))
-           goto out;
+           return err;
    }

+   spin_lock(&ce->guc_state.lock);
+
    /*
     * The request / context will be run on the hardware when scheduling
     * gets enabled in the unblock.
@@ -509,6 +498,7 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
    trace_i915_request_guc_submit(rq);

 out:
+   spin_unlock(&ce->guc_state.lock);
    return err;
 }

@@ -747,6 +737,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
            wait_for_reset(guc, &guc->outstanding_submission_g2h);
        } while (!list_empty(&guc->ct.requests.incoming));
    }
+
    scrub_guc_desc_for_outstanding_g2h(guc);
 }

@@ -1147,7 +1138,11 @@ static int steal_guc_id(struct intel_guc *guc)
        list_del_init(&ce->guc_id_link);
        guc_id = ce->guc_id;
+
+       spin_lock(&ce->guc_state.lock);
        clr_context_registered(ce);
+       spin_unlock(&ce->guc_state.lock);
+
        set_context_guc_id_invalid(ce);
        return guc_id;
    } else {
@@ -1183,6 +1178,8 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
 try_again:
    spin_lock_irqsave(&guc->contexts_lock, flags);

+   might_lock(&ce->guc_state.lock);
+
    if (context_guc_id_invalid(ce)) {
        ret = assign_guc_id(guc, &ce->guc_id);
        if (ret)
@@ -1262,8 +1259,13 @@ static int register_context(struct intel_context *ce, bool loop)
    trace_intel_context_register(ce);

    ret = __guc_action_register_context(guc, ce->guc_id, offset, loop);
-   if (likely(!ret))
+   if (likely(!ret)) {
+       unsigned long flags;
+
+       spin_lock_irqsave(&ce->guc_state.lock, flags);
        set_context_registered(ce);
+       spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+   }

    return ret;
 }
@@ -1537,7 +1539,6 @@ static u16 prep_context_pending_disable(struct intel_context *ce)
 static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
 {
    struct intel_guc *guc = ce_to_guc(ce);
-   struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
    unsigned long flags;
    struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
    intel_wakeref_t wakeref;
@@ -1546,13 +1547,7 @@ static struct i915_sw_fence *guc_context_block(struct intel_context *ce)

    spin_lock_irqsave(&ce->guc_state.lock, flags);

-   /*
-    * Sync with submission path, increment before below changes to context
-    * state.
-    */
-   spin_lock(&sched_engine->lock);
    incr_context_blocked(ce);
-   spin_unlock(&sched_engine->lock);

    enabled = context_enabled(ce);
    if (unlikely(!enabled || submission_disabled(guc))) {
@@ -1598,7 +1593,6 @@ static bool context_cant_unblock(struct intel_context *ce)
 static void guc_context_unblock(struct intel_context *ce)
 {
    struct intel_guc *guc = ce_to_guc(ce);
-   struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
    unsigned long flags;
    struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
    intel_wakeref_t wakeref;
@@ -1618,13 +1612,7 @@ static void guc_context_unblock(struct intel_context *ce)
        intel_context_get(ce);
    }

-   /*
-    * Sync with submission path, decrement after above changes to context
-    * state.
-    */
-   spin_lock(&sched_engine->lock);
    decr_context_blocked(ce);
-   spin_unlock(&sched_engine->lock);

    spin_unlock_irqrestore(&ce->guc_state.lock, flags);
@@ -1730,16 +1718,6 @@ static void guc_context_sched_disable(struct intel_context *ce)
    struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
    intel_wakeref_t wakeref;
    u16 guc_id;
-   bool enabled;
-
-   if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
-       !lrc_desc_registered(guc, ce->guc_id)) {
-       clr_context_enabled(ce);
-       goto unpin;
-   }
-
-   if (!context_enabled(ce))
-       goto unpin;

    spin_lock_irqsave(&ce->guc_state.lock, flags);
@@ -1753,9 +1731,7 @@ static void guc_context_sched_disable(struct intel_context *ce)
     * sleep) ensures another process doesn't pin this context and generate
     * a request before we set the 'context_pending_disable' flag here.
     */
-   enabled = context_enabled(ce);
-   if (unlikely(!enabled || submission_disabled(guc))) {
-       if (enabled)
+   if (unlikely(!context_enabled(ce) || submission_disabled(guc))) {
        clr_context_enabled(ce);
        spin_unlock_irqrestore(&ce->guc_state.lock, flags);
        goto unpin;
@@ -1784,7 +1760,6 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
    GEM_BUG_ON(ce != __get_context(guc, ce->guc_id));
    GEM_BUG_ON(context_enabled(ce));

-   clr_context_registered(ce);
    deregister_context(ce, ce->guc_id);
 }

@@ -1857,8 +1832,10 @@ static void guc_context_destroy(struct kref *kref)
    /* Seal race with Reset */
    spin_lock_irqsave(&ce->guc_state.lock, flags);
    disabled = submission_disabled(guc);
-   if (likely(!disabled))
+   if (likely(!disabled)) {
        set_context_destroyed(ce);
+       clr_context_registered(ce);
+   }
    spin_unlock_irqrestore(&ce->guc_state.lock, flags);
    if (unlikely(disabled)) {
        release_guc_id(guc, ce);
@@ -2724,8 +2701,7 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
             (!context_pending_enable(ce) &&
              !context_pending_disable(ce)))) {
        drm_err(&guc_to_gt(guc)->i915->drm,
-           "Bad context sched_state 0x%x, 0x%x, desc_idx %u",
-           atomic_read(&ce->guc_sched_state_no_lock),
+           "Bad context sched_state 0x%x, desc_idx %u",
            ce->guc_state.sched_state, desc_idx);
        return -EPROTO;
    }
@@ -2740,7 +2716,9 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
        }
 #endif

+       spin_lock_irqsave(&ce->guc_state.lock, flags);
        clr_context_pending_enable(ce);
+       spin_unlock_irqrestore(&ce->guc_state.lock, flags);
    } else if (context_pending_disable(ce)) {
        bool banned;
@@ -3014,9 +2992,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
               atomic_read(&ce->pin_count));
        drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
               atomic_read(&ce->guc_id_ref));
-       drm_printf(p, "\t\tSchedule State: 0x%x, 0x%x\n\n",
-              ce->guc_state.sched_state,
-              atomic_read(&ce->guc_sched_state_no_lock));
+       drm_printf(p, "\t\tSchedule State: 0x%x\n\n",
+              ce->guc_state.sched_state);

        guc_log_context_priority(p, ce);
    }
...
@@ -903,7 +903,6 @@ DECLARE_EVENT_CLASS(intel_context,
            __field(u32, guc_id)
            __field(int, pin_count)
            __field(u32, sched_state)
-           __field(u32, guc_sched_state_no_lock)
            __field(u8, guc_prio)
            ),
@@ -911,15 +910,12 @@ DECLARE_EVENT_CLASS(intel_context,
            __entry->guc_id = ce->guc_id;
            __entry->pin_count = atomic_read(&ce->pin_count);
            __entry->sched_state = ce->guc_state.sched_state;
-           __entry->guc_sched_state_no_lock =
-               atomic_read(&ce->guc_sched_state_no_lock);
            __entry->guc_prio = ce->guc_prio;
            ),

-       TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x,0x%x, guc_prio=%u",
+       TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u",
              __entry->guc_id, __entry->pin_count,
              __entry->sched_state,
-             __entry->guc_sched_state_no_lock,
              __entry->guc_prio)
 );
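
For reference, the SCHED_STATE_BLOCKED bits seen in the diff pack a small
counter into the upper bits of the single sched_state word while the low
bits remain flags. The following standalone sketch (illustration only,
userspace, no locking; macro values copied from the patch) shows how adding
and subtracting SCHED_STATE_BLOCKED maintains that count without disturbing
the flag bits:

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  #define BIT(n)                     (1u << (n))
  #define SCHED_STATE_ENABLED        BIT(4)
  #define SCHED_STATE_BLOCKED_SHIFT  7
  #define SCHED_STATE_BLOCKED        BIT(SCHED_STATE_BLOCKED_SHIFT)
  #define SCHED_STATE_BLOCKED_MASK   (0xfff << SCHED_STATE_BLOCKED_SHIFT)

  static uint32_t sched_state;   /* stand-in for ce->guc_state.sched_state */

  static uint32_t context_blocked(void)
  {
      return (sched_state & SCHED_STATE_BLOCKED_MASK) >>
             SCHED_STATE_BLOCKED_SHIFT;
  }

  int main(void)
  {
      sched_state |= SCHED_STATE_ENABLED;  /* flag bit in the low bits */
      sched_state += SCHED_STATE_BLOCKED;  /* incr_context_blocked() analogue */
      sched_state += SCHED_STATE_BLOCKED;
      assert(context_blocked() == 2);
      sched_state -= SCHED_STATE_BLOCKED;  /* decr_context_blocked() analogue */
      printf("blocked=%u enabled=%d\n", (unsigned)context_blocked(),
             !!(sched_state & SCHED_STATE_ENABLED));
      return 0;
  }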