Commit ee242ca7 authored by Matthew Brost, committed by John Harrison

drm/i915/guc: Implement GuC priority management

Implement a simple static mapping from the i915 priority levels
(int, -1k to 1k exposed to user) to the 4 GuC levels. The mapping is as
follows:

i915 level < 0              -> GuC low level     (3)
i915 level == 0             -> GuC normal level  (2)
0 < i915 level < INT_MAX    -> GuC high level    (1)
i915 level == INT_MAX       -> GuC highest level (0)

We believe this mapping should cover the UMD use cases (3 distinct user
levels + 1 kernel level).
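
For reference, the four GuC levels correspond to the GUC_CLIENT_PRIORITY_*
values from intel_guc_fwif.h (included by this patch). A minimal sketch of
the mapping helper consistent with the description above; the function name
map_i915_prio_to_guc_prio and the I915_PRIORITY_DISPLAY cutoff (per the v2
note "Map DISPLAY priority or higher to highest guc prio") are assumptions
here, not shown in the truncated diff:

static inline u8 map_i915_prio_to_guc_prio(int prio)
{
	if (prio == I915_PRIORITY_NORMAL)
		return GUC_CLIENT_PRIORITY_KMD_NORMAL;	/* 2 */
	else if (prio < I915_PRIORITY_NORMAL)
		return GUC_CLIENT_PRIORITY_NORMAL;	/* 3, lowest */
	else if (prio < I915_PRIORITY_DISPLAY)
		return GUC_CLIENT_PRIORITY_HIGH;	/* 1 */
	else
		return GUC_CLIENT_PRIORITY_KMD_HIGH;	/* 0, highest */
}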

In addition to the static mapping, a simple counter system is attached
to each context, tracking the number of requests inflight on the
context at each level. This is needed because the GuC levels are per
context while the i915 levels are per request.
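
Since a context holds a single GuC level but may carry inflight requests at
several i915 levels, the context takes the maximum over those requests. A
sketch of that selection, relying on the ordering enforced by the
BUILD_BUG_ON mentioned in the v2 changelog below (lower GUC_CLIENT_PRIORITY_*
value == higher priority); guc_context_set_prio() is a hypothetical name for
the helper that sends the priority update to the GuC:

static void update_context_prio(struct intel_context *ce)
{
	int i;

	/* The loop below relies on level 0 being the highest priority */
	BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
	BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);

	for (i = 0; i < GUC_CLIENT_PRIORITY_NUM; ++i) {
		if (ce->guc_prio_count[i]) {
			if (ce->guc_prio != i)
				guc_context_set_prio(ce, i); /* hypothetical helper */
			break;
		}
	}
}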

v2:
 (Daniele)
  - Add BUILD_BUG_ON to enforce ordering of priority levels
  - Add missing lockdep to guc_prio_fini
  - Check for return before setting context registered flag
  - Map DISPLAY priority or higher to highest guc prio
  - Update comment for guc_prio
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210727002348.97202-33-matthew.brost@intel.com
parent 3a7b7266
@@ -245,6 +245,9 @@ static void signal_irq_work(struct irq_work *work)
 			llist_entry(signal, typeof(*rq), signal_node);
 		struct list_head cb_list;
 
+		if (rq->engine->sched_engine->retire_inflight_request_prio)
+			rq->engine->sched_engine->retire_inflight_request_prio(rq);
+
 		spin_lock(&rq->lock);
 		list_replace(&rq->fence.cb_list, &cb_list);
 		__dma_fence_signal__timestamp(&rq->fence, timestamp);
...
@@ -18,8 +18,9 @@
 #include "intel_engine_types.h"
 #include "intel_sseu.h"
+#include "uc/intel_guc_fwif.h"
 
 #define CONTEXT_REDZONE POISON_INUSE
 
 DECLARE_EWMA(runtime, 3, 8);
 
 struct i915_gem_context;
@@ -191,6 +192,12 @@ struct intel_context {
 	/* GuC context blocked fence */
 	struct i915_sw_fence guc_blocked;
 
+	/*
+	 * GuC priority management
+	 */
+	u8 guc_prio;
+	u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
+
 };
 
 #endif /* __INTEL_CONTEXT_TYPES__ */
@@ -11,6 +11,7 @@
 #include "intel_engine.h"
 #include "intel_engine_user.h"
 #include "intel_gt.h"
+#include "uc/intel_guc_submission.h"
 
 struct intel_engine_cs *
 intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
@@ -115,6 +116,9 @@ static void set_scheduler_caps(struct drm_i915_private *i915)
 			disabled |= (I915_SCHEDULER_CAP_ENABLED |
 				     I915_SCHEDULER_CAP_PRIORITY);
 
+		if (intel_uc_uses_guc_submission(&i915->gt.uc))
+			enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;
+
 		for (i = 0; i < ARRAY_SIZE(map); i++) {
 			if (engine->flags & BIT(map[i].engine))
 				enabled |= BIT(map[i].sched);
...
@@ -114,6 +114,9 @@ static void i915_fence_release(struct dma_fence *fence)
 {
 	struct i915_request *rq = to_request(fence);
 
+	GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&
+		   rq->guc_prio != GUC_PRIO_FINI);
+
 	/*
 	 * The request is put onto a RCU freelist (i.e. the address
 	 * is immediately reused), mark the fences as being freed now.
@@ -924,6 +927,8 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
 
+	rq->guc_prio = GUC_PRIO_INIT;
+
 	/* We bump the ref for the fence chain */
 	i915_sw_fence_reinit(&i915_request_get(rq)->submit);
 	i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
...
@@ -293,6 +293,15 @@ struct i915_request {
 	 */
 	struct list_head guc_fence_link;
 
+	/**
+	 * Priority level while the request is inflight. Differs from i915
+	 * scheduler priority. See comment above
+	 * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details.
+	 */
+#define GUC_PRIO_INIT	0xff
+#define GUC_PRIO_FINI	0xfe
+	u8 guc_prio;
+
 	I915_SELFTEST_DECLARE(struct {
 		struct list_head link;
 		unsigned long delay;
...
@@ -241,6 +241,9 @@ static void __i915_schedule(struct i915_sched_node *node,
 	/* Fifo and depth-first replacement ensure our deps execute before us */
 	sched_engine = lock_sched_engine(node, sched_engine, &cache);
 	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
+		struct i915_request *from = container_of(dep->signaler,
+							 struct i915_request,
+							 sched);
 		INIT_LIST_HEAD(&dep->dfs_link);
 
 		node = dep->signaler;
@@ -254,6 +257,10 @@ static void __i915_schedule(struct i915_sched_node *node,
 		GEM_BUG_ON(node_to_request(node)->engine->sched_engine !=
 			   sched_engine);
 
+		/* Must be called before changing the nodes priority */
+		if (sched_engine->bump_inflight_request_prio)
+			sched_engine->bump_inflight_request_prio(from, prio);
+
 		WRITE_ONCE(node->attr.priority, prio);
 
 		/*
...
@@ -179,6 +179,18 @@ struct i915_sched_engine {
 	void	(*kick_backend)(const struct i915_request *rq,
 				int prio);
 
+	/**
+	 * @bump_inflight_request_prio: update priority of an inflight request
+	 */
+	void	(*bump_inflight_request_prio)(struct i915_request *rq,
+					      int prio);
+
+	/**
+	 * @retire_inflight_request_prio: indicate request is retired to
+	 * priority tracking
+	 */
+	void	(*retire_inflight_request_prio)(struct i915_request *rq);
+
 	/**
 	 * @schedule: adjust priority of request
 	 *
...
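
The GuC backend implementations of these two hooks live in
intel_guc_submission.c, which is elided from this truncated view. A sketch of
the retire side, assuming the per-context guc_active.lock and the
update_context_prio() helper sketched earlier (names illustrative, not taken
from the visible diff):

static void guc_retire_inflight_request_prio(struct i915_request *rq)
{
	struct intel_context *ce = rq->context;

	spin_lock(&ce->guc_active.lock);
	if (rq->guc_prio != GUC_PRIO_INIT && rq->guc_prio != GUC_PRIO_FINI) {
		/* Drop this request's vote and recompute the context level */
		ce->guc_prio_count[rq->guc_prio]--;
		update_context_prio(ce);
	}
	/* Matches the GEM_BUG_ON in i915_fence_release() */
	rq->guc_prio = GUC_PRIO_FINI;
	spin_unlock(&ce->guc_active.lock);
}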
@@ -904,6 +904,7 @@ DECLARE_EVENT_CLASS(intel_context,
 			     __field(int, pin_count)
 			     __field(u32, sched_state)
 			     __field(u32, guc_sched_state_no_lock)
+			     __field(u8, guc_prio)
 			     ),
 
 	    TP_fast_assign(
@@ -912,12 +913,19 @@ DECLARE_EVENT_CLASS(intel_context,
 			   __entry->sched_state = ce->guc_state.sched_state;
 			   __entry->guc_sched_state_no_lock =
 				atomic_read(&ce->guc_sched_state_no_lock);
+			   __entry->guc_prio = ce->guc_prio;
 			   ),
 
-	    TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x,0x%x",
+	    TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x,0x%x, guc_prio=%u",
 		      __entry->guc_id, __entry->pin_count,
 		      __entry->sched_state,
-		      __entry->guc_sched_state_no_lock)
+		      __entry->guc_sched_state_no_lock,
+		      __entry->guc_prio)
 );
 
+DEFINE_EVENT(intel_context, intel_context_set_prio,
+	     TP_PROTO(struct intel_context *ce),
+	     TP_ARGS(ce)
+);
+
 DEFINE_EVENT(intel_context, intel_context_reset,
@@ -1017,6 +1025,11 @@ trace_i915_request_out(struct i915_request *rq)
 {
 }
 
+static inline void
+trace_intel_context_set_prio(struct intel_context *ce)
+{
+}
+
 static inline void
 trace_intel_context_reset(struct intel_context *ce)
 {
...
@@ -572,6 +572,15 @@ typedef struct drm_i915_irq_wait {
 #define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
 #define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
 #define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
+/*
+ * Indicates the 2k user priority levels are statically mapped into 3 buckets as
+ * follows:
+ *
+ * -1k to -1 Low priority
+ * 0 Normal priority
+ * 1 to 1k Highest priority
+ */
+#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)
 
 #define I915_PARAM_HUC_STATUS		 42
...
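
With the uapi bit in place, a UMD can probe for the static map through the
existing scheduler-caps query (I915_PARAM_HAS_SCHEDULER returns the
I915_SCHEDULER_CAP_* bitmask). A minimal userspace sketch, error handling
elided:

#include <stdbool.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static bool has_static_priority_map(int drm_fd)
{
	int caps = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_SCHEDULER,
		.value = &caps,
	};

	/* The scheduler caps bitmask is returned through *value */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return false;

	return caps & I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;
}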