Commit f6322edd authored by Chris Wilson

drm/i915/preemption: Allow preemption between submission ports

Sometimes we need to boost the priority of an in-flight request, which
may lead to a situation where the second submission port contains a
higher-priority context than the first, and so we need to inject a
preemption event. To do so we must always check inside
execlists_dequeue() whether there is a priority inversion between the
ports themselves as well as against the head of the priority-sorted
queue, and we cannot simply skip dequeuing when the queue is empty.

As Michał noted, this doesn't simply extend to handling more than 2-port
submission, as we may need to reorder within the array of executing
requests which themselves are lower priority than the first. A task for
later!
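
For illustration only, a minimal sketch of the decision the dequeue path makes after this change. The struct and field names below are simplified stand-ins, not the driver's real types; only the shape of the comparison follows the patch (the real check additionally requires a preempt context to exist).

#include <limits.h>
#include <stdbool.h>

/* Hypothetical, simplified stand-ins for the driver's types. */
struct fake_request {
        int priority;
};

struct fake_execlists {
        /* port[0] is executing; port[1] holds the next coalesced context. */
        struct fake_request *port[2];
        /* Highest pending priority, including boosts applied after
         * submission; INT_MIN when nothing is waiting. */
        int queue_priority;
};

/*
 * Preempt only if something pending outranks what is executing. Clamping
 * the executing priority to 0 means work at the default priority (0) never
 * preempts other default-priority work; only boosted requests do.
 */
static bool need_preempt(const struct fake_execlists *el)
{
        int executing = el->port[0] ? el->port[0]->priority : INT_MIN;

        return el->queue_priority > (executing > 0 ? executing : 0);
}

The point of the fix is that boosting the request already sitting in port[1] raises queue_priority even when the tree of pending requests is empty, so the dequeue path must evaluate this check unconditionally instead of bailing out early on an empty queue. With more than two ports the single comparison is no longer sufficient, since the boosted request could sit anywhere in the executing array; hence the note above.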
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180222142229.14517-1-chris@chris-wilson.co.uk
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
parent e532be89
@@ -423,6 +423,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
         BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
         GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
+        execlists->queue_priority = INT_MIN;
         execlists->queue = RB_ROOT;
         execlists->first = NULL;
 }
@@ -1903,6 +1904,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
         spin_lock_irq(&engine->timeline->lock);
         list_for_each_entry(rq, &engine->timeline->requests, link)
                 print_request(m, rq, "\t\tE ");
+        drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
         for (rb = execlists->first; rb; rb = rb_next(rb)) {
                 struct i915_priolist *p =
                         rb_entry(rb, typeof(*p), node);
...
@@ -75,6 +75,11 @@
  *
  */
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+        return rb_entry(rb, struct i915_priolist, node);
+}
 static inline bool is_high_priority(struct intel_guc_client *client)
 {
         return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
@@ -682,15 +687,12 @@ static void guc_dequeue(struct intel_engine_cs *engine)
         rb = execlists->first;
         GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-        if (!rb)
-                goto unlock;
         if (port_isset(port)) {
                 if (engine->i915->preempt_context) {
                         struct guc_preempt_work *preempt_work =
                                 &engine->i915->guc.preempt_work[engine->id];
-                        if (rb_entry(rb, struct i915_priolist, node)->priority >
+                        if (execlists->queue_priority >
                             max(port_request(port)->priotree.priority, 0)) {
                                 execlists_set_active(execlists,
                                                      EXECLISTS_ACTIVE_PREEMPT);
@@ -706,8 +708,8 @@ static void guc_dequeue(struct intel_engine_cs *engine)
         }
         GEM_BUG_ON(port_isset(port));
-        do {
-                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+        while (rb) {
+                struct i915_priolist *p = to_priolist(rb);
                 struct i915_request *rq, *rn;
                 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
@@ -736,8 +738,9 @@ static void guc_dequeue(struct intel_engine_cs *engine)
                 INIT_LIST_HEAD(&p->requests);
                 if (p->priority != I915_PRIORITY_NORMAL)
                         kmem_cache_free(engine->i915->priorities, p);
-        } while (rb);
+        }
 done:
+        execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
         execlists->first = rb;
         if (submit) {
                 port_assign(port, last);
...
@@ -169,6 +169,23 @@ static void execlists_init_reg_state(u32 *reg_state,
                                      struct intel_engine_cs *engine,
                                      struct intel_ring *ring);
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+        return rb_entry(rb, struct i915_priolist, node);
+}
+static inline int rq_prio(const struct i915_request *rq)
+{
+        return rq->priotree.priority;
+}
+static inline bool need_preempt(const struct intel_engine_cs *engine,
+                                const struct i915_request *last,
+                                int prio)
+{
+        return engine->i915->preempt_context && prio > max(rq_prio(last), 0);
+}
 /**
  * intel_lr_context_descriptor_update() - calculate & cache the descriptor
  * descriptor for a pinned context
@@ -224,7 +241,7 @@ lookup_priolist(struct intel_engine_cs *engine,
         parent = &execlists->queue.rb_node;
         while (*parent) {
                 rb = *parent;
-                p = rb_entry(rb, typeof(*p), node);
+                p = to_priolist(rb);
                 if (prio > p->priority) {
                         parent = &rb->rb_left;
                 } else if (prio < p->priority) {
@@ -264,7 +281,7 @@ lookup_priolist(struct intel_engine_cs *engine,
         if (first)
                 execlists->first = &p->node;
-        return ptr_pack_bits(p, first, 1);
+        return p;
 }
 static void unwind_wa_tail(struct i915_request *rq)
@@ -290,14 +307,10 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
                 __i915_request_unsubmit(rq);
                 unwind_wa_tail(rq);
-                GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID);
-                if (rq->priotree.priority != last_prio) {
-                        p = lookup_priolist(engine,
-                                            &rq->priotree,
-                                            rq->priotree.priority);
-                        p = ptr_mask_bits(p, 1);
-                        last_prio = rq->priotree.priority;
+                GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+                if (rq_prio(rq) != last_prio) {
+                        last_prio = rq_prio(rq);
+                        p = lookup_priolist(engine, &rq->priotree, last_prio);
                 }
                 list_add(&rq->priotree.link, &p->requests);
@@ -397,10 +410,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
                         desc = execlists_update_context(rq);
                         GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
-                        GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x\n",
+                        GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x, prio=%d\n",
                                   engine->name, n,
                                   port[n].context_id, count,
-                                  rq->global_seqno);
+                                  rq->global_seqno,
+                                  rq_prio(rq));
                 } else {
                         GEM_BUG_ON(!n);
                         desc = 0;
@@ -453,12 +467,17 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
                    _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
                                       CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT));
+        /*
+         * Switch to our empty preempt context so
+         * the state of the GPU is known (idle).
+         */
         GEM_TRACE("%s\n", engine->name);
         for (n = execlists_num_ports(&engine->execlists); --n; )
                 elsp_write(0, engine->execlists.elsp);
         elsp_write(ce->lrc_desc, engine->execlists.elsp);
         execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
+        execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
 }
 static void execlists_dequeue(struct intel_engine_cs *engine)
@@ -495,8 +514,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         spin_lock_irq(&engine->timeline->lock);
         rb = execlists->first;
         GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-        if (!rb)
-                goto unlock;
         if (last) {
                 /*
@@ -519,18 +536,11 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
                         goto unlock;
-                if (engine->i915->preempt_context &&
-                    rb_entry(rb, struct i915_priolist, node)->priority >
-                    max(last->priotree.priority, 0)) {
-                        /*
-                         * Switch to our empty preempt context so
-                         * the state of the GPU is known (idle).
-                         */
+                if (need_preempt(engine, last, execlists->queue_priority)) {
                         inject_preempt_context(engine);
-                        execlists_set_active(execlists,
-                                             EXECLISTS_ACTIVE_PREEMPT);
                         goto unlock;
-                } else {
+                }
                 /*
                  * In theory, we could coalesce more requests onto
                  * the second port (the first port is active, with
@@ -549,12 +559,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                  * able to resubmit the new ELSP before it idles,
                  * avoiding pipeline bubbles (momentary pauses where
                  * the driver is unable to keep up the supply of new
-                 * work).
+                 * work). However, we have to double check that the
+                 * priorities of the ports haven't been switch.
                  */
                 if (port_count(&port[1]))
                         goto unlock;
-                /* WaIdleLiteRestore:bdw,skl
+                /*
+                 * WaIdleLiteRestore:bdw,skl
                  * Apply the wa NOOPs to prevent
                  * ring:HEAD == rq:TAIL as we resubmit the
                  * request. See gen8_emit_breadcrumb() for
@@ -563,10 +575,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                  */
                 last->tail = last->wa_tail;
-                }
         }
-        do {
-                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+        while (rb) {
+                struct i915_priolist *p = to_priolist(rb);
                 struct i915_request *rq, *rn;
                 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
@@ -628,8 +639,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                 INIT_LIST_HEAD(&p->requests);
                 if (p->priority != I915_PRIORITY_NORMAL)
                         kmem_cache_free(engine->i915->priorities, p);
-        } while (rb);
+        }
 done:
+        execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
         execlists->first = rb;
         if (submit)
                 port_assign(port, last);
@@ -690,7 +702,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
         /* Flush the queued requests to the timeline list (for retiring). */
         rb = execlists->first;
         while (rb) {
-                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+                struct i915_priolist *p = to_priolist(rb);
                 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
                         INIT_LIST_HEAD(&rq->priotree.link);
@@ -708,7 +720,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
         /* Remaining _unready_ requests will be nop'ed when submitted */
+        execlists->queue_priority = INT_MIN;
         execlists->queue = RB_ROOT;
         execlists->first = NULL;
         GEM_BUG_ON(port_isset(execlists->port));
@@ -864,10 +876,11 @@ static void execlists_submission_tasklet(unsigned long data)
                                                 EXECLISTS_ACTIVE_USER));
                         rq = port_unpack(port, &count);
-                        GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x\n",
+                        GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x, prio=%d\n",
                                   engine->name,
                                   port->context_id, count,
-                                  rq ? rq->global_seqno : 0);
+                                  rq ? rq->global_seqno : 0,
+                                  rq ? rq_prio(rq) : 0);
                         /* Check the context/desc id for this event matches */
                         GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
@@ -912,15 +925,19 @@ static void execlists_submission_tasklet(unsigned long data)
         intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
 }
-static void insert_request(struct intel_engine_cs *engine,
+static void queue_request(struct intel_engine_cs *engine,
                            struct i915_priotree *pt,
                            int prio)
 {
-        struct i915_priolist *p = lookup_priolist(engine, pt, prio);
-        list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
-        if (ptr_unmask_bits(p, 1))
+        list_add_tail(&pt->link, &lookup_priolist(engine, pt, prio)->requests);
+}
+static void submit_queue(struct intel_engine_cs *engine, int prio)
+{
+        if (prio > engine->execlists.queue_priority) {
+                engine->execlists.queue_priority = prio;
                 tasklet_hi_schedule(&engine->execlists.tasklet);
+        }
 }
 static void execlists_submit_request(struct i915_request *request)
@@ -931,7 +948,8 @@ static void execlists_submit_request(struct i915_request *request)
         /* Will be called from irq-context when using foreign fences. */
         spin_lock_irqsave(&engine->timeline->lock, flags);
-        insert_request(engine, &request->priotree, request->priotree.priority);
+        queue_request(engine, &request->priotree, rq_prio(request));
+        submit_queue(engine, rq_prio(request));
         GEM_BUG_ON(!engine->execlists.first);
         GEM_BUG_ON(list_empty(&request->priotree.link));
@@ -987,7 +1005,7 @@ static void execlists_schedule(struct i915_request *request, int prio)
          * static void update_priorities(struct i915_priotree *pt, prio) {
          *         list_for_each_entry(dep, &pt->signalers_list, signal_link)
          *                 update_priorities(dep->signal, prio)
-         *         insert_request(pt);
+         *         queue_request(pt);
          * }
          * but that may have unlimited recursion depth and so runs a very
          * real risk of overunning the kernel stack. Instead, we build
@@ -1050,8 +1068,9 @@ static void execlists_schedule(struct i915_request *request, int prio)
                 pt->priority = prio;
                 if (!list_empty(&pt->link)) {
                         __list_del_entry(&pt->link);
-                        insert_request(engine, pt, prio);
+                        queue_request(engine, pt, prio);
                 }
+                submit_queue(engine, prio);
         }
         spin_unlock_irq(&engine->timeline->lock);
...
@@ -257,6 +257,16 @@ struct intel_engine_execlists {
          */
         unsigned int port_mask;
+        /**
+         * @queue_priority: Highest pending priority.
+         *
+         * When we add requests into the queue, or adjust the priority of
+         * executing requests, we compute the maximum priority of those
+         * pending requests. We can then use this value to determine if
+         * we need to preempt the executing requests to service the queue.
+         */
+        int queue_priority;
 /**
  * @queue: queue of requests, in priority lists
  */
...
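
As a companion to the @queue_priority documentation in the last hunk, a hedged sketch of how the cached value is kept in sync. The types and function bodies below are simplified placeholders rather than the driver's code, though the two update points correspond to submit_queue() and the end of the dequeue loop in the patch.

#include <limits.h>

/* Simplified placeholder types; the driver tracks this on
 * intel_engine_execlists and an rb-tree of i915_priolist nodes. */
struct fake_priolist {
        int priority;
};

struct fake_execlists {
        int queue_priority;
};

/*
 * Submission side: only a strictly higher priority needs to reschedule the
 * submission tasklet; anything lower can wait for the normal dequeue.
 */
static void fake_submit_queue(struct fake_execlists *el, int prio)
{
        if (prio > el->queue_priority) {
                el->queue_priority = prio;
                /* the driver calls tasklet_hi_schedule() here */
        }
}

/*
 * Dequeue side: once the ports have been filled, re-derive the cache from
 * whatever remains at the head of the priority-sorted queue, or reset it
 * to INT_MIN when the queue is empty.
 */
static void fake_update_after_dequeue(struct fake_execlists *el,
                                      const struct fake_priolist *first)
{
        el->queue_priority = first ? first->priority : INT_MIN;
}

Resetting to INT_MIN rather than 0 matters here: with an empty queue, the next submission of any priority, including negative ones, compares strictly greater than the cache and therefore wakes the tasklet.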