Commit 4386b8e5 authored by Chris Wilson

drm/i915/gt: Remove timeslice suppression

In the next^W future patch, we remove the strict priority system and
continuously re-evaluate the relative priority of tasks. As such we need
to enable the timeslice whenever there is more than one context in the
pipeline. This simplifies the decision and removes some of the tweaks to
suppress timeslicing, allowing us to lift the timeslice enabling to a
common spot at the end of running the submission tasklet.

One consequence of the suppression was that it reduced fairness
between virtual engines on an oversaturated system, undermining the
very principle of timeslicing.
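
To illustrate the simplified decision, here is a condensed sketch of
needs_timeslice() from the diff below (the helper name and the
flattened structure are mine, and the tracing is omitted; see the
diff for the exact kernel code):

	static bool want_timeslice(const struct intel_engine_cs *engine,
				   const struct i915_request *rq)
	{
		/* Timeslicing must be enabled and a context must be running */
		if (!intel_engine_has_timeslices(engine) ||
		    !rq || __i915_request_is_complete(rq))
			return false;

		/* Hold off until the HW has acknowledged the pending ELSP write */
		if (READ_ONCE(engine->execlists.pending[0]))
			return false;

		/*
		 * Slice whenever anything else could run: a second inflight
		 * context in ELSP[1], a request in the priority queue, or a
		 * waiting virtual engine.
		 */
		return !list_is_last_rcu(&rq->sched.link, &engine->active.requests) ||
		       !RB_EMPTY_ROOT(&engine->execlists.queue.rb_root) ||
		       !RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root);
	}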

v2: Commentary
v3: Commentary for the right cancel_timer()
v4: Add tracing for why we need a timeslice

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2802
Testcase: igt/gem_exec_balancer/fairslice
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210107132322.28373-1-chris@chris-wilson.co.uk
parent c185a16e
@@ -238,16 +238,6 @@ struct intel_engine_execlists {
 	 */
 	unsigned int port_mask;
 
-	/**
-	 * @switch_priority_hint: Second context priority.
-	 *
-	 * We submit multiple contexts to the HW simultaneously and would
-	 * like to occasionally switch between them to emulate timeslicing.
-	 * To know when timeslicing is suitable, we track the priority of
-	 * the context submitted second.
-	 */
-	int switch_priority_hint;
-
 	/**
 	 * @queue_priority_hint: Highest pending priority.
 	 *
@@ -1148,25 +1148,6 @@ static void defer_active(struct intel_engine_cs *engine)
 	defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
 }
 
-static bool
-need_timeslice(const struct intel_engine_cs *engine,
-	       const struct i915_request *rq)
-{
-	int hint;
-
-	if (!intel_engine_has_timeslices(engine))
-		return false;
-
-	hint = max(engine->execlists.queue_priority_hint,
-		   virtual_prio(&engine->execlists));
-
-	if (!list_is_last(&rq->sched.link, &engine->active.requests))
-		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
-
-	GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
-	return hint >= effective_prio(rq);
-}
-
 static bool
 timeslice_yield(const struct intel_engine_execlists *el,
 		const struct i915_request *rq)
@@ -1186,76 +1167,83 @@ timeslice_yield(const struct intel_engine_execlists *el,
 	return rq->context->lrc.ccid == READ_ONCE(el->yield);
 }
 
-static bool
-timeslice_expired(const struct intel_engine_execlists *el,
-		  const struct i915_request *rq)
-{
-	return timer_expired(&el->timer) || timeslice_yield(el, rq);
-}
-
-static int
-switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
-{
-	if (list_is_last(&rq->sched.link, &engine->active.requests))
-		return engine->execlists.queue_priority_hint;
-
-	return rq_prio(list_next_entry(rq, sched.link));
-}
-
-static inline unsigned long
-timeslice(const struct intel_engine_cs *engine)
-{
-	return READ_ONCE(engine->props.timeslice_duration_ms);
-}
-
-static unsigned long active_timeslice(const struct intel_engine_cs *engine)
-{
-	const struct intel_engine_execlists *execlists = &engine->execlists;
-	const struct i915_request *rq = *execlists->active;
-
-	if (!rq || __i915_request_is_complete(rq))
-		return 0;
-
-	if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
-		return 0;
-
-	return timeslice(engine);
-}
-
-static void set_timeslice(struct intel_engine_cs *engine)
-{
-	unsigned long duration;
-
-	if (!intel_engine_has_timeslices(engine))
-		return;
-
-	duration = active_timeslice(engine);
-	ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
-
-	set_timer_ms(&engine->execlists.timer, duration);
-}
-
-static void start_timeslice(struct intel_engine_cs *engine, int prio)
-{
-	struct intel_engine_execlists *execlists = &engine->execlists;
-	unsigned long duration;
-
-	if (!intel_engine_has_timeslices(engine))
-		return;
-
-	WRITE_ONCE(execlists->switch_priority_hint, prio);
-	if (prio == INT_MIN)
-		return;
-
-	if (timer_pending(&execlists->timer))
-		return;
-
-	duration = timeslice(engine);
-	ENGINE_TRACE(engine,
-		     "start timeslicing, prio:%d, interval:%lu",
-		     prio, duration);
-
-	set_timer_ms(&execlists->timer, duration);
-}
+static bool needs_timeslice(const struct intel_engine_cs *engine,
+			    const struct i915_request *rq)
+{
+	if (!intel_engine_has_timeslices(engine))
+		return false;
+
+	/* If not currently active, or about to switch, wait for next event */
+	if (!rq || __i915_request_is_complete(rq))
+		return false;
+
+	/* We do not need to start the timeslice until after the ACK */
+	if (READ_ONCE(engine->execlists.pending[0]))
+		return false;
+
+	/* If ELSP[1] is occupied, always check to see if worth slicing */
+	if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests)) {
+		ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
+		return true;
+	}
+
+	/* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
+	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) {
+		ENGINE_TRACE(engine, "timeslice required for queue\n");
+		return true;
+	}
+
+	if (!RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root)) {
+		ENGINE_TRACE(engine, "timeslice required for virtual\n");
+		return true;
+	}
+
+	return false;
+}
+
+static bool
+timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+	const struct intel_engine_execlists *el = &engine->execlists;
+
+	if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
+		return false;
+
+	if (!needs_timeslice(engine, rq))
+		return false;
+
+	return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
+static unsigned long timeslice(const struct intel_engine_cs *engine)
+{
+	return READ_ONCE(engine->props.timeslice_duration_ms);
+}
+
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists *el = &engine->execlists;
+	unsigned long duration;
+
+	/* Disable the timer if there is nothing to switch to */
+	duration = 0;
+	if (needs_timeslice(engine, *el->active)) {
+		/* Avoid continually prolonging an active timeslice */
+		if (timer_active(&el->timer)) {
+			/*
+			 * If we just submitted a new ELSP after an old
+			 * context, that context may have already consumed
+			 * its timeslice, so recheck.
+			 */
+			if (!timer_pending(&el->timer))
+				tasklet_hi_schedule(&el->tasklet);
+			return;
+		}
+
+		duration = timeslice(engine);
+	}
+
+	set_timer_ms(&el->timer, duration);
+}
 
 static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1368,16 +1356,32 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			__unwind_incomplete_requests(engine);
 
 			last = NULL;
-		} else if (need_timeslice(engine, last) &&
-			   timeslice_expired(execlists, last)) {
+		} else if (timeslice_expired(engine, last)) {
 			ENGINE_TRACE(engine,
-				     "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
-				     last->fence.context,
-				     last->fence.seqno,
-				     last->sched.attr.priority,
+				     "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
+				     yesno(timer_expired(&execlists->timer)),
+				     last->fence.context, last->fence.seqno,
+				     rq_prio(last),
 				     execlists->queue_priority_hint,
 				     yesno(timeslice_yield(execlists, last)));
 
+			/*
+			 * Consume this timeslice; ensure we start a new one.
+			 *
+			 * The timeslice expired, and we will unwind the
+			 * running contexts and recompute the next ELSP.
+			 * If that submit will be the same pair of contexts
+			 * (due to dependency ordering), we will skip the
+			 * submission. If we don't cancel the timer now,
+			 * we will see that the timer has expired and
+			 * reschedule the tasklet; continually until the
+			 * next context switch or other preemption event.
+			 *
+			 * Since we have decided to reschedule based on
+			 * consumption of this timeslice, if we submit the
+			 * same context again, grant it a full timeslice.
+			 */
+			cancel_timer(&execlists->timer);
 			ring_set_paused(engine, 1);
 			defer_active(engine);
 
@@ -1413,7 +1417,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * of timeslices, our queue might be.
 			 */
 			spin_unlock(&engine->active.lock);
-			start_timeslice(engine, queue_prio(execlists));
 			return;
 		}
 	}
@@ -1440,7 +1443,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		if (last && !can_merge_rq(last, rq)) {
 			spin_unlock(&ve->base.active.lock);
 			spin_unlock(&engine->active.lock);
-			start_timeslice(engine, rq_prio(rq));
 			return; /* leave this for another sibling */
 		}
 
@@ -1604,29 +1606,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	execlists->queue_priority_hint = queue_prio(execlists);
 	spin_unlock(&engine->active.lock);
 
-	if (submit) {
-		/*
-		 * Skip if we ended up with exactly the same set of requests,
-		 * e.g. trying to timeslice a pair of ordered contexts
-		 */
-		if (!memcmp(execlists->active,
-			    execlists->pending,
-			    (port - execlists->pending) * sizeof(*port)))
-			goto skip_submit;
+	/*
+	 * We can skip poking the HW if we ended up with exactly the same set
+	 * of requests as currently running, e.g. trying to timeslice a pair
+	 * of ordered contexts.
+	 */
+	if (submit &&
+	    memcmp(execlists->active,
+		   execlists->pending,
+		   (port - execlists->pending) * sizeof(*port))) {
 		*port = NULL;
 		while (port-- != execlists->pending)
 			execlists_schedule_in(*port, port - execlists->pending);
 
-		execlists->switch_priority_hint =
-			switch_prio(engine, *execlists->pending);
-
 		WRITE_ONCE(execlists->yield, -1);
 		set_preempt_timeout(engine, *execlists->active);
 		execlists_submit_ports(engine);
 	} else {
-		start_timeslice(engine, execlists->queue_priority_hint);
-skip_submit:
 		ring_set_paused(engine, 0);
 		while (port-- != execlists->pending)
 			i915_request_put(*port);
@@ -1805,12 +1801,19 @@ csb_read(const struct intel_engine_cs *engine, u64 * const csb)
 	return entry;
 }
 
+static void new_timeslice(struct intel_engine_execlists *el)
+{
+	/* By cancelling, we will start afresh in start_timeslice() */
+	cancel_timer(&el->timer);
+}
+
 static struct i915_request **
 process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	u64 * const buf = execlists->csb_status;
 	const u8 num_entries = execlists->csb_size;
+	struct i915_request **prev;
 	u8 head, tail;
 
 	/*
@@ -1865,6 +1868,11 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 	 * we perform the READ_ONCE(*csb_write).
 	 */
 	rmb();
 
+	/* Remember who was last running under the timer */
+	prev = inactive;
+	*prev = NULL;
+
 	do {
 		bool promote;
 		u64 csb;
@@ -1984,8 +1992,6 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 		}
 	} while (head != tail);
 
-	set_timeslice(engine);
-
 	/*
 	 * Gen11 has proven to fail wrt global observation point between
 	 * entry and tail update, failing on the ordering and thus
@@ -1999,6 +2005,14 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 	 */
 	invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
 
+	/*
+	 * We assume that any event reflects a change in context flow
+	 * and merits a fresh timeslice. We reinstall the timer after
+	 * inspecting the queue to see if we need to resubmit.
+	 */
+	if (*prev != *execlists->active) /* elide lite-restores */
+		new_timeslice(execlists);
+
 	return inactive;
 }
@@ -2410,8 +2424,10 @@ static void execlists_submission_tasklet(unsigned long data)
 		execlists_reset(engine, msg);
 	}
 
-	if (!engine->execlists.pending[0])
+	if (!engine->execlists.pending[0]) {
 		execlists_dequeue_irq(engine);
+		start_timeslice(engine);
+	}
 
 	post_process_csb(post, inactive);
 	rcu_read_unlock();
@@ -3856,9 +3872,6 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 		show_request(m, last, "\t\t", 0);
 	}
 
-	if (execlists->switch_priority_hint != INT_MIN)
-		drm_printf(m, "\t\tSwitch priority hint: %d\n",
-			   READ_ONCE(execlists->switch_priority_hint));
 	if (execlists->queue_priority_hint != INT_MIN)
 		drm_printf(m, "\t\tQueue priority hint: %d\n",
 			   READ_ONCE(execlists->queue_priority_hint));