Commit 85f5e1f3 authored by Chris Wilson

drm/i915: Combine multiple internal plists into the same i915_priolist bucket

As we are about to allow ourselves to slightly bump the user priority
into a few different sublevels, pack those internal priority lists
into the same i915_priolist to keep the rbtree compact and avoid having
to allocate the default user priority even after the internal bumping.
The downside to having a requests[] array rather than a node per active list,
is that we then have to walk over the empty higher priority lists. To
compensate, we track the active buckets and use a small bitmap to skip
over any inactive ones.
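
For a concrete feel of the packing, a minimal user-space sketch (the PRIO_*
constants and helpers below are illustrative stand-ins, not the driver's own)
of how a bumped priority splits into an rbtree key shared by all sublevels
plus a bucket index within that node:

  #include <stdio.h>

  /* Illustrative stand-ins for the internal sublevel constants. */
  #define PRIO_SHIFT 2                     /* bits of internal priority */
  #define PRIO_COUNT (1 << PRIO_SHIFT)     /* buckets per rbtree node   */
  #define PRIO_MASK  (PRIO_COUNT - 1)

  /* Buckets are sorted from highest priority (slot 0) to lowest. */
  static int bucket_index(int full_prio)
  {
          return PRIO_COUNT - (full_prio & PRIO_MASK) - 1;
  }

  /* The rbtree key: only the user-visible part of the priority. */
  static int node_priority(int full_prio)
  {
          return full_prio >> PRIO_SHIFT;
  }

  int main(void)
  {
          /* Two requests at the same user priority, one internally bumped,
           * share a node but land in different buckets. */
          int plain  = 1 << PRIO_SHIFT;               /* user prio 1, no bump  */
          int bumped = (1 << PRIO_SHIFT) | PRIO_MASK; /* user prio 1, max bump */

          printf("plain:  node %d, bucket %d\n",
                 node_priority(plain), bucket_index(plain));   /* node 1, bucket 3 */
          printf("bumped: node %d, bucket %d\n",
                 node_priority(bumped), bucket_index(bumped)); /* node 1, bucket 0 */
          return 0;
  }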

v2: Use MASK of internal levels to simplify our usage.
v3: Prevent overflow when SHIFT is zero.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181001123204.23982-4-chris@chris-wilson.co.uk
parent 7651a445
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1534,10 +1534,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
         count = 0;
         drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
         for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
-                struct i915_priolist *p =
-                        rb_entry(rb, typeof(*p), node);
+                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+                int i;

-                list_for_each_entry(rq, &p->requests, sched.link) {
+                priolist_for_each_request(rq, p, i) {
                         if (count++ < MAX_REQUESTS_TO_SHOW - 1)
                                 print_request(m, rq, "\t\tQ ");
                         else
...
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -746,30 +746,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
         while ((rb = rb_first_cached(&execlists->queue))) {
                 struct i915_priolist *p = to_priolist(rb);
                 struct i915_request *rq, *rn;
+                int i;

-                list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+                priolist_for_each_request_consume(rq, rn, p, i) {
                         if (last && rq->hw_context != last->hw_context) {
-                                if (port == last_port) {
-                                        __list_del_many(&p->requests,
-                                                        &rq->sched.link);
+                                if (port == last_port)
                                         goto done;
-                                }

                                 if (submit)
                                         port_assign(port, last);
                                 port++;
                         }

-                        INIT_LIST_HEAD(&rq->sched.link);
+                        list_del_init(&rq->sched.link);

                         __i915_request_submit(rq);
                         trace_i915_request_in(rq, port_index(port, execlists));
+
                         last = rq;
                         submit = true;
                 }

                 rb_erase_cached(&p->node, &execlists->queue);
-                INIT_LIST_HEAD(&p->requests);
                 if (p->priority != I915_PRIORITY_NORMAL)
                         kmem_cache_free(engine->i915->priorities, p);
         }
...
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -259,14 +259,49 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
         ce->lrc_desc = desc;
 }

-static struct i915_priolist *
+static void assert_priolists(struct intel_engine_execlists * const execlists,
+                             long queue_priority)
+{
+        struct rb_node *rb;
+        long last_prio, i;
+
+        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+                return;
+
+        GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
+                   rb_first(&execlists->queue.rb_root));
+
+        last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
+        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+                struct i915_priolist *p = to_priolist(rb);
+
+                GEM_BUG_ON(p->priority >= last_prio);
+                last_prio = p->priority;
+
+                GEM_BUG_ON(!p->used);
+                for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
+                        if (list_empty(&p->requests[i]))
+                                continue;
+
+                        GEM_BUG_ON(!(p->used & BIT(i)));
+                }
+        }
+}
+
+static struct list_head *
 lookup_priolist(struct intel_engine_cs *engine, int prio)
 {
         struct intel_engine_execlists * const execlists = &engine->execlists;
         struct i915_priolist *p;
         struct rb_node **parent, *rb;
         bool first = true;
+        int idx, i;
+
+        assert_priolists(execlists, INT_MAX);
+
+        /* buckets sorted from highest [in slot 0] to lowest priority */
+        idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
+        prio >>= I915_USER_PRIORITY_SHIFT;

         if (unlikely(execlists->no_priolist))
                 prio = I915_PRIORITY_NORMAL;
@@ -283,7 +318,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
                         parent = &rb->rb_right;
                         first = false;
                 } else {
-                        return p;
+                        goto out;
                 }
         }

@@ -309,11 +344,15 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
         }

         p->priority = prio;
-        INIT_LIST_HEAD(&p->requests);
+        for (i = 0; i < ARRAY_SIZE(p->requests); i++)
+                INIT_LIST_HEAD(&p->requests[i]);
         rb_link_node(&p->node, rb, parent);
         rb_insert_color_cached(&p->node, &execlists->queue, first);
+        p->used = 0;

-        return p;
+out:
+        p->used |= BIT(idx);
+        return &p->requests[idx];
 }

 static void unwind_wa_tail(struct i915_request *rq)
@@ -325,7 +364,7 @@ static void unwind_wa_tail(struct i915_request *rq)
 static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
         struct i915_request *rq, *rn;
-        struct i915_priolist *uninitialized_var(p);
+        struct list_head *uninitialized_var(pl);
         int last_prio = I915_PRIORITY_INVALID;

         lockdep_assert_held(&engine->timeline.lock);
@@ -342,12 +381,11 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
                 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
                 if (rq_prio(rq) != last_prio) {
                         last_prio = rq_prio(rq);
-                        p = lookup_priolist(engine, last_prio);
+                        pl = lookup_priolist(engine, last_prio);
                 }
                 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));

-                GEM_BUG_ON(p->priority != rq_prio(rq));
-                list_add(&rq->sched.link, &p->requests);
+                list_add(&rq->sched.link, pl);
         }
 }

@@ -664,8 +702,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         while ((rb = rb_first_cached(&execlists->queue))) {
                 struct i915_priolist *p = to_priolist(rb);
                 struct i915_request *rq, *rn;
+                int i;

-                list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+                priolist_for_each_request_consume(rq, rn, p, i) {
                         /*
                          * Can we combine this request with the current port?
                          * It has to be the same context/ringbuffer and not
@@ -684,11 +723,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                          * combine this request with the last, then we
                          * are done.
                          */
-                        if (port == last_port) {
-                                __list_del_many(&p->requests,
-                                                &rq->sched.link);
+                        if (port == last_port)
                                 goto done;
-                        }

                         /*
                          * If GVT overrides us we only ever submit
@@ -698,11 +734,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                          * request) to the second port.
                          */
                         if (ctx_single_port_submission(last->hw_context) ||
-                            ctx_single_port_submission(rq->hw_context)) {
-                                __list_del_many(&p->requests,
-                                                &rq->sched.link);
+                            ctx_single_port_submission(rq->hw_context))
                                 goto done;
-                        }

                         GEM_BUG_ON(last->hw_context == rq->hw_context);

@@ -713,15 +746,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                 GEM_BUG_ON(port_isset(port));
                         }

-                        INIT_LIST_HEAD(&rq->sched.link);
+                        list_del_init(&rq->sched.link);
+
                         __i915_request_submit(rq);
                         trace_i915_request_in(rq, port_index(port, execlists));
+
                         last = rq;
                         submit = true;
                 }

                 rb_erase_cached(&p->node, &execlists->queue);
-                INIT_LIST_HEAD(&p->requests);
                 if (p->priority != I915_PRIORITY_NORMAL)
                         kmem_cache_free(engine->i915->priorities, p);
         }
@@ -745,6 +779,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
          */
         execlists->queue_priority =
                 port != execlists->port ? rq_prio(last) : INT_MIN;
+        assert_priolists(execlists, execlists->queue_priority);

         if (submit) {
                 port_assign(port, last);
@@ -856,16 +891,16 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
         /* Flush the queued requests to the timeline list (for retiring). */
         while ((rb = rb_first_cached(&execlists->queue))) {
                 struct i915_priolist *p = to_priolist(rb);
+                int i;

-                list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
-                        INIT_LIST_HEAD(&rq->sched.link);
+                priolist_for_each_request_consume(rq, rn, p, i) {
+                        list_del_init(&rq->sched.link);

                         dma_fence_set_error(&rq->fence, -EIO);
                         __i915_request_submit(rq);
                 }

                 rb_erase_cached(&p->node, &execlists->queue);
-                INIT_LIST_HEAD(&p->requests);
                 if (p->priority != I915_PRIORITY_NORMAL)
                         kmem_cache_free(engine->i915->priorities, p);
         }
@@ -1071,8 +1106,7 @@ static void queue_request(struct intel_engine_cs *engine,
                           struct i915_sched_node *node,
                           int prio)
 {
-        list_add_tail(&node->link,
-                      &lookup_priolist(engine, prio)->requests);
+        list_add_tail(&node->link, lookup_priolist(engine, prio));
 }

 static void __update_queue(struct intel_engine_cs *engine, int prio)
@@ -1142,7 +1176,7 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 static void execlists_schedule(struct i915_request *request,
                                const struct i915_sched_attr *attr)
 {
-        struct i915_priolist *uninitialized_var(pl);
+        struct list_head *uninitialized_var(pl);
         struct intel_engine_cs *engine, *last;
         struct i915_dependency *dep, *p;
         struct i915_dependency stack;
@@ -1241,8 +1275,7 @@ static void execlists_schedule(struct i915_request *request,
                         pl = lookup_priolist(engine, prio);
                         last = engine;
                 }
-                GEM_BUG_ON(pl->priority != prio);
-                list_move_tail(&node->link, &pl->requests);
+                list_move_tail(&node->link, pl);
         } else {
                 /*
                  * If the request is not in the priolist queue because
...
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -190,11 +190,22 @@ enum intel_engine_id {
 };

 struct i915_priolist {
+        struct list_head requests[I915_PRIORITY_COUNT];
         struct rb_node node;
-        struct list_head requests;
+        unsigned long used;
         int priority;
 };

+#define priolist_for_each_request(it, plist, idx) \
+        for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+                list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+        for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+                list_for_each_entry_safe(it, n, \
+                                         &(plist)->requests[idx - 1], \
+                                         sched.link)
+
 struct st_preempt_hang {
         struct completion completion;
         bool inject_hang;
...
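
As a footnote to the new iterators above: a small self-contained user-space
sketch of the bitmap walk that priolist_for_each_request_consume() performs,
with a plain counter array standing in for the request lists and POSIX ffs()
for the kernel helper. Each pass takes the lowest set bit, which is the
highest-priority non-empty bucket, and clears it once that bucket is drained.

  #include <stdio.h>
  #include <strings.h>    /* POSIX ffs() */

  #define NBUCKETS 4

  int main(void)
  {
          /* Pretend buckets 0 and 3 hold queued requests. */
          int nrequests[NBUCKETS] = { 2, 0, 0, 1 };
          unsigned int used = (1u << 0) | (1u << 3);
          int idx;

          /* Same shape as the consume loop: empty buckets are never visited. */
          while ((idx = ffs(used))) {
                  idx--;
                  printf("bucket %d: draining %d request(s)\n", idx, nrequests[idx]);
                  nrequests[idx] = 0;
                  used &= ~(1u << idx);
          }
          return 0;
  }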