Commit 89b6d183 authored by Chris Wilson's avatar Chris Wilson

drm/i915/execlists: Tweak virtual unsubmission

Since commit e2144503 ("drm/i915: Prevent bonded requests from
overtaking each other on preemption") we have restricted requests to run
on their chosen engine across preemption events. We can take this
restriction into account to know that we will want to resubmit those
requests onto the same physical engine, and so can shortcircuit the
virtual engine selection process and keep the request on the same
engine during unwind.

References: e2144503 ("drm/i915: Prevent bonded requests from overtaking each other on preemption")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Ramalingam C <ramalingam.c@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191013203012.25208-1-chris@chris-wilson.co.uk
parent 9506c23d
...@@ -847,7 +847,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) ...@@ -847,7 +847,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_for_each_entry_safe_reverse(rq, rn, list_for_each_entry_safe_reverse(rq, rn,
&engine->active.requests, &engine->active.requests,
sched.link) { sched.link) {
struct intel_engine_cs *owner;
if (i915_request_completed(rq)) if (i915_request_completed(rq))
continue; /* XXX */ continue; /* XXX */
...@@ -862,8 +861,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) ...@@ -862,8 +861,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
* engine so that it can be moved across onto another physical * engine so that it can be moved across onto another physical
* engine as load dictates. * engine as load dictates.
*/ */
owner = rq->hw_context->engine; if (likely(rq->execution_mask == engine->mask)) {
if (likely(owner == engine)) {
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
if (rq_prio(rq) != prio) { if (rq_prio(rq) != prio) {
prio = rq_prio(rq); prio = rq_prio(rq);
...@@ -874,6 +872,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) ...@@ -874,6 +872,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_move(&rq->sched.link, pl); list_move(&rq->sched.link, pl);
active = rq; active = rq;
} else { } else {
struct intel_engine_cs *owner = rq->hw_context->engine;
/* /*
* Decouple the virtual breadcrumb before moving it * Decouple the virtual breadcrumb before moving it
* back to the virtual engine -- we don't want the * back to the virtual engine -- we don't want the
......
...@@ -649,6 +649,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) ...@@ -649,6 +649,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->gem_context = ce->gem_context; rq->gem_context = ce->gem_context;
rq->engine = ce->engine; rq->engine = ce->engine;
rq->ring = ce->ring; rq->ring = ce->ring;
rq->execution_mask = ce->engine->mask;
rcu_assign_pointer(rq->timeline, tl); rcu_assign_pointer(rq->timeline, tl);
rq->hwsp_seqno = tl->hwsp_seqno; rq->hwsp_seqno = tl->hwsp_seqno;
...@@ -671,7 +672,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) ...@@ -671,7 +672,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->batch = NULL; rq->batch = NULL;
rq->capture_list = NULL; rq->capture_list = NULL;
rq->flags = 0; rq->flags = 0;
rq->execution_mask = ALL_ENGINES;
INIT_LIST_HEAD(&rq->execute_cb); INIT_LIST_HEAD(&rq->execute_cb);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment