Commit 4a317415 authored by Chris Wilson

drm/i915/gem: Refine occupancy test in kill_context()

Don't just look at the very last request in a queue when deciding if we
need to evict the context from the GPU, as that request may still be in
the submission queue while the rest of the context is running!

Instead, walk back along the queued requests looking for the active
request and checking that.
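
For reference, the new lookup boils down to the reverse walk sketched below. This is a condensed restatement of the active_engine() helper added in the diff that follows (the name context_active_engine is only a placeholder for this sketch), with comments spelling out each step:

static struct intel_engine_cs *context_active_engine(struct intel_context *ce)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_request *rq;

	/* A context without a timeline has never been submitted. */
	if (!ce->timeline)
		return NULL;

	rcu_read_lock();
	/* Walk newest -> oldest; stop at the first request already retired. */
	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
		if (i915_request_completed(rq))
			break;

		/* Ask the backend whether this request is currently inflight. */
		engine = __active_engine(rq);
		if (engine)
			break;
	}
	rcu_read_unlock();

	return engine;
}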

Fixes: 2e0986a5 ("drm/i915/gem: Cancel contexts when hangchecking is disabled")
Testcase: igt/gem_ctx_persistence/queued
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191031090104.22245-1-chris@chris-wilson.co.uk
parent 2b73b350
@@ -333,10 +333,8 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
 	return __reset_engine(engine);
 }
 
-static struct intel_engine_cs *
-active_engine(struct dma_fence *fence, struct intel_context *ce)
+static struct intel_engine_cs *__active_engine(struct i915_request *rq)
 {
-	struct i915_request *rq = to_request(fence);
 	struct intel_engine_cs *engine, *locked;
 
 	/*
@@ -360,6 +358,29 @@ active_engine(struct dma_fence *fence, struct intel_context *ce)
 	return engine;
 }
 
+static struct intel_engine_cs *active_engine(struct intel_context *ce)
+{
+	struct intel_engine_cs *engine = NULL;
+	struct i915_request *rq;
+
+	if (!ce->timeline)
+		return NULL;
+
+	rcu_read_lock();
+	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
+		if (i915_request_completed(rq))
+			break;
+
+		/* Check with the backend if the request is inflight */
+		engine = __active_engine(rq);
+		if (engine)
+			break;
+	}
+	rcu_read_unlock();
+
+	return engine;
+}
+
 static void kill_context(struct i915_gem_context *ctx)
 {
 	struct i915_gem_engines_iter it;
@@ -383,17 +404,15 @@ static void kill_context(struct i915_gem_context *ctx)
 	 */
 	for_each_gem_engine(ce, __context_engines_static(ctx), it) {
 		struct intel_engine_cs *engine;
-		struct dma_fence *fence;
-
-		if (!ce->timeline)
-			continue;
-
-		fence = i915_active_fence_get(&ce->timeline->last_request);
-		if (!fence)
-			continue;
 
-		/* Check with the backend if the request is still inflight */
-		engine = active_engine(fence, ce);
+		/*
+		 * Check the current active state of this context; if we
+		 * are currently executing on the GPU we need to evict
+		 * ourselves. On the other hand, if we haven't yet been
+		 * submitted to the GPU or if everything is complete,
+		 * we have nothing to do.
+		 */
+		engine = active_engine(ce);
 
 		/* First attempt to gracefully cancel the context */
 		if (engine && !__cancel_engine(engine))
@@ -403,8 +422,6 @@ static void kill_context(struct i915_gem_context *ctx)
 		 * reset. We hope the collateral damage is worth it.
 		 */
 		__reset_context(ctx, engine);
-
-		dma_fence_put(fence);
 	}
 }