Commit 9726920b authored by Chris Wilson

drm/i915: Only reset the pinned kernel contexts on resume

On resume, we know that the only pinned contexts in danger of seeing
corruption are the kernel context, and so we do not need to walk the
list of all GEM contexts as we tracked them on each engine.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190410190120.830-1-chris@chris-wilson.co.uk
parent feb8846b
...@@ -1995,7 +1995,6 @@ struct drm_i915_private { ...@@ -1995,7 +1995,6 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct { struct {
void (*resume)(struct drm_i915_private *);
void (*cleanup_engine)(struct intel_engine_cs *engine); void (*cleanup_engine)(struct intel_engine_cs *engine);
struct i915_gt_timelines { struct i915_gt_timelines {
......
...@@ -4513,7 +4513,7 @@ void i915_gem_resume(struct drm_i915_private *i915) ...@@ -4513,7 +4513,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
* guarantee that the context image is complete. So let's just reset * guarantee that the context image is complete. So let's just reset
* it and start again. * it and start again.
*/ */
i915->gt.resume(i915); intel_gt_resume(i915);
if (i915_gem_init_hw(i915)) if (i915_gem_init_hw(i915))
goto err_wedged; goto err_wedged;
...@@ -4853,13 +4853,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv) ...@@ -4853,13 +4853,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1); dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
dev_priv->gt.resume = intel_lr_context_resume;
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
} else { else
dev_priv->gt.resume = intel_legacy_submission_resume;
dev_priv->gt.cleanup_engine = intel_engine_cleanup; dev_priv->gt.cleanup_engine = intel_engine_cleanup;
}
i915_timelines_init(dev_priv); i915_timelines_init(dev_priv);
......
...@@ -24,6 +24,7 @@ struct intel_context_ops { ...@@ -24,6 +24,7 @@ struct intel_context_ops {
int (*pin)(struct intel_context *ce); int (*pin)(struct intel_context *ce);
void (*unpin)(struct intel_context *ce); void (*unpin)(struct intel_context *ce);
void (*reset)(struct intel_context *ce);
void (*destroy)(struct kref *kref); void (*destroy)(struct kref *kref);
}; };
......
...@@ -753,6 +753,30 @@ int intel_engine_init_common(struct intel_engine_cs *engine) ...@@ -753,6 +753,30 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
return ret; return ret;
} }
/*
 * Re-sanitize the context images of the pinned kernel contexts after a
 * suspend/resume cycle. Only the per-engine kernel and preempt contexts
 * remain pinned across suspend; user contexts are fixed up lazily on
 * their next pin, so they need no attention here.
 */
void intel_gt_resume(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		struct intel_context *kctx = engine->kernel_context;
		struct intel_context *pctx = engine->preempt_context;

		/* Either context may be absent on some engines/platforms. */
		if (kctx)
			kctx->ops->reset(kctx);
		if (pctx)
			pctx->ops->reset(pctx);
	}
}
/** /**
* intel_engines_cleanup_common - cleans up the engine state created by * intel_engines_cleanup_common - cleans up the engine state created by
* the common initiailizers. * the common initiailizers.
......
...@@ -1379,9 +1379,33 @@ static int execlists_context_pin(struct intel_context *ce) ...@@ -1379,9 +1379,33 @@ static int execlists_context_pin(struct intel_context *ce)
return __execlists_context_pin(ce, ce->engine); return __execlists_context_pin(ce, ce->engine);
} }
static void execlists_context_reset(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/*
	 * Because we emit WA_TAIL_DWORDS, our bookkeeping of
	 * ce->ring->head / ce->ring->tail may disagree with the values
	 * saved in the context image. Only commands from ce->ring->tail
	 * onwards are valid; everything before that is junk, and if the
	 * GPU resumes reading from the stale RING_HEAD in the context it
	 * may execute that junk and die.
	 *
	 * The contexts that are still pinned on resume belong to the
	 * kernel and are local to each engine. All other contexts have
	 * their head/tail sanitized on pinning before use, so they will
	 * never see garbage.
	 *
	 * To avoid the hazard, reset the context image on resume. For
	 * simplicity, just zero everything out.
	 */
	intel_ring_reset(ce->ring, 0);
	__execlists_update_reg_state(ce, engine);
}
static const struct intel_context_ops execlists_context_ops = { static const struct intel_context_ops execlists_context_ops = {
.pin = execlists_context_pin, .pin = execlists_context_pin,
.unpin = execlists_context_unpin, .unpin = execlists_context_unpin,
.reset = execlists_context_reset,
.destroy = execlists_context_destroy, .destroy = execlists_context_destroy,
}; };
...@@ -2895,31 +2919,6 @@ static int execlists_context_deferred_alloc(struct intel_context *ce, ...@@ -2895,31 +2919,6 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
return ret; return ret;
} }
/*
 * Sanitize the ring state of every context still active after resume.
 * Walks all GEM contexts and, for each context active on an engine,
 * rewinds its ring and rewrites the saved register state.
 * NOTE(review): presumably the caller holds whatever lock protects
 * i915->contexts.list — confirm at the call site.
 */
void intel_lr_context_resume(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct intel_context *ce;

	/*
	 * Because we emit WA_TAIL_DWORDS there may be a disparity
	 * between our bookkeeping in ce->ring->head and ce->ring->tail and
	 * that stored in context. As we only write new commands from
	 * ce->ring->tail onwards, everything before that is junk. If the GPU
	 * starts reading from its RING_HEAD from the context, it may try to
	 * execute that junk and die.
	 *
	 * So to avoid that we reset the context images upon resume. For
	 * simplicity, we just zero everything out.
	 */
	list_for_each_entry(ctx, &i915->contexts.list, link) {
		list_for_each_entry(ce, &ctx->active_engines, active_link) {
			/* An active context must have a ring allocated. */
			GEM_BUG_ON(!ce->ring);
			/* Rewind the ring, then refresh the context image. */
			intel_ring_reset(ce->ring, 0);
			__execlists_update_reg_state(ce, ce->engine);
		}
	}
}
void intel_execlists_show_requests(struct intel_engine_cs *engine, void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m, struct drm_printer *m,
void (*show_request)(struct drm_printer *m, void (*show_request)(struct drm_printer *m,
......
...@@ -102,7 +102,6 @@ struct drm_printer; ...@@ -102,7 +102,6 @@ struct drm_printer;
struct drm_i915_private; struct drm_i915_private;
struct i915_gem_context; struct i915_gem_context;
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
void intel_execlists_set_default_submission(struct intel_engine_cs *engine); void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
void intel_execlists_show_requests(struct intel_engine_cs *engine, void intel_execlists_show_requests(struct intel_engine_cs *engine,
......
...@@ -1508,9 +1508,16 @@ static int ring_context_pin(struct intel_context *ce) ...@@ -1508,9 +1508,16 @@ static int ring_context_pin(struct intel_context *ce)
return err; return err;
} }
/* Rewind the context's ring to the start after a suspend/resume cycle. */
static void ring_context_reset(struct intel_context *ce)
{
	struct intel_ring *ring = ce->ring;

	intel_ring_reset(ring, 0);
}
static const struct intel_context_ops ring_context_ops = { static const struct intel_context_ops ring_context_ops = {
.pin = ring_context_pin, .pin = ring_context_pin,
.unpin = ring_context_unpin, .unpin = ring_context_unpin,
.reset = ring_context_reset,
.destroy = ring_context_destroy, .destroy = ring_context_destroy,
}; };
...@@ -1581,16 +1588,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine) ...@@ -1581,16 +1588,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
kfree(engine); kfree(engine);
} }
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
/* Restart from the beginning of the rings for convenience */
for_each_engine(engine, dev_priv, id)
intel_ring_reset(engine->buffer, 0);
}
static int load_pd_dir(struct i915_request *rq, static int load_pd_dir(struct i915_request *rq,
const struct i915_hw_ppgtt *ppgtt) const struct i915_hw_ppgtt *ppgtt)
{ {
......
...@@ -268,8 +268,6 @@ static inline void intel_ring_put(struct intel_ring *ring) ...@@ -268,8 +268,6 @@ static inline void intel_ring_put(struct intel_ring *ring)
void intel_engine_stop(struct intel_engine_cs *engine); void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine); void intel_engine_cleanup(struct intel_engine_cs *engine);
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
int __must_check intel_ring_cacheline_align(struct i915_request *rq); int __must_check intel_ring_cacheline_align(struct i915_request *rq);
u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n); u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
...@@ -463,6 +461,7 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine, ...@@ -463,6 +461,7 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine,
} }
void intel_engines_sanitize(struct drm_i915_private *i915, bool force); void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
void intel_gt_resume(struct drm_i915_private *i915);
bool intel_engine_is_idle(struct intel_engine_cs *engine); bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv); bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment