Commit 90f4fcd5 authored by Chris Wilson's avatar Chris Wilson

drm/i915: Remove forced stop ring on suspend/unload

Before suspending (or unloading), we would first wait upon all rendering
to be completed and then disable the rings. This latter step is a remnant
from DRI1 days when we did not use request tracking for all operations
upon the ring. Now that we are sure we are waiting upon the very last
operation by the engine, we can forgo clobbering the ring registers,
though we do keep the assert that the engine is indeed idle before
sleeping.
Signed-off-by: default avatarChris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: default avatarJoonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470388464-28458-5-git-send-email-chris@chris-wilson.co.uk
parent f826ee21
...@@ -2004,7 +2004,6 @@ struct drm_i915_private { ...@@ -2004,7 +2004,6 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct { struct {
void (*cleanup_engine)(struct intel_engine_cs *engine); void (*cleanup_engine)(struct intel_engine_cs *engine);
void (*stop_engine)(struct intel_engine_cs *engine);
/** /**
* Is the GPU currently considered idle, or busy executing * Is the GPU currently considered idle, or busy executing
......
...@@ -4080,16 +4080,6 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, ...@@ -4080,16 +4080,6 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
return NULL; return NULL;
} }
static void
i915_gem_stop_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv)
dev_priv->gt.stop_engine(engine);
}
int int
i915_gem_suspend(struct drm_device *dev) i915_gem_suspend(struct drm_device *dev)
{ {
...@@ -4118,12 +4108,6 @@ i915_gem_suspend(struct drm_device *dev) ...@@ -4118,12 +4108,6 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_retire_requests(dev_priv); i915_gem_retire_requests(dev_priv);
/* Note that rather than stopping the engines, all we have to do
* is assert that every RING_HEAD == RING_TAIL (all execution complete)
* and similar for all logical context images (to ensure they are
* all ready for hibernation).
*/
i915_gem_stop_engines(dev);
i915_gem_context_lost(dev_priv); i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
...@@ -4308,10 +4292,8 @@ int i915_gem_init(struct drm_device *dev) ...@@ -4308,10 +4292,8 @@ int i915_gem_init(struct drm_device *dev)
if (!i915.enable_execlists) { if (!i915.enable_execlists) {
dev_priv->gt.cleanup_engine = intel_engine_cleanup; dev_priv->gt.cleanup_engine = intel_engine_cleanup;
dev_priv->gt.stop_engine = intel_engine_stop;
} else { } else {
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
dev_priv->gt.stop_engine = intel_logical_ring_stop;
} }
/* This is just a security blanket to placate dragons. /* This is just a security blanket to placate dragons.
......
...@@ -760,31 +760,6 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine) ...@@ -760,31 +760,6 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
} }
} }
void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
if (!intel_engine_initialized(engine))
return;
ret = intel_engine_idle(engine);
if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
engine->name, ret);
/* TODO: Is this correct with Execlists enabled? */
I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
if (intel_wait_for_register(dev_priv,
RING_MI_MODE(engine->mmio_base),
MODE_IDLE, MODE_IDLE,
1000)) {
DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
return;
}
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
static int intel_lr_context_pin(struct i915_gem_context *ctx, static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine) struct intel_engine_cs *engine)
{ {
...@@ -1717,7 +1692,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) ...@@ -1717,7 +1692,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
dev_priv = engine->i915; dev_priv = engine->i915;
if (engine->buffer) { if (engine->buffer) {
intel_logical_ring_stop(engine);
WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0); WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
} }
......
...@@ -2203,7 +2203,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine) ...@@ -2203,7 +2203,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
dev_priv = engine->i915; dev_priv = engine->i915;
if (engine->buffer) { if (engine->buffer) {
intel_engine_stop(engine);
WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
intel_ring_unpin(engine->buffer); intel_ring_unpin(engine->buffer);
...@@ -2907,18 +2906,3 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine) ...@@ -2907,18 +2906,3 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
return intel_init_ring_buffer(engine); return intel_init_ring_buffer(engine);
} }
void intel_engine_stop(struct intel_engine_cs *engine)
{
int ret;
if (!intel_engine_initialized(engine))
return;
ret = intel_engine_idle(engine);
if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
engine->name, ret);
stop_ring(engine);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment