Commit 1b39a917 authored by Nick Hoath, committed by Daniel Vetter

drm/i915: fix context/engine cleanup order

Swap the order of context & engine cleanup, so that contexts are cleaned
up first, and *then* engines. This is a more sensible order anyway, but
in particular has become necessary since the 'intel_ring_initialized()
must be simple and inline' patch, which now uses ring->dev as an
'initialised' flag, so it can now be NULL after engine teardown. This
in turn can cause a problem in the context code, which (used to) check
the ring->dev->struct_mutex -- causing a fault if ring->dev was NULL.

Also rename the cleanup function to reflect what it actually does
(cleanup engines, not a ringbuffer), and fix an annoying whitespace issue.

v2: Also make the fix in i915_load_modeset_init, not just in
    i915_driver_unload (Chris Wilson)
v3: Had extra stuff in it.
v4: Reverted extra stuff (so we're back to v2).
    Rebased and updated commentary above (Dave Gordon).
Signed-off-by: Nick Hoath <nicholas.hoath@intel.com>
Signed-off-by: David Gordon <david.s.gordon@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1453504211-7982-2-git-send-email-david.s.gordon@intel.com
parent 8c448cad
...@@ -444,8 +444,8 @@ static int i915_load_modeset_init(struct drm_device *dev) ...@@ -444,8 +444,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
cleanup_gem: cleanup_gem:
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev); i915_gem_context_fini(dev);
i915_gem_cleanup_engines(dev);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
cleanup_irq: cleanup_irq:
intel_guc_ucode_fini(dev); intel_guc_ucode_fini(dev);
...@@ -1256,8 +1256,8 @@ int i915_driver_unload(struct drm_device *dev) ...@@ -1256,8 +1256,8 @@ int i915_driver_unload(struct drm_device *dev)
intel_guc_ucode_fini(dev); intel_guc_ucode_fini(dev);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev); i915_gem_context_fini(dev);
i915_gem_cleanup_engines(dev);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
intel_fbc_cleanup_cfb(dev_priv); intel_fbc_cleanup_cfb(dev_priv);
......
...@@ -3058,7 +3058,7 @@ int i915_gem_init_rings(struct drm_device *dev); ...@@ -3058,7 +3058,7 @@ int i915_gem_init_rings(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev); void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev); int __must_check i915_gem_suspend(struct drm_device *dev);
void __i915_add_request(struct drm_i915_gem_request *req, void __i915_add_request(struct drm_i915_gem_request *req,
......
...@@ -4913,7 +4913,7 @@ i915_gem_init_hw(struct drm_device *dev) ...@@ -4913,7 +4913,7 @@ i915_gem_init_hw(struct drm_device *dev)
req = i915_gem_request_alloc(ring, NULL); req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req)) { if (IS_ERR(req)) {
ret = PTR_ERR(req); ret = PTR_ERR(req);
i915_gem_cleanup_ringbuffer(dev); i915_gem_cleanup_engines(dev);
goto out; goto out;
} }
...@@ -4926,7 +4926,7 @@ i915_gem_init_hw(struct drm_device *dev) ...@@ -4926,7 +4926,7 @@ i915_gem_init_hw(struct drm_device *dev)
if (ret && ret != -EIO) { if (ret && ret != -EIO) {
DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret); DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
i915_gem_request_cancel(req); i915_gem_request_cancel(req);
i915_gem_cleanup_ringbuffer(dev); i915_gem_cleanup_engines(dev);
goto out; goto out;
} }
...@@ -4934,7 +4934,7 @@ i915_gem_init_hw(struct drm_device *dev) ...@@ -4934,7 +4934,7 @@ i915_gem_init_hw(struct drm_device *dev)
if (ret && ret != -EIO) { if (ret && ret != -EIO) {
DRM_ERROR("Context enable ring #%d failed %d\n", i, ret); DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
i915_gem_request_cancel(req); i915_gem_request_cancel(req);
i915_gem_cleanup_ringbuffer(dev); i915_gem_cleanup_engines(dev);
goto out; goto out;
} }
...@@ -5009,7 +5009,7 @@ int i915_gem_init(struct drm_device *dev) ...@@ -5009,7 +5009,7 @@ int i915_gem_init(struct drm_device *dev)
} }
void void
i915_gem_cleanup_ringbuffer(struct drm_device *dev) i915_gem_cleanup_engines(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring; struct intel_engine_cs *ring;
...@@ -5018,13 +5018,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) ...@@ -5018,13 +5018,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
for_each_ring(ring, dev_priv, i) for_each_ring(ring, dev_priv, i)
dev_priv->gt.cleanup_ring(ring); dev_priv->gt.cleanup_ring(ring);
if (i915.enable_execlists) if (i915.enable_execlists) {
/* /*
* Neither the BIOS, ourselves or any other kernel * Neither the BIOS, ourselves or any other kernel
* expects the system to be in execlists mode on startup, * expects the system to be in execlists mode on startup,
* so we need to reset the GPU back to legacy mode. * so we need to reset the GPU back to legacy mode.
*/ */
intel_gpu_reset(dev); intel_gpu_reset(dev);
}
} }
static void static void
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment