Commit 785fbda5 authored by Chris Wilson

drm/i915: Pull sync_srcu for device reset outside of wedge_mutex

We need to flush our srcu protecting resources about to be clobbered
by the reset, inside of our timer failsafe but outside of the
error->wedge_mutex, so that the failsafe can run in case the
synchronize_srcu() takes too long (hits a shrinker deadlock?).

Fixes: 72eb16df ("drm/i915: Serialise resets with wedging")
References: https://bugs.freedesktop.org/show_bug.cgi?id=109605
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190211135040.1234-1-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
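For context, a minimal sketch of the SRCU pairing this patch reorders, not the driver's actual helpers: readers that touch state a reset would clobber hold an SRCU read lock on reset_backoff_srcu, and the reset path calls synchronize_srcu() to drain them before clobbering. The names use_gpu_resource() and clobbering_reset() below are illustrative only; just reset_backoff_srcu and wedge_mutex correspond to names in the patch.

/*
 * Sketch of the reader/writer SRCU pairing -- illustrative only, not the
 * i915 helpers themselves.
 */
#include <linux/mutex.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(reset_backoff_srcu);
static DEFINE_MUTEX(wedge_mutex);

/* Reader side: anything using state that a reset would clobber. */
static void use_gpu_resource(void)
{
	int tag = srcu_read_lock(&reset_backoff_srcu);

	/* ... safely use the resource; a reset must wait for us ... */

	srcu_read_unlock(&reset_backoff_srcu, tag);
}

/* Writer side: drain readers before clobbering, but do it outside the
 * wedge_mutex so a timeout failsafe can still run if the grace period
 * stalls (e.g. behind a shrinker).
 */
static void clobbering_reset(void)
{
	synchronize_srcu(&reset_backoff_srcu);	/* flush current readers */

	mutex_lock(&wedge_mutex);
	/* ... perform the reset under the wedge_mutex ... */
	mutex_unlock(&wedge_mutex);
}

The point of the patch is the ordering on the writer side: synchronize_srcu() sits inside the i915_wedge_on_timeout() failsafe but before wedge_mutex is taken, so if the grace period takes too long the timeout can still wedge the device rather than deadlocking under the mutex.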
parent a0f52c3d
@@ -941,9 +941,6 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
 {
 	int err, i;
 
-	/* Flush everyone currently using a resource about to be clobbered */
-	synchronize_srcu(&i915->gpu_error.reset_backoff_srcu);
-
 	err = intel_gpu_reset(i915, ALL_ENGINES);
 	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
 		msleep(10 * (i + 1));
@@ -1140,6 +1137,9 @@ static void i915_reset_device(struct drm_i915_private *i915,
 	i915_wedge_on_timeout(&w, i915, 5 * HZ) {
 		intel_prepare_reset(i915);
 
+		/* Flush everyone using a resource about to be clobbered */
+		synchronize_srcu(&error->reset_backoff_srcu);
+
 		mutex_lock(&error->wedge_mutex);
 		i915_reset(i915, engine_mask, reason);
 		mutex_unlock(&error->wedge_mutex);