Commit 0de50e40 authored by Chris Wilson, committed by Jani Nikula

drm/i915: Lift intel_engines_resume() to callers

Since the reset path wants to recover the engines itself, it only wants
to reinitialise the hardware using i915_gem_init_hw(). Pull the call to
intel_engines_resume() to the module init/resume path so we can avoid it
during reset.

Fixes: 79ffac85 ("drm/i915: Invert the GEM wakeref hierarchy")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190626154549.10066-3-chris@chris-wilson.co.uk
(cherry picked from commit 092be382)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent cf4a4590
...@@ -253,14 +253,15 @@ void i915_gem_resume(struct drm_i915_private *i915) ...@@ -253,14 +253,15 @@ void i915_gem_resume(struct drm_i915_private *i915)
i915_gem_restore_gtt_mappings(i915); i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(i915); i915_gem_restore_fences(i915);
if (i915_gem_init_hw(i915))
goto err_wedged;
/* /*
* As we didn't flush the kernel context before suspend, we cannot * As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset * guarantee that the context image is complete. So let's just reset
* it and start again. * it and start again.
*/ */
intel_gt_resume(i915); if (intel_gt_resume(i915))
if (i915_gem_init_hw(i915))
goto err_wedged; goto err_wedged;
intel_uc_resume(i915); intel_uc_resume(i915);
......
...@@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine) ...@@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
{ {
intel_wakeref_init(&engine->wakeref); intel_wakeref_init(&engine->wakeref);
} }
/*
 * intel_engines_resume - restart every engine after system resume.
 * @i915: the i915 device
 *
 * Holds a GT wakeref for the whole operation and an engine wakeref around
 * each per-engine ->resume() callback. engine->serial is bumped because the
 * kernel context image was lost across suspend. Stops at the first engine
 * whose ->resume() fails, logs the failure, and returns that error code;
 * returns 0 when every engine restarted successfully.
 */
int intel_engines_resume(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret = 0;

	intel_gt_pm_get(i915);
	for_each_engine(engine, i915, id) {
		intel_engine_pm_get(engine);
		engine->serial++; /* kernel context lost */
		ret = engine->resume(engine);
		intel_engine_pm_put(engine);

		if (!ret)
			continue;

		dev_err(i915->drm.dev,
			"Failed to restart %s (%d)\n",
			engine->name, ret);
		break;
	}
	intel_gt_pm_put(i915);

	return ret;
}
...@@ -17,6 +17,4 @@ void intel_engine_park(struct intel_engine_cs *engine); ...@@ -17,6 +17,4 @@ void intel_engine_park(struct intel_engine_cs *engine);
void intel_engine_init__pm(struct intel_engine_cs *engine); void intel_engine_init__pm(struct intel_engine_cs *engine);
int intel_engines_resume(struct drm_i915_private *i915);
#endif /* INTEL_ENGINE_PM_H */ #endif /* INTEL_ENGINE_PM_H */
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
*/ */
#include "i915_drv.h" #include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_pm.h" #include "intel_gt_pm.h"
#include "intel_pm.h" #include "intel_pm.h"
#include "intel_wakeref.h" #include "intel_wakeref.h"
...@@ -118,10 +119,11 @@ void intel_gt_sanitize(struct drm_i915_private *i915, bool force) ...@@ -118,10 +119,11 @@ void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
intel_engine_reset(engine, false); intel_engine_reset(engine, false);
} }
void intel_gt_resume(struct drm_i915_private *i915) int intel_gt_resume(struct drm_i915_private *i915)
{ {
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
int err = 0;
/* /*
* After resume, we may need to poke into the pinned kernel * After resume, we may need to poke into the pinned kernel
...@@ -129,9 +131,12 @@ void intel_gt_resume(struct drm_i915_private *i915) ...@@ -129,9 +131,12 @@ void intel_gt_resume(struct drm_i915_private *i915)
* Only the kernel contexts should remain pinned over suspend, * Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin. * allowing us to fixup the user contexts on their first pin.
*/ */
intel_gt_pm_get(i915);
for_each_engine(engine, i915, id) { for_each_engine(engine, i915, id) {
struct intel_context *ce; struct intel_context *ce;
intel_engine_pm_get(engine);
ce = engine->kernel_context; ce = engine->kernel_context;
if (ce) if (ce)
ce->ops->reset(ce); ce->ops->reset(ce);
...@@ -139,5 +144,19 @@ void intel_gt_resume(struct drm_i915_private *i915) ...@@ -139,5 +144,19 @@ void intel_gt_resume(struct drm_i915_private *i915)
ce = engine->preempt_context; ce = engine->preempt_context;
if (ce) if (ce)
ce->ops->reset(ce); ce->ops->reset(ce);
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
intel_engine_pm_put(engine);
if (err) {
dev_err(i915->drm.dev,
"Failed to restart %s (%d)\n",
engine->name, err);
break;
} }
}
intel_gt_pm_put(i915);
return err;
} }
...@@ -22,6 +22,6 @@ void intel_gt_pm_put(struct drm_i915_private *i915); ...@@ -22,6 +22,6 @@ void intel_gt_pm_put(struct drm_i915_private *i915);
void intel_gt_pm_init(struct drm_i915_private *i915); void intel_gt_pm_init(struct drm_i915_private *i915);
void intel_gt_sanitize(struct drm_i915_private *i915, bool force); void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
void intel_gt_resume(struct drm_i915_private *i915); int intel_gt_resume(struct drm_i915_private *i915);
#endif /* INTEL_GT_PM_H */ #endif /* INTEL_GT_PM_H */
...@@ -951,6 +951,21 @@ static int do_reset(struct drm_i915_private *i915, ...@@ -951,6 +951,21 @@ static int do_reset(struct drm_i915_private *i915,
return gt_reset(i915, stalled_mask); return gt_reset(i915, stalled_mask);
} }
/*
 * resume - restart all engines following a GPU reset.
 * @i915: the i915 device
 *
 * Invokes each engine's ->resume() callback in turn, bailing out on the
 * first failure. Returns 0 on success or the failing engine's error code.
 */
static int resume(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	for_each_engine(engine, i915, id) {
		err = engine->resume(engine);
		if (err)
			break;
	}

	return err;
}
/** /**
* i915_reset - reset chip after a hang * i915_reset - reset chip after a hang
* @i915: #drm_i915_private to reset * @i915: #drm_i915_private to reset
...@@ -1024,9 +1039,13 @@ void i915_reset(struct drm_i915_private *i915, ...@@ -1024,9 +1039,13 @@ void i915_reset(struct drm_i915_private *i915,
if (ret) { if (ret) {
DRM_ERROR("Failed to initialise HW following reset (%d)\n", DRM_ERROR("Failed to initialise HW following reset (%d)\n",
ret); ret);
goto error; goto taint;
} }
ret = resume(i915);
if (ret)
goto taint;
i915_queue_hangcheck(i915); i915_queue_hangcheck(i915);
finish: finish:
......
...@@ -46,7 +46,6 @@ ...@@ -46,7 +46,6 @@
#include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h" #include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h" #include "gem/i915_gemfs.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt_pm.h" #include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h" #include "gt/intel_mocs.h"
#include "gt/intel_reset.h" #include "gt/intel_reset.h"
...@@ -1307,21 +1306,13 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) ...@@ -1307,21 +1306,13 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
intel_mocs_init_l3cc_table(dev_priv); intel_mocs_init_l3cc_table(dev_priv);
/* Only when the HW is re-initialised, can we replay the requests */
ret = intel_engines_resume(dev_priv);
if (ret)
goto cleanup_uc;
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_engines_set_scheduler_caps(dev_priv); intel_engines_set_scheduler_caps(dev_priv);
return 0; return 0;
cleanup_uc:
intel_uc_fini_hw(dev_priv);
out: out:
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return ret; return ret;
} }
...@@ -1580,6 +1571,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv) ...@@ -1580,6 +1571,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret) if (ret)
goto err_uc_init; goto err_uc_init;
/* Only when the HW is re-initialised, can we replay the requests */
ret = intel_gt_resume(dev_priv);
if (ret)
goto err_init_hw;
/* /*
* Despite its name intel_init_clock_gating applies both display * Despite its name intel_init_clock_gating applies both display
* clock gating workarounds; GT mmio workarounds and the occasional * clock gating workarounds; GT mmio workarounds and the occasional
...@@ -1593,20 +1589,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv) ...@@ -1593,20 +1589,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
ret = intel_engines_verify_workarounds(dev_priv); ret = intel_engines_verify_workarounds(dev_priv);
if (ret) if (ret)
goto err_init_hw; goto err_gt;
ret = __intel_engines_record_defaults(dev_priv); ret = __intel_engines_record_defaults(dev_priv);
if (ret) if (ret)
goto err_init_hw; goto err_gt;
if (i915_inject_load_failure()) { if (i915_inject_load_failure()) {
ret = -ENODEV; ret = -ENODEV;
goto err_init_hw; goto err_gt;
} }
if (i915_inject_load_failure()) { if (i915_inject_load_failure()) {
ret = -EIO; ret = -EIO;
goto err_init_hw; goto err_gt;
} }
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
...@@ -1620,7 +1616,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) ...@@ -1620,7 +1616,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* HW as irrevisibly wedged, but keep enough state around that the * HW as irrevisibly wedged, but keep enough state around that the
* driver doesn't explode during runtime. * driver doesn't explode during runtime.
*/ */
err_init_hw: err_gt:
mutex_unlock(&dev_priv->drm.struct_mutex); mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_set_wedged(dev_priv); i915_gem_set_wedged(dev_priv);
...@@ -1630,6 +1626,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) ...@@ -1630,6 +1626,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
i915_gem_drain_workqueue(dev_priv); i915_gem_drain_workqueue(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex); mutex_lock(&dev_priv->drm.struct_mutex);
err_init_hw:
intel_uc_fini_hw(dev_priv); intel_uc_fini_hw(dev_priv);
err_uc_init: err_uc_init:
intel_uc_fini(dev_priv); intel_uc_fini(dev_priv);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment