Commit 3f04bdce authored by Michał Winiarski, committed by Chris Wilson

drm/i915: Reboot CI if we get wedged during driver init

Getting a wedged device on driver init is pretty much unrecoverable.
We're running various scenarios in CI that can potentially hit this
(module reload / selftests / hotunplug), and if it happens we can't
trust any subsequent CI results. Apply the taint so that CI knows it
should reboot the machine (CI checks the taint between test runs).
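
The kernel side of the mechanism is just the add_taint_for_CI(TAINT_WARN)
call added below; the reboot itself is up to the CI runner, which inspects
the kernel taint mask between tests. Purely as an illustration (this is not
the actual IGT/CI runner code; it only assumes the standard
/proc/sys/kernel/tainted interface, where TAINT_WARN is bit 9), such a
check could look like:

/*
 * Hypothetical sketch, not the real CI runner: check whether the kernel
 * has been tainted (e.g. by add_taint_for_CI(TAINT_WARN)) since the last
 * test, and tell the harness to reboot the machine if so.
 */
#include <stdio.h>

#define TAINT_WARN_BIT	9	/* 'W' in the kernel taint mask */

static unsigned long read_kernel_taint(void)
{
	unsigned long taint = 0;
	FILE *f = fopen("/proc/sys/kernel/tainted", "r");

	if (!f)
		return 0;

	if (fscanf(f, "%lu", &taint) != 1)
		taint = 0;

	fclose(f);
	return taint;
}

int main(void)
{
	if (read_kernel_taint() & (1UL << TAINT_WARN_BIT)) {
		fprintf(stderr, "kernel tainted, reboot before the next test\n");
		return 1;	/* non-zero exit: harness should reboot */
	}

	return 0;
}

Anything that flips that bit between two runs marks the machine as unable
to produce trustworthy results, which is exactly what a wedged-on-init
device is.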

v2: Comment that WEDGED_ON_INIT is non-recoverable, distinguish
    WEDGED_ON_INIT from WEDGED_ON_FINI (Chris)
v3: Appease checkpatch, fixup search-replace logic expression mindbomb
    in assert (Chris)
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Petri Latvala <petri.latvala@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20200706144107.204821-1-michal@hardline.pl
parent d3913019
@@ -201,7 +201,7 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
 			     uabi_node);
 		char old[sizeof(engine->name)];
 
-		if (intel_gt_has_init_error(engine->gt))
+		if (intel_gt_has_unrecoverable_error(engine->gt))
 			continue; /* ignore incomplete engines */
 
 		GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
...
@@ -510,7 +510,7 @@ static int __engines_verify_workarounds(struct intel_gt *gt)
 
 static void __intel_gt_disable(struct intel_gt *gt)
 {
-	intel_gt_set_wedged_on_init(gt);
+	intel_gt_set_wedged_on_fini(gt);
 
 	intel_gt_suspend_prepare(gt);
 	intel_gt_suspend_late(gt);
...
@@ -58,14 +58,18 @@ static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
 	return i915_ggtt_offset(gt->scratch) + field;
 }
 
-static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
+static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt)
 {
-	return __intel_reset_failed(&gt->reset);
+	return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags) ||
+	       test_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
 }
 
-static inline bool intel_gt_has_init_error(const struct intel_gt *gt)
+static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
 {
-	return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+	GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) &&
+		   !test_bit(I915_WEDGED, &gt->reset.flags));
+
+	return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
 }
 
 #endif /* __INTEL_GT_H__ */
@@ -188,7 +188,7 @@ int intel_gt_resume(struct intel_gt *gt)
 	enum intel_engine_id id;
 	int err;
 
-	err = intel_gt_has_init_error(gt);
+	err = intel_gt_has_unrecoverable_error(gt);
 	if (err)
 		return err;
 
...
@@ -880,7 +880,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 		return true;
 
 	/* Never fully initialised, recovery impossible */
-	if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
+	if (intel_gt_has_unrecoverable_error(gt))
 		return false;
 
 	GT_TRACE(gt, "start\n");
@@ -1342,7 +1342,7 @@ int intel_gt_terminally_wedged(struct intel_gt *gt)
 	if (!intel_gt_is_wedged(gt))
 		return 0;
 
-	if (intel_gt_has_init_error(gt))
+	if (intel_gt_has_unrecoverable_error(gt))
 		return -EIO;
 
 	/* Reset still in progress? Maybe we will recover? */
@@ -1360,6 +1360,15 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt)
 		     I915_WEDGED_ON_INIT);
 	intel_gt_set_wedged(gt);
 	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
+
+	/* Wedged on init is non-recoverable */
+	add_taint_for_CI(TAINT_WARN);
+}
+
+void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
+{
+	intel_gt_set_wedged(gt);
+	set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
 }
 
 void intel_gt_init_reset(struct intel_gt *gt)
...
@@ -47,8 +47,10 @@ int intel_gt_terminally_wedged(struct intel_gt *gt);
 /*
  * There's no unset_wedged_on_init paired with this one.
  * Once we're wedged on init, there's no going back.
+ * Same thing for unset_wedged_on_fini.
  */
 void intel_gt_set_wedged_on_init(struct intel_gt *gt);
+void intel_gt_set_wedged_on_fini(struct intel_gt *gt);
 
 int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
 
@@ -71,14 +73,6 @@ void __intel_fini_wedge(struct intel_wedge_me *w);
 	     (W)->gt;					\
 	     __intel_fini_wedge((W)))
 
-static inline bool __intel_reset_failed(const struct intel_reset *reset)
-{
-	GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ?
-		   !test_bit(I915_WEDGED, &reset->flags) : false);
-
-	return unlikely(test_bit(I915_WEDGED, &reset->flags));
-}
-
 bool intel_has_gpu_reset(const struct intel_gt *gt);
 bool intel_has_reset_engine(const struct intel_gt *gt);
 
...
@@ -34,12 +34,17 @@ struct intel_reset {
	 * longer use the GPU - similar to #I915_WEDGED bit. The difference in
	 * in the way we're handling "forced" unwedged (e.g. through debugfs),
	 * which is not allowed in case we failed to initialize.
+	 *
+	 * #I915_WEDGED_ON_FINI - Similar to #I915_WEDGED_ON_INIT, except we
+	 * use it to mark that the GPU is no longer available (and prevent
+	 * users from using it).
	 */
	unsigned long flags;
 #define I915_RESET_BACKOFF	0
 #define I915_RESET_MODESET	1
 #define I915_RESET_ENGINE	2
-#define I915_WEDGED_ON_INIT	(BITS_PER_LONG - 2)
+#define I915_WEDGED_ON_INIT	(BITS_PER_LONG - 3)
+#define I915_WEDGED_ON_FINI	(BITS_PER_LONG - 2)
 #define I915_WEDGED		(BITS_PER_LONG - 1)
 
	struct mutex mutex; /* serialises wedging/unwedging */
...