Commit f6470c9b authored by Michal Wajdeczko, committed by Chris Wilson

drm/i915/selftests: Split igt_atomic_reset testcase

Split the igt_atomic_reset selftest into separate full and engines parts,
so we can move the former to the dedicated reset selftests file.

While here, change the engines test to loop first over the atomic phases
and then over the available engines, as sketched below.
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190522193203.23932-3-michal.wajdeczko@intel.com
parent 932309fb
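In outline, the restructured engines test walks the atomic-phase table in its outer loop and the engines in its inner loop, where the old test nested the phases inside each engine. A minimal userspace sketch of that inversion, using stand-in phase and engine tables (illustrative names only, not the kernel API):

#include <stdio.h>

/* Stand-ins for igt_atomic_phases and for_each_engine(); purely illustrative. */
static const char *phases[] = { "preempt", "softirq", "hardirq", NULL };
static const char *engines[] = { "rcs0", "bcs0", "vcs0", NULL };

int main(void)
{
	int i, j;

	/* Old shape: each engine ran through every atomic phase. */
	for (i = 0; engines[i]; i++)
		for (j = 0; phases[j]; j++)
			printf("old: reset %s under %s\n", engines[i], phases[j]);

	/* New shape: each atomic phase is applied across all engines. */
	for (j = 0; phases[j]; j++)
		for (i = 0; engines[i]; i++)
			printf("new: reset %s under %s\n", engines[i], phases[j]);

	return 0;
}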
@@ -32,6 +32,7 @@
 #include "selftests/igt_gem_utils.h"
 #include "selftests/igt_reset.h"
 #include "selftests/igt_wedge_me.h"
+#include "selftests/igt_atomic.h"
 #include "selftests/mock_context.h"
 #include "selftests/mock_drm.h"
 
@@ -1603,44 +1604,8 @@ static int igt_handle_error(void *arg)
 	return err;
 }
 
-static void __preempt_begin(void)
-{
-	preempt_disable();
-}
-
-static void __preempt_end(void)
-{
-	preempt_enable();
-}
-
-static void __softirq_begin(void)
-{
-	local_bh_disable();
-}
-
-static void __softirq_end(void)
-{
-	local_bh_enable();
-}
-
-static void __hardirq_begin(void)
-{
-	local_irq_disable();
-}
-
-static void __hardirq_end(void)
-{
-	local_irq_enable();
-}
-
-struct atomic_section {
-	const char *name;
-	void (*critical_section_begin)(void);
-	void (*critical_section_end)(void);
-};
-
 static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
-				     const struct atomic_section *p,
+				     const struct igt_atomic_section *p,
 				     const char *mode)
 {
 	struct tasklet_struct * const t = &engine->execlists.tasklet;
@@ -1665,7 +1630,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
 }
 
 static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
-				   const struct atomic_section *p)
+				   const struct igt_atomic_section *p)
 {
 	struct drm_i915_private *i915 = engine->i915;
 	struct i915_request *rq;
@@ -1716,79 +1681,43 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
 	return err;
 }
 
-static void force_reset(struct drm_i915_private *i915)
+static int igt_reset_engines_atomic(void *arg)
 {
-	i915_gem_set_wedged(i915);
-	i915_reset(i915, 0, NULL);
-}
-
-static int igt_atomic_reset(void *arg)
-{
-	static const struct atomic_section phases[] = {
-		{ "preempt", __preempt_begin, __preempt_end },
-		{ "softirq", __softirq_begin, __softirq_end },
-		{ "hardirq", __hardirq_begin, __hardirq_end },
-		{ }
-	};
 	struct drm_i915_private *i915 = arg;
-	intel_wakeref_t wakeref;
+	const typeof(*igt_atomic_phases) *p;
 	int err = 0;
 
-	/* Check that the resets are usable from atomic context */
+	/* Check that the engines resets are usable from atomic context */
+
+	if (!intel_has_reset_engine(i915))
+		return 0;
+
+	if (USES_GUC_SUBMISSION(i915))
+		return 0;
 
 	igt_global_reset_lock(i915);
 	mutex_lock(&i915->drm.struct_mutex);
-	wakeref = intel_runtime_pm_get(i915);
 
 	/* Flush any requests before we get started and check basics */
-	force_reset(i915);
-	if (i915_reset_failed(i915))
-		goto unlock;
-
-	if (intel_has_gpu_reset(i915)) {
-		const typeof(*phases) *p;
-
-		for (p = phases; p->name; p++) {
-			GEM_TRACE("intel_gpu_reset under %s\n", p->name);
-
-			p->critical_section_begin();
-			err = intel_gpu_reset(i915, ALL_ENGINES);
-			p->critical_section_end();
-
-			if (err) {
-				pr_err("intel_gpu_reset failed under %s\n",
-				       p->name);
-				goto out;
-			}
-		}
-
-		force_reset(i915);
-	}
-
-	if (USES_GUC_SUBMISSION(i915))
+	if (!igt_force_reset(i915))
 		goto unlock;
 
-	if (intel_has_reset_engine(i915)) {
+	for (p = igt_atomic_phases; p->name; p++) {
 		struct intel_engine_cs *engine;
 		enum intel_engine_id id;
 
 		for_each_engine(engine, i915, id) {
-			const typeof(*phases) *p;
-
-			for (p = phases; p->name; p++) {
-				err = igt_atomic_reset_engine(engine, p);
-				if (err)
-					goto out;
-			}
+			err = igt_atomic_reset_engine(engine, p);
+			if (err)
+				goto out;
 		}
 	}
 
 out:
 	/* As we poke around the guts, do a full reset before continuing. */
-	force_reset(i915);
+	igt_force_reset(i915);
 
 unlock:
-	intel_runtime_pm_put(i915, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
 	igt_global_reset_unlock(i915);
@@ -1804,13 +1733,13 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_reset_idle_engine),
 		SUBTEST(igt_reset_active_engine),
 		SUBTEST(igt_reset_engines),
+		SUBTEST(igt_reset_engines_atomic),
 		SUBTEST(igt_reset_queue),
 		SUBTEST(igt_reset_wait),
 		SUBTEST(igt_reset_evict_ggtt),
 		SUBTEST(igt_reset_evict_ppgtt),
 		SUBTEST(igt_reset_evict_fence),
 		SUBTEST(igt_handle_error),
-		SUBTEST(igt_atomic_reset),
 	};
 	intel_wakeref_t wakeref;
 	bool saved_hangcheck;
@@ -5,6 +5,7 @@
 
 #include "i915_selftest.h"
 #include "selftests/igt_reset.h"
+#include "selftests/igt_atomic.h"
 
 static int igt_global_reset(void *arg)
 {
@@ -54,11 +55,50 @@ static int igt_wedged_reset(void *arg)
 	return i915_reset_failed(i915) ? -EIO : 0;
 }
 
+static int igt_atomic_reset(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	const typeof(*igt_atomic_phases) *p;
+	int err = 0;
+
+	/* Check that the resets are usable from atomic context */
+
+	igt_global_reset_lock(i915);
+	mutex_lock(&i915->drm.struct_mutex);
+
+	/* Flush any requests before we get started and check basics */
+	if (!igt_force_reset(i915))
+		goto unlock;
+
+	for (p = igt_atomic_phases; p->name; p++) {
+		GEM_TRACE("intel_gpu_reset under %s\n", p->name);
+
+		p->critical_section_begin();
+		err = intel_gpu_reset(i915, ALL_ENGINES);
+		p->critical_section_end();
+
+		if (err) {
+			pr_err("intel_gpu_reset failed under %s\n", p->name);
+			break;
+		}
+	}
+
+	/* As we poke around the guts, do a full reset before continuing. */
+	igt_force_reset(i915);
+
+unlock:
+	mutex_unlock(&i915->drm.struct_mutex);
+	igt_global_reset_unlock(i915);
+
+	return err;
+}
+
 int intel_reset_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_global_reset), /* attempt to recover GPU first */
 		SUBTEST(igt_wedged_reset),
+		SUBTEST(igt_atomic_reset),
 	};
 	intel_wakeref_t wakeref;
 	int err = 0;
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2018 Intel Corporation
 */

#ifndef IGT_ATOMIC_H
#define IGT_ATOMIC_H

#include <linux/preempt.h>
#include <linux/bottom_half.h>
#include <linux/irqflags.h>

static void __preempt_begin(void)
{
	preempt_disable();
}

static void __preempt_end(void)
{
	preempt_enable();
}

static void __softirq_begin(void)
{
	local_bh_disable();
}

static void __softirq_end(void)
{
	local_bh_enable();
}

static void __hardirq_begin(void)
{
	local_irq_disable();
}

static void __hardirq_end(void)
{
	local_irq_enable();
}

struct igt_atomic_section {
	const char *name;
	void (*critical_section_begin)(void);
	void (*critical_section_end)(void);
};

static const struct igt_atomic_section igt_atomic_phases[] = {
	{ "preempt", __preempt_begin, __preempt_end },
	{ "softirq", __softirq_begin, __softirq_end },
	{ "hardirq", __hardirq_begin, __hardirq_end },
	{ }
};

#endif /* IGT_ATOMIC_H */
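The new selftests/igt_atomic.h keeps the phase table self-describing: each igt_atomic_section pairs a name with begin/end callbacks, and the empty { } entry terminates the table, so consumers simply iterate until p->name is NULL, as both selftests above do. A minimal userspace analogue of that consumption pattern, with no-op callbacks standing in for preempt_disable() and friends (illustrative only):

#include <stdio.h>

struct atomic_section {
	const char *name;
	void (*critical_section_begin)(void);
	void (*critical_section_end)(void);
};

/* No-op stand-ins for preempt_disable()/local_bh_disable()/local_irq_disable(). */
static void noop(void) { }

static const struct atomic_section sections[] = {
	{ "preempt", noop, noop },
	{ "softirq", noop, noop },
	{ "hardirq", noop, noop },
	{ }	/* sentinel: NULL name ends the walk */
};

/* Stand-in for intel_gpu_reset(); returns 0 on success. */
static int do_reset(void)
{
	return 0;
}

int main(void)
{
	const struct atomic_section *p;

	for (p = sections; p->name; p++) {
		int err;

		p->critical_section_begin();
		err = do_reset();	/* the real test must not sleep here */
		p->critical_section_end();

		if (err) {
			fprintf(stderr, "reset failed under %s\n", p->name);
			return 1;
		}
	}

	return 0;
}

The sentinel keeps callers independent of the table length, which lets both test files share the header without exporting an array size.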
@@ -43,3 +43,11 @@ void igt_global_reset_unlock(struct drm_i915_private *i915)
 	clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
 	wake_up_all(&i915->gpu_error.reset_queue);
 }
+
+bool igt_force_reset(struct drm_i915_private *i915)
+{
+	i915_gem_set_wedged(i915);
+	i915_reset(i915, 0, NULL);
+
+	return !i915_reset_failed(i915);
+}
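Note that igt_force_reset() folds the old force_reset() helper and its follow-up i915_reset_failed() check into a single call that reports whether the device actually recovered, which is why the initial flush in both tests above reduces to: if (!igt_force_reset(i915)) goto unlock;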
@@ -11,5 +11,6 @@
 
 void igt_global_reset_lock(struct drm_i915_private *i915);
 void igt_global_reset_unlock(struct drm_i915_private *i915);
+bool igt_force_reset(struct drm_i915_private *i915);
 
 #endif