Commit fde93886 authored by Tvrtko Ursulin

drm/i915/selftests: Verify context workarounds

Test that context workarounds have been correctly applied in newly
created contexts.

To accomplish this, the existing engine_wa_list_verify helper is extended
to take a context from which the workaround list is read back.
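
Condensed from the diff below: the helper now verifies against an
intel_context rather than an engine, and the engine-level entry point
simply passes the engine's kernel context:

    static int engine_wa_list_verify(struct intel_context *ce,
                                     const struct i915_wa_list * const wal,
                                     const char *from);

    int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
                                        const char *from)
    {
            return engine_wa_list_verify(engine->kernel_context,
                                         &engine->wa_list,
                                         from);
    }

Internally the readback request is built with intel_context_create_request(ce)
instead of i915_request_create(engine->kernel_context), so the register values
are sampled through the supplied context.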

Context workaround verification is done from the existing subtests, which
have been renamed to reflect that they are no longer only about GT and
engine workarounds.
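
The renamed subtests create a kernel context and check, for every engine in
that context, both the engine and the new per-engine context workaround
lists (condensed from the diff below):

    for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
            enum intel_engine_id id = ce->engine->id;

            ok &= engine_wa_list_verify(ce,
                                        &lists->engine[id].wa_list,
                                        str) == 0;

            ok &= engine_wa_list_verify(ce,
                                        &lists->engine[id].ctx_wa_list,
                                        str) == 0;
    }
    i915_gem_context_unlock_engines(ctx);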

v2:
 * Test after resets and refactor to use intel_context more. (Chris)

v3:
 * Use ce->engine->i915 instead of ce->gem_context->i915. (Chris)
 * gem_engine_iter.idx is engine->id + 1. (Chris)

v4:
 * Make local function static.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190520142546.12493-1-tvrtko.ursulin@linux.intel.com
parent a88b6e4c
@@ -196,10 +196,9 @@ ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
 #define WA_SET_FIELD_MASKED(addr, mask, value) \
 	wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
 
-static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
+				       struct i915_wa_list *wal)
 {
-	struct i915_wa_list *wal = &engine->ctx_wa_list;
-
 	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 
 	/* WaDisableAsyncFlipPerfMode:bdw,chv */
@@ -245,12 +244,12 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
 			  GEN6_WIZ_HASHING_16x4);
 }
 
-static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
+				      struct i915_wa_list *wal)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	gen8_ctx_workarounds_init(engine);
+	gen8_ctx_workarounds_init(engine, wal);
 
 	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
@@ -273,11 +272,10 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
 			  (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 }
 
-static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
+				      struct i915_wa_list *wal)
 {
-	struct i915_wa_list *wal = &engine->ctx_wa_list;
-
-	gen8_ctx_workarounds_init(engine);
+	gen8_ctx_workarounds_init(engine, wal);
 
 	/* WaDisableThreadStallDopClockGating:chv */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
@@ -286,10 +284,10 @@ static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
 	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
 }
 
-static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
+				       struct i915_wa_list *wal)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
 	if (HAS_LLC(i915)) {
 		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
@@ -384,10 +382,10 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
 		WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
 }
 
-static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
+static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
+				struct i915_wa_list *wal)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	struct i915_wa_list *wal = &engine->ctx_wa_list;
 	u8 vals[3] = { 0, 0, 0 };
 	unsigned int i;
 
@@ -424,17 +422,17 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
 			    GEN9_IZ_HASHING(0, vals[0]));
 }
 
-static void skl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
+				     struct i915_wa_list *wal)
 {
-	gen9_ctx_workarounds_init(engine);
-	skl_tune_iz_hashing(engine);
+	gen9_ctx_workarounds_init(engine, wal);
+	skl_tune_iz_hashing(engine, wal);
 }
 
-static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
+				     struct i915_wa_list *wal)
 {
-	struct i915_wa_list *wal = &engine->ctx_wa_list;
-
-	gen9_ctx_workarounds_init(engine);
+	gen9_ctx_workarounds_init(engine, wal);
 
 	/* WaDisableThreadStallDopClockGating:bxt */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -445,12 +443,12 @@ static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 }
 
-static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
+				     struct i915_wa_list *wal)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	gen9_ctx_workarounds_init(engine);
+	gen9_ctx_workarounds_init(engine, wal);
 
 	/* WaToEnableHwFixForPushConstHWBug:kbl */
 	if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
@@ -462,22 +460,20 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
 			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 }
 
-static void glk_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
+				     struct i915_wa_list *wal)
 {
-	struct i915_wa_list *wal = &engine->ctx_wa_list;
-
-	gen9_ctx_workarounds_init(engine);
+	gen9_ctx_workarounds_init(engine, wal);
 
 	/* WaToEnableHwFixForPushConstHWBug:glk */
 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 }
 
-static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
+				     struct i915_wa_list *wal)
 {
-	struct i915_wa_list *wal = &engine->ctx_wa_list;
-
-	gen9_ctx_workarounds_init(engine);
+	gen9_ctx_workarounds_init(engine, wal);
 
 	/* WaToEnableHwFixForPushConstHWBug:cfl */
 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
@@ -488,10 +484,10 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
 			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 }
 
-static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
+				     struct i915_wa_list *wal)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
 	/* WaForceContextSaveRestoreNonCoherent:cnl */
 	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
@@ -528,10 +524,10 @@ static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
 }
 
-static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
+static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
+				     struct i915_wa_list *wal)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
 	/* Wa_1604370585:icl (pre-prod)
 	 * Formerly known as WaPushConstantDereferenceHoldDisable
@@ -573,31 +569,36 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
 			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
 }
 
-void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
+static void
+__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
+			   struct i915_wa_list *wal,
+			   const char *name)
 {
 	struct drm_i915_private *i915 = engine->i915;
-	struct i915_wa_list *wal = &engine->ctx_wa_list;
 
-	wa_init_start(wal, "context");
+	if (engine->class != RENDER_CLASS)
+		return;
+
+	wa_init_start(wal, name);
 
 	if (IS_GEN(i915, 11))
-		icl_ctx_workarounds_init(engine);
+		icl_ctx_workarounds_init(engine, wal);
 	else if (IS_CANNONLAKE(i915))
-		cnl_ctx_workarounds_init(engine);
+		cnl_ctx_workarounds_init(engine, wal);
 	else if (IS_COFFEELAKE(i915))
-		cfl_ctx_workarounds_init(engine);
+		cfl_ctx_workarounds_init(engine, wal);
 	else if (IS_GEMINILAKE(i915))
-		glk_ctx_workarounds_init(engine);
+		glk_ctx_workarounds_init(engine, wal);
 	else if (IS_KABYLAKE(i915))
-		kbl_ctx_workarounds_init(engine);
+		kbl_ctx_workarounds_init(engine, wal);
 	else if (IS_BROXTON(i915))
-		bxt_ctx_workarounds_init(engine);
+		bxt_ctx_workarounds_init(engine, wal);
 	else if (IS_SKYLAKE(i915))
-		skl_ctx_workarounds_init(engine);
+		skl_ctx_workarounds_init(engine, wal);
 	else if (IS_CHERRYVIEW(i915))
-		chv_ctx_workarounds_init(engine);
+		chv_ctx_workarounds_init(engine, wal);
 	else if (IS_BROADWELL(i915))
-		bdw_ctx_workarounds_init(engine);
+		bdw_ctx_workarounds_init(engine, wal);
 	else if (INTEL_GEN(i915) < 8)
 		return;
 	else
@@ -606,6 +607,11 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
 	wa_init_finish(wal);
 }
 
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
+{
+	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
+}
+
 int intel_engine_emit_ctx_wa(struct i915_request *rq)
 {
 	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
@@ -1338,7 +1344,7 @@ wa_list_srm(struct i915_request *rq,
 	return 0;
 }
 
-static int engine_wa_list_verify(struct intel_engine_cs *engine,
+static int engine_wa_list_verify(struct intel_context *ce,
 				 const struct i915_wa_list * const wal,
 				 const char *from)
 {
@@ -1352,11 +1358,11 @@ static int engine_wa_list_verify(struct intel_engine_cs *engine,
 	if (!wal->count)
 		return 0;
 
-	vma = create_scratch(&engine->i915->ggtt.vm, wal->count);
+	vma = create_scratch(&ce->engine->i915->ggtt.vm, wal->count);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	rq = i915_request_create(engine->kernel_context);
+	rq = intel_context_create_request(ce);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_vma;
@@ -1394,7 +1400,9 @@ static int engine_wa_list_verify(struct intel_engine_cs *engine,
 int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
 				    const char *from)
 {
-	return engine_wa_list_verify(engine, &engine->wa_list, from);
+	return engine_wa_list_verify(engine->kernel_context,
+				     &engine->wa_list,
+				     from);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
@@ -22,12 +22,13 @@ static const struct wo_register {
 	{ INTEL_GEMINILAKE, 0x731c }
 };
 
-#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
+#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8)
 struct wa_lists {
 	struct i915_wa_list gt_wa_list;
 	struct {
 		char name[REF_NAME_MAX];
 		struct i915_wa_list wa_list;
+		struct i915_wa_list ctx_wa_list;
 	} engine[I915_NUM_ENGINES];
 };
 
@@ -52,6 +53,12 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
 		wa_init_start(wal, name);
 		engine_init_workarounds(engine, wal);
 		wa_init_finish(wal);
+
+		snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name);
+
+		__intel_engine_init_ctx_wa(engine,
+					   &lists->engine[id].ctx_wa_list,
+					   name);
 	}
 }
 
@@ -1003,28 +1010,38 @@ static int live_isolated_whitelist(void *arg)
 	return err;
 }
 
-static bool verify_gt_engine_wa(struct drm_i915_private *i915,
-				struct wa_lists *lists, const char *str)
+static bool
+verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
+		const char *str)
 {
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
+	struct drm_i915_private *i915 = ctx->i915;
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
 	bool ok = true;
 
 	ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
 
-	for_each_engine(engine, i915, id) {
-		ok &= engine_wa_list_verify(engine,
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		enum intel_engine_id id = ce->engine->id;
+
+		ok &= engine_wa_list_verify(ce,
 					    &lists->engine[id].wa_list,
 					    str) == 0;
+
+		ok &= engine_wa_list_verify(ce,
+					    &lists->engine[id].ctx_wa_list,
+					    str) == 0;
 	}
+	i915_gem_context_unlock_engines(ctx);
 
 	return ok;
 }
 
 static int
-live_gpu_reset_gt_engine_workarounds(void *arg)
+live_gpu_reset_workarounds(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
+	struct i915_gem_context *ctx;
 	intel_wakeref_t wakeref;
 	struct wa_lists lists;
 	bool ok;
@@ -1032,6 +1049,10 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
 	if (!intel_has_gpu_reset(i915))
 		return 0;
 
+	ctx = kernel_context(i915);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
 	pr_info("Verifying after GPU reset...\n");
 
 	igt_global_reset_lock(i915);
@@ -1039,15 +1060,16 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
 
 	reference_lists_init(i915, &lists);
 
-	ok = verify_gt_engine_wa(i915, &lists, "before reset");
+	ok = verify_wa_lists(ctx, &lists, "before reset");
 	if (!ok)
 		goto out;
 
 	i915_reset(i915, ALL_ENGINES, "live_workarounds");
 
-	ok = verify_gt_engine_wa(i915, &lists, "after reset");
+	ok = verify_wa_lists(ctx, &lists, "after reset");
 
 out:
+	kernel_context_close(ctx);
 	reference_lists_fini(i915, &lists);
 	intel_runtime_pm_put(i915, wakeref);
 	igt_global_reset_unlock(i915);
@@ -1056,7 +1078,7 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
 }
 
 static int
-live_engine_reset_gt_engine_workarounds(void *arg)
+live_engine_reset_workarounds(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine;
@@ -1085,7 +1107,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
 
 		pr_info("Verifying after %s reset...\n", engine->name);
 
-		ok = verify_gt_engine_wa(i915, &lists, "before reset");
+		ok = verify_wa_lists(ctx, &lists, "before reset");
 		if (!ok) {
 			ret = -ESRCH;
 			goto err;
@@ -1093,7 +1115,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
 
 		i915_reset_engine(engine, "live_workarounds");
 
-		ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
+		ok = verify_wa_lists(ctx, &lists, "after idle reset");
 		if (!ok) {
 			ret = -ESRCH;
 			goto err;
@@ -1124,7 +1146,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
 		igt_spinner_end(&spin);
 		igt_spinner_fini(&spin);
 
-		ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
+		ok = verify_wa_lists(ctx, &lists, "after busy reset");
 		if (!ok) {
 			ret = -ESRCH;
 			goto err;
@@ -1148,8 +1170,8 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_dirty_whitelist),
 		SUBTEST(live_reset_whitelist),
 		SUBTEST(live_isolated_whitelist),
-		SUBTEST(live_gpu_reset_gt_engine_workarounds),
-		SUBTEST(live_engine_reset_gt_engine_workarounds),
+		SUBTEST(live_gpu_reset_workarounds),
+		SUBTEST(live_engine_reset_workarounds),
 	};
 	int err;