Commit f663b0ca authored by Daniele Ceraolo Spurio, committed by Tvrtko Ursulin

drm/i915/selftests: recreate WA lists inside the selftest

As long as the selftests use the wa lists embedded in the live driver
structures, they cannot catch cases where those lists are incorrectly
set up or have been corrupted. To close this gap, update the workaround
framework so the wa lists can be built into independent structures, and
have the selftests use those instead.
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190110013232.8972-1-daniele.ceraolospurio@intel.com
[tursulin: Fixup checkpatch whitespace complaint in memset.]
parent d22ba0cb
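
For orientation before the diff: below is a minimal sketch of how a selftest consumes the helpers introduced by this patch (reference_lists_init(), verify_gt_engine_wa(), reference_lists_fini()), modelled on live_gpu_reset_gt_engine_workarounds() further down. The function name check_workarounds_survive_reset() is hypothetical, and the reset locking and runtime-pm handling done by the real test are omitted for brevity.

/*
 * Sketch only: kernel types and helpers come from the i915 tree; this
 * is not standalone code.
 */
static int check_workarounds_survive_reset(struct drm_i915_private *i915)
{
        struct i915_gpu_error *error = &i915->gpu_error;
        struct wa_lists lists;
        bool ok;

        /*
         * Snapshot the expected GT and per-engine workarounds into
         * selftest-owned lists, independent of i915->gt_wa_list and
         * engine->wa_list, so corruption of the live lists is caught.
         */
        reference_lists_init(i915, &lists);

        ok = verify_gt_engine_wa(i915, &lists, "before reset");
        if (!ok)
                goto out;

        /* Trigger a full GPU reset, then check the workarounds again. */
        set_bit(I915_RESET_HANDOFF, &error->flags);
        i915_reset(i915, ALL_ENGINES, "live_workarounds");

        ok = verify_gt_engine_wa(i915, &lists, "after reset");

out:
        /* Free the reference lists built by reference_lists_init(). */
        reference_lists_fini(i915, &lists);
        return ok ? 0 : -ESRCH;
}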
@@ -639,10 +639,9 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
wa_write_masked_or(wal, reg, val, val);
}
static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
static void
gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaDisableKillLogic:bxt,skl,kbl */
if (!IS_COFFEELAKE(i915))
wa_write_or(wal,
@@ -666,11 +665,10 @@ static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
BDW_DISABLE_HDC_INVALIDATION);
}
static void skl_gt_workarounds_init(struct drm_i915_private *i915)
static void
skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
gen9_gt_workarounds_init(i915);
gen9_gt_workarounds_init(i915, wal);
/* WaDisableGafsUnitClkGating:skl */
wa_write_or(wal,
@@ -684,11 +682,10 @@ static void skl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
static void
bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
gen9_gt_workarounds_init(i915);
gen9_gt_workarounds_init(i915, wal);
/* WaInPlaceDecompressionHang:bxt */
wa_write_or(wal,
@@ -696,11 +693,10 @@ static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
static void
kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
gen9_gt_workarounds_init(i915);
gen9_gt_workarounds_init(i915, wal);
/* WaDisableDynamicCreditSharing:kbl */
if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
@@ -719,16 +715,16 @@ static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void glk_gt_workarounds_init(struct drm_i915_private *i915)
static void
glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
gen9_gt_workarounds_init(i915);
gen9_gt_workarounds_init(i915, wal);
}
static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
static void
cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
gen9_gt_workarounds_init(i915);
gen9_gt_workarounds_init(i915, wal);
/* WaDisableGafsUnitClkGating:cfl */
wa_write_or(wal,
@@ -741,10 +737,10 @@ static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void wa_init_mcr(struct drm_i915_private *dev_priv)
static void
wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct i915_wa_list *wal = &dev_priv->gt_wa_list;
u32 mcr_slice_subslice_mask;
/*
@@ -804,11 +800,10 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
intel_calculate_mcr_s_ss_select(dev_priv));
}
static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
static void
cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
wa_init_mcr(i915);
wa_init_mcr(i915, wal);
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
@@ -822,11 +817,10 @@ static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void icl_gt_workarounds_init(struct drm_i915_private *i915)
static void
icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
wa_init_mcr(i915);
wa_init_mcr(i915, wal);
/* WaInPlaceDecompressionHang:icl */
wa_write_or(wal,
@@ -879,12 +873,9 @@ static void icl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
void intel_gt_init_workarounds(struct drm_i915_private *i915)
static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
wa_init_start(wal, "GT");
if (INTEL_GEN(i915) < 8)
return;
else if (IS_BROADWELL(i915))
@@ -892,22 +883,29 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915)
else if (IS_CHERRYVIEW(i915))
return;
else if (IS_SKYLAKE(i915))
skl_gt_workarounds_init(i915);
skl_gt_workarounds_init(i915, wal);
else if (IS_BROXTON(i915))
bxt_gt_workarounds_init(i915);
bxt_gt_workarounds_init(i915, wal);
else if (IS_KABYLAKE(i915))
kbl_gt_workarounds_init(i915);
kbl_gt_workarounds_init(i915, wal);
else if (IS_GEMINILAKE(i915))
glk_gt_workarounds_init(i915);
glk_gt_workarounds_init(i915, wal);
else if (IS_COFFEELAKE(i915))
cfl_gt_workarounds_init(i915);
cfl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_init(i915);
cnl_gt_workarounds_init(i915, wal);
else if (IS_ICELAKE(i915))
icl_gt_workarounds_init(i915);
icl_gt_workarounds_init(i915, wal);
else
MISSING_CASE(INTEL_GEN(i915));
}
void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
wa_init_start(wal, "GT");
gt_init_workarounds(i915, wal);
wa_init_finish(wal);
}
@@ -1126,10 +1124,10 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
i915_mmio_reg_offset(RING_NOPID(base)));
}
static void rcs_engine_wa_init(struct intel_engine_cs *engine)
static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *wal = &engine->wa_list;
if (IS_ICELAKE(i915)) {
/* This is not an Wa. Enable for better image quality */
@@ -1233,10 +1231,10 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
}
}
static void xcs_engine_wa_init(struct intel_engine_cs *engine)
static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *wal = &engine->wa_list;
/* WaKBLVECSSemaphoreWaitPoll:kbl */
if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
@@ -1246,6 +1244,18 @@ static void xcs_engine_wa_init(struct intel_engine_cs *engine)
}
}
static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
return;
if (engine->id == RCS)
rcs_engine_wa_init(engine, wal);
else
xcs_engine_wa_init(engine, wal);
}
void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
struct i915_wa_list *wal = &engine->wa_list;
@@ -1254,12 +1264,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
return;
wa_init_start(wal, engine->name);
if (engine->id == RCS)
rcs_engine_wa_init(engine);
else
xcs_engine_wa_init(engine);
engine_init_workarounds(engine, wal);
wa_init_finish(wal);
}
@@ -1269,11 +1274,5 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
const char *from)
{
return wa_list_verify(engine->i915, &engine->wa_list, from);
}
#include "selftests/intel_workarounds.c"
#endif
@@ -12,6 +12,51 @@
#include "igt_wedge_me.h"
#include "mock_context.h"
#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
struct wa_lists {
struct i915_wa_list gt_wa_list;
struct {
char name[REF_NAME_MAX];
struct i915_wa_list wa_list;
} engine[I915_NUM_ENGINES];
};
static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
memset(lists, 0, sizeof(*lists));
wa_init_start(&lists->gt_wa_list, "GT_REF");
gt_init_workarounds(i915, &lists->gt_wa_list);
wa_init_finish(&lists->gt_wa_list);
for_each_engine(engine, i915, id) {
struct i915_wa_list *wal = &lists->engine[id].wa_list;
char *name = lists->engine[id].name;
snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);
wa_init_start(wal, name);
engine_init_workarounds(engine, wal);
wa_init_finish(wal);
}
}
static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id)
intel_wa_list_free(&lists->engine[id].wa_list);
intel_wa_list_free(&lists->gt_wa_list);
}
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
@@ -326,15 +371,17 @@ static int live_reset_whitelist(void *arg)
return err;
}
static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
static bool verify_gt_engine_wa(struct drm_i915_private *i915,
struct wa_lists *lists, const char *str)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
bool ok = true;
ok &= intel_gt_verify_workarounds(i915, str);
ok &= wa_list_verify(i915, &lists->gt_wa_list, str);
for_each_engine(engine, i915, id)
ok &= intel_engine_verify_workarounds(engine, str);
ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);
return ok;
}
@@ -344,6 +391,7 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gpu_error *error = &i915->gpu_error;
struct wa_lists lists;
bool ok;
if (!intel_has_gpu_reset(i915))
@@ -353,17 +401,19 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
igt_global_reset_lock(i915);
intel_runtime_pm_get(i915);
reference_lists_init(i915, &lists);
ok = verify_gt_engine_wa(i915, "before reset");
ok = verify_gt_engine_wa(i915, &lists, "before reset");
if (!ok)
goto out;
set_bit(I915_RESET_HANDOFF, &error->flags);
i915_reset(i915, ALL_ENGINES, "live_workarounds");
ok = verify_gt_engine_wa(i915, "after reset");
ok = verify_gt_engine_wa(i915, &lists, "after reset");
out:
reference_lists_fini(i915, &lists);
intel_runtime_pm_put(i915);
igt_global_reset_unlock(i915);
@@ -379,6 +429,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
struct igt_spinner spin;
enum intel_engine_id id;
struct i915_request *rq;
struct wa_lists lists;
int ret = 0;
if (!intel_has_reset_engine(i915))
@@ -390,13 +441,14 @@ live_engine_reset_gt_engine_workarounds(void *arg)
igt_global_reset_lock(i915);
intel_runtime_pm_get(i915);
reference_lists_init(i915, &lists);
for_each_engine(engine, i915, id) {
bool ok;
pr_info("Verifying after %s reset...\n", engine->name);
ok = verify_gt_engine_wa(i915, "before reset");
ok = verify_gt_engine_wa(i915, &lists, "before reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -404,7 +456,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
i915_reset_engine(engine, "live_workarounds");
ok = verify_gt_engine_wa(i915, "after idle reset");
ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -435,7 +487,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
ok = verify_gt_engine_wa(i915, "after busy reset");
ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -443,6 +495,7 @@ live_engine_reset_gt_engine_workarounds(void *arg)
}
err:
reference_lists_fini(i915, &lists);
intel_runtime_pm_put(i915);
igt_global_reset_unlock(i915);
kernel_context_close(ctx);