Commit bb3d4c9d authored by Chris Wilson

drm/i915/selftests: Teach workarounds to take intel_gt as its argument

The workarounds selftests are hardware centric and so want to use the gt
as its target.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191016114902.24388-1-chris@chris-wilson.co.uk
parent 3b05c4f8
...@@ -58,7 +58,7 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin) ...@@ -58,7 +58,7 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
} }
static void static void
reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists) reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{ {
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
...@@ -66,10 +66,10 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists) ...@@ -66,10 +66,10 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
memset(lists, 0, sizeof(*lists)); memset(lists, 0, sizeof(*lists));
wa_init_start(&lists->gt_wa_list, "GT_REF", "global"); wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
gt_init_workarounds(i915, &lists->gt_wa_list); gt_init_workarounds(gt->i915, &lists->gt_wa_list);
wa_init_finish(&lists->gt_wa_list); wa_init_finish(&lists->gt_wa_list);
for_each_engine(engine, i915, id) { for_each_engine(engine, gt->i915, id) {
struct i915_wa_list *wal = &lists->engine[id].wa_list; struct i915_wa_list *wal = &lists->engine[id].wa_list;
wa_init_start(wal, "REF", engine->name); wa_init_start(wal, "REF", engine->name);
...@@ -83,12 +83,12 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists) ...@@ -83,12 +83,12 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
} }
static void static void
reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists) reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{ {
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
for_each_engine(engine, i915, id) for_each_engine(engine, gt->i915, id)
intel_wa_list_free(&lists->engine[id].wa_list); intel_wa_list_free(&lists->engine[id].wa_list);
intel_wa_list_free(&lists->gt_wa_list); intel_wa_list_free(&lists->gt_wa_list);
...@@ -215,10 +215,10 @@ static int check_whitelist(struct i915_gem_context *ctx, ...@@ -215,10 +215,10 @@ static int check_whitelist(struct i915_gem_context *ctx,
err = 0; err = 0;
i915_gem_object_lock(results); i915_gem_object_lock(results);
intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */ intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false); err = i915_gem_object_set_to_cpu_domain(results, false);
i915_gem_object_unlock(results); i915_gem_object_unlock(results);
if (intel_gt_is_wedged(&ctx->i915->gt)) if (intel_gt_is_wedged(engine->gt))
err = -EIO; err = -EIO;
if (err) if (err)
goto out_put; goto out_put;
...@@ -605,7 +605,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, ...@@ -605,7 +605,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
if (err) { if (err) {
pr_err("%s: Futzing %x timedout; cancelling test\n", pr_err("%s: Futzing %x timedout; cancelling test\n",
engine->name, reg); engine->name, reg);
intel_gt_set_wedged(&ctx->i915->gt); intel_gt_set_wedged(engine->gt);
goto out_batch; goto out_batch;
} }
...@@ -704,7 +704,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, ...@@ -704,7 +704,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
static int live_dirty_whitelist(void *arg) static int live_dirty_whitelist(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct intel_gt *gt = arg;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
enum intel_engine_id id; enum intel_engine_id id;
...@@ -713,20 +713,20 @@ static int live_dirty_whitelist(void *arg) ...@@ -713,20 +713,20 @@ static int live_dirty_whitelist(void *arg)
/* Can the user write to the whitelisted registers? */ /* Can the user write to the whitelisted registers? */
if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */ if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */
return 0; return 0;
file = mock_file(i915); file = mock_file(gt->i915);
if (IS_ERR(file)) if (IS_ERR(file))
return PTR_ERR(file); return PTR_ERR(file);
ctx = live_context(i915, file); ctx = live_context(gt->i915, file);
if (IS_ERR(ctx)) { if (IS_ERR(ctx)) {
err = PTR_ERR(ctx); err = PTR_ERR(ctx);
goto out_file; goto out_file;
} }
for_each_engine(engine, i915, id) { for_each_engine(engine, gt->i915, id) {
if (engine->whitelist.count == 0) if (engine->whitelist.count == 0)
continue; continue;
...@@ -736,24 +736,25 @@ static int live_dirty_whitelist(void *arg) ...@@ -736,24 +736,25 @@ static int live_dirty_whitelist(void *arg)
} }
out_file: out_file:
mock_file_free(i915, file); mock_file_free(gt->i915, file);
return err; return err;
} }
static int live_reset_whitelist(void *arg) static int live_reset_whitelist(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct intel_gt *gt = arg;
struct intel_engine_cs *engine = i915->engine[RCS0]; struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = 0; int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */ /* If we reset the gpu, we should not lose the RING_NONPRIV */
igt_global_reset_lock(gt);
if (!engine || engine->whitelist.count == 0) for_each_engine(engine, gt->i915, id) {
return 0; if (engine->whitelist.count == 0)
continue;
igt_global_reset_lock(&i915->gt);
if (intel_has_reset_engine(&i915->gt)) { if (intel_has_reset_engine(gt)) {
err = check_whitelist_across_reset(engine, err = check_whitelist_across_reset(engine,
do_engine_reset, do_engine_reset,
"engine"); "engine");
...@@ -761,16 +762,17 @@ static int live_reset_whitelist(void *arg) ...@@ -761,16 +762,17 @@ static int live_reset_whitelist(void *arg)
goto out; goto out;
} }
if (intel_has_gpu_reset(&i915->gt)) { if (intel_has_gpu_reset(gt)) {
err = check_whitelist_across_reset(engine, err = check_whitelist_across_reset(engine,
do_device_reset, do_device_reset,
"device"); "device");
if (err) if (err)
goto out; goto out;
} }
}
out: out:
igt_global_reset_unlock(&i915->gt); igt_global_reset_unlock(gt);
return err; return err;
} }
...@@ -996,7 +998,7 @@ check_whitelisted_registers(struct intel_engine_cs *engine, ...@@ -996,7 +998,7 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
static int live_isolated_whitelist(void *arg) static int live_isolated_whitelist(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct intel_gt *gt = arg;
struct { struct {
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
struct i915_vma *scratch[2]; struct i915_vma *scratch[2];
...@@ -1010,17 +1012,14 @@ static int live_isolated_whitelist(void *arg) ...@@ -1010,17 +1012,14 @@ static int live_isolated_whitelist(void *arg)
* invisible to a second context. * invisible to a second context.
*/ */
if (!intel_engines_has_context_isolation(i915)) if (!intel_engines_has_context_isolation(gt->i915))
return 0;
if (!i915->kernel_context->vm)
return 0; return 0;
for (i = 0; i < ARRAY_SIZE(client); i++) { for (i = 0; i < ARRAY_SIZE(client); i++) {
struct i915_address_space *vm; struct i915_address_space *vm;
struct i915_gem_context *c; struct i915_gem_context *c;
c = kernel_context(i915); c = kernel_context(gt->i915);
if (IS_ERR(c)) { if (IS_ERR(c)) {
err = PTR_ERR(c); err = PTR_ERR(c);
goto err; goto err;
...@@ -1049,7 +1048,10 @@ static int live_isolated_whitelist(void *arg) ...@@ -1049,7 +1048,10 @@ static int live_isolated_whitelist(void *arg)
i915_vm_put(vm); i915_vm_put(vm);
} }
for_each_engine(engine, i915, id) { for_each_engine(engine, gt->i915, id) {
if (!engine->kernel_context->vm)
continue;
if (!whitelist_writable_count(engine)) if (!whitelist_writable_count(engine))
continue; continue;
...@@ -1103,7 +1105,7 @@ static int live_isolated_whitelist(void *arg) ...@@ -1103,7 +1105,7 @@ static int live_isolated_whitelist(void *arg)
kernel_context_close(client[i].ctx); kernel_context_close(client[i].ctx);
} }
if (igt_flush_test(i915)) if (igt_flush_test(gt->i915))
err = -EIO; err = -EIO;
return err; return err;
...@@ -1138,16 +1140,16 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists, ...@@ -1138,16 +1140,16 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
static int static int
live_gpu_reset_workarounds(void *arg) live_gpu_reset_workarounds(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct intel_gt *gt = arg;
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
intel_wakeref_t wakeref; intel_wakeref_t wakeref;
struct wa_lists lists; struct wa_lists lists;
bool ok; bool ok;
if (!intel_has_gpu_reset(&i915->gt)) if (!intel_has_gpu_reset(gt))
return 0; return 0;
ctx = kernel_context(i915); ctx = kernel_context(gt->i915);
if (IS_ERR(ctx)) if (IS_ERR(ctx))
return PTR_ERR(ctx); return PTR_ERR(ctx);
...@@ -1155,25 +1157,25 @@ live_gpu_reset_workarounds(void *arg) ...@@ -1155,25 +1157,25 @@ live_gpu_reset_workarounds(void *arg)
pr_info("Verifying after GPU reset...\n"); pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(&i915->gt); igt_global_reset_lock(gt);
wakeref = intel_runtime_pm_get(&i915->runtime_pm); wakeref = intel_runtime_pm_get(gt->uncore->rpm);
reference_lists_init(i915, &lists); reference_lists_init(gt, &lists);
ok = verify_wa_lists(ctx, &lists, "before reset"); ok = verify_wa_lists(ctx, &lists, "before reset");
if (!ok) if (!ok)
goto out; goto out;
intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds"); intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
ok = verify_wa_lists(ctx, &lists, "after reset"); ok = verify_wa_lists(ctx, &lists, "after reset");
out: out:
i915_gem_context_unlock_engines(ctx); i915_gem_context_unlock_engines(ctx);
kernel_context_close(ctx); kernel_context_close(ctx);
reference_lists_fini(i915, &lists); reference_lists_fini(gt, &lists);
intel_runtime_pm_put(&i915->runtime_pm, wakeref); intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(&i915->gt); igt_global_reset_unlock(gt);
return ok ? 0 : -ESRCH; return ok ? 0 : -ESRCH;
} }
...@@ -1181,7 +1183,7 @@ live_gpu_reset_workarounds(void *arg) ...@@ -1181,7 +1183,7 @@ live_gpu_reset_workarounds(void *arg)
static int static int
live_engine_reset_workarounds(void *arg) live_engine_reset_workarounds(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct intel_gt *gt = arg;
struct i915_gem_engines_iter it; struct i915_gem_engines_iter it;
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
struct intel_context *ce; struct intel_context *ce;
...@@ -1191,17 +1193,17 @@ live_engine_reset_workarounds(void *arg) ...@@ -1191,17 +1193,17 @@ live_engine_reset_workarounds(void *arg)
struct wa_lists lists; struct wa_lists lists;
int ret = 0; int ret = 0;
if (!intel_has_reset_engine(&i915->gt)) if (!intel_has_reset_engine(gt))
return 0; return 0;
ctx = kernel_context(i915); ctx = kernel_context(gt->i915);
if (IS_ERR(ctx)) if (IS_ERR(ctx))
return PTR_ERR(ctx); return PTR_ERR(ctx);
igt_global_reset_lock(&i915->gt); igt_global_reset_lock(gt);
wakeref = intel_runtime_pm_get(&i915->runtime_pm); wakeref = intel_runtime_pm_get(gt->uncore->rpm);
reference_lists_init(i915, &lists); reference_lists_init(gt, &lists);
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct intel_engine_cs *engine = ce->engine; struct intel_engine_cs *engine = ce->engine;
...@@ -1254,12 +1256,12 @@ live_engine_reset_workarounds(void *arg) ...@@ -1254,12 +1256,12 @@ live_engine_reset_workarounds(void *arg)
} }
err: err:
i915_gem_context_unlock_engines(ctx); i915_gem_context_unlock_engines(ctx);
reference_lists_fini(i915, &lists); reference_lists_fini(gt, &lists);
intel_runtime_pm_put(&i915->runtime_pm, wakeref); intel_runtime_pm_put(gt->uncore->rpm, wakeref);
igt_global_reset_unlock(&i915->gt); igt_global_reset_unlock(gt);
kernel_context_close(ctx); kernel_context_close(ctx);
igt_flush_test(i915); igt_flush_test(gt->i915);
return ret; return ret;
} }
...@@ -1277,5 +1279,5 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915) ...@@ -1277,5 +1279,5 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
if (intel_gt_is_wedged(&i915->gt)) if (intel_gt_is_wedged(&i915->gt))
return 0; return 0;
return i915_subtests(tests, i915); return intel_gt_live_subtests(tests, &i915->gt);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment