Commit c5def85c authored by Chris Wilson

drm/i915/selftests: Test vm isolation

The vms of two contexts are supposed to be independent, such that a stray
write by one cannot be detected by the other. Normally the GTT is filled
explicitly by userspace, but the space in between objects is filled with
a scratch page -- and that scratch page should not be able to form an
inter-context backchannel.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181029172925.10159-1-chris@chris-wilson.co.uk
parent 42882336
@@ -760,6 +760,323 @@ static int igt_ctx_readonly(void *arg)
	return err;
}

static int check_scratch(struct i915_gem_context *ctx, u64 offset)
{
	struct drm_mm_node *node =
		__drm_mm_interval_first(&ctx->ppgtt->vm.mm,
					offset, offset + sizeof(u32) - 1);
	if (!node || node->start > offset)
		return 0;

	GEM_BUG_ON(offset >= node->start + node->size);

	pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
	       upper_32_bits(offset), lower_32_bits(offset));
	return -EINVAL;
}

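/*
 * Descriptive note: build and submit a one-page batch in ctx's ppGTT that
 * stores @value into the unbound address @offset, i.e. into whatever
 * scratch page backs that hole.
 */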
static int write_to_scratch(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    u64 offset, u32 value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}
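
	/*
	 * Emit MI_STORE_DWORD_IMM to poke @value into @offset of the
	 * context's ppGTT; gen8+ takes a 64-bit address, earlier gens a
	 * 32-bit one.
	 */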
	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
	} else {
		*cmd++ = 0;
		*cmd++ = offset;
	}
	*cmd++ = value;
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto err;

	err = check_scratch(ctx, offset);
	if (err)
		goto err_unpin;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	err = i915_vma_move_to_active(vma, rq, 0);
	if (err)
		goto skip_request;

	i915_gem_object_set_active_reference(obj);
	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_request_add(rq);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return err;
}

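/*
 * Descriptive note: build and submit a batch in ctx's ppGTT that copies the
 * dword at @offset into the batch object itself (via a register round-trip),
 * so the result can afterwards be read back with the CPU.
 */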
static int read_from_scratch(struct i915_gem_context *ctx,
			     struct intel_engine_cs *engine,
			     u64 offset, u32 *value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
	const u32 result = 0x100;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	memset(cmd, POISON_INUSE, PAGE_SIZE);
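
	/*
	 * Read the target dword back through a GPR: MI_LOAD_REGISTER_MEM
	 * loads *offset into RCS_GPR0, then MI_STORE_REGISTER_MEM writes
	 * the GPR out to offset 0x100 (result) of this batch object, which
	 * is pinned at ppGTT address 0 via PIN_OFFSET_FIXED below, so the
	 * CPU can inspect the value once the request completes.
	 */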
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
		*cmd++ = 0;
	} else {
		*cmd++ = MI_LOAD_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = offset;
		*cmd++ = MI_STORE_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto err;

	err = check_scratch(ctx, offset);
	if (err)
		goto err_unpin;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_request_add(rq);

	err = i915_gem_object_set_to_cpu_domain(obj, false);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*value = cmd[result / sizeof(*cmd)];
	i915_gem_object_unpin_map(obj);
	i915_gem_object_put(obj);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return err;
}

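/*
 * Descriptive note: write a dword through ctx_a at a random unbound offset,
 * then read the same offset back through ctx_b. With separate page tables
 * and scratch pages, ctx_b must never observe ctx_a's write (it should read
 * back zero).
 */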
static int igt_vm_isolation(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_a, *ctx_b;
	struct intel_engine_cs *engine;
	struct drm_file *file;
	I915_RND_STATE(prng);
	unsigned long count;
	struct live_test t;
	unsigned int id;
	u64 vm_total;
	int err;

	if (INTEL_GEN(i915) < 7)
		return 0;

	/*
	 * The simple goal here is that a write into one context is not
	 * observed in a second (separate page tables and scratch).
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	ctx_a = i915_gem_create_context(i915, file->driver_priv);
	if (IS_ERR(ctx_a)) {
		err = PTR_ERR(ctx_a);
		goto out_unlock;
	}

	ctx_b = i915_gem_create_context(i915, file->driver_priv);
	if (IS_ERR(ctx_b)) {
		err = PTR_ERR(ctx_b);
		goto out_unlock;
	}

	/* We can only test vm isolation if the vms are distinct */
	if (ctx_a->ppgtt == ctx_b->ppgtt)
		goto out_unlock;

	vm_total = ctx_a->ppgtt->vm.total;
	GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
	vm_total -= I915_GTT_PAGE_SIZE;

	intel_runtime_pm_get(i915);

	count = 0;
	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		unsigned long this = 0;

		if (!intel_engine_can_store_dword(engine))
			continue;

		while (!__igt_timeout(end_time, NULL)) {
			u32 value = 0xc5c5c5c5;
			u64 offset;
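
			/*
			 * Pick a dword-aligned offset anywhere in the ppGTT
			 * above the first page (reserved for the batches,
			 * which are pinned at offset 0); vm_total was reduced
			 * by one page above so the result stays within the vm.
			 */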
			div64_u64_rem(i915_prandom_u64_state(&prng),
				      vm_total, &offset);
			offset &= -sizeof(u32);
			offset += I915_GTT_PAGE_SIZE;
			err = write_to_scratch(ctx_a, engine,
					       offset, 0xdeadbeef);
			if (err == 0)
				err = read_from_scratch(ctx_b, engine,
							offset, &value);
			if (err)
				goto out_rpm;

			if (value) {
				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
				       engine->name, value,
				       upper_32_bits(offset),
				       lower_32_bits(offset),
				       this);
				err = -EINVAL;
				goto out_rpm;
			}

			this++;
		}
		count += this;
	}
	pr_info("Checked %lu scratch offsets across %d engines\n",
		count, INTEL_INFO(i915)->num_rings);

out_rpm:
	intel_runtime_pm_put(i915);
out_unlock:
	if (end_live_test(&t))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

static __maybe_unused const char *
__engine_name(struct drm_i915_private *i915, unsigned int engines)
{
@@ -915,6 +1232,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
		SUBTEST(live_nop_switch),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
		SUBTEST(igt_vm_isolation),
	};

	if (i915_terminally_wedged(&dev_priv->gpu_error))
......