Commit e0695db7 authored by Chris Wilson

drm/i915: Create/destroy VM (ppGTT) for use with contexts

In preparation to making the ppGTT binding for a context explicit (to
facilitate reusing the same ppGTT between different contexts), allow the
user to create and destroy named ppGTT.

v2: Replace global barrier for swapping over the ppgtt and tlbs with a
local context barrier (Tvrtko)
v3: serialise with struct_mutex; it's lazy but required dammit
v4: Rewrite igt_ctx_shared_exec to be more different (aimed to be more
similar, turned out different!)

v5: Fix up test unwind for aliasing-ppgtt (snb)
v6: Tighten language for uapi struct drm_i915_gem_vm_control.
v7: Patch the context image for runtime ppgtt switching!

Testcase: igt/gem_vm_create
Testcase: igt/gem_ctx_param/vm
Testcase: igt/gem_ctx_clone/vm
Testcase: igt/gem_ctx_shared
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190322092325.5883-2-chris@chris-wilson.co.uk
parent 9d1305ef
@@ -3122,6 +3122,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};

static struct drm_driver driver = {
...
@@ -220,6 +220,9 @@ struct drm_i915_file_private {
struct idr context_idr;
struct mutex context_idr_lock; /* guards context_idr */
struct idr vm_idr;
struct mutex vm_idr_lock; /* guards vm_idr */
unsigned int bsd_engine;
/*
...
This diff is collapsed.
@@ -148,6 +148,11 @@ void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
...
@@ -1937,6 +1937,8 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;

GEM_BUG_ON(ppgtt->base.vm.closed);

/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
* which will be pinned into every active context.
@@ -1975,6 +1977,17 @@ void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
i915_vma_unpin(ppgtt->vma);
}
void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
if (!ppgtt->pin_count)
return;
ppgtt->pin_count = 0;
i915_vma_unpin(ppgtt->vma);
}
static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
struct i915_ggtt * const ggtt = &i915->ggtt;
@@ -2082,12 +2095,6 @@ i915_ppgtt_create(struct drm_i915_private *i915)
return ppgtt;
}
void i915_ppgtt_close(struct i915_address_space *vm)
{
GEM_BUG_ON(vm->closed);
vm->closed = true;
}
static void ppgtt_destroy_vma(struct i915_address_space *vm)
{
struct list_head *phases[] = {
...
@@ -396,6 +396,8 @@ struct i915_hw_ppgtt {
struct i915_page_directory_pointer pdp; /* GEN8+ */
struct i915_page_directory pd; /* GEN6-7 */
};

u32 user_handle;
};

struct gen6_hw_ppgtt {
@@ -605,13 +607,12 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
void i915_ppgtt_close(struct i915_address_space *vm);
void i915_ppgtt_release(struct kref *kref);

static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
static inline struct i915_hw_ppgtt *i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
kref_get(&ppgtt->ref);
return ppgtt;
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
@@ -622,6 +623,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base);

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
...
@@ -1732,7 +1732,6 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);

out_close:
i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);

out_unlock:
...
@@ -373,7 +373,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
return 0;
}

static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
static noinline int cpu_check(struct drm_i915_gem_object *obj,
unsigned int idx, unsigned int max)
{
unsigned int n, m, needs_flush;
int err;
@@ -391,8 +392,10 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
for (m = 0; m < max; m++) {
if (map[m] != m) {
pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
n, m, map[m], m);
pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
__builtin_return_address(0), idx,
n, real_page_count(obj), m, max,
map[m], m);
err = -EINVAL;
goto out_unmap;
}
@@ -400,8 +403,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
for (; m < DW_PER_PAGE; m++) {
if (map[m] != STACK_MAGIC) {
pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
n, m, map[m], STACK_MAGIC);
pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
__builtin_return_address(0), idx, n, m,
map[m], STACK_MAGIC);
err = -EINVAL;
goto out_unmap;
}
@@ -479,12 +483,8 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
struct intel_engine_cs *engine;
unsigned long ncontexts, ndwords, dw;
enum intel_engine_id id;
struct igt_live_test t;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
int err = -ENODEV;

/*
@@ -496,13 +496,27 @@ static int igt_ctx_exec(void *arg)
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
for_each_engine(engine, i915, id) {
struct drm_i915_gem_object *obj = NULL;
unsigned long ncontexts, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
if (!intel_engine_can_store_dword(engine))
continue;
if (!engine->context_size)
continue; /* No logical context support in HW */
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);

mutex_lock(&i915->drm.struct_mutex);

err = igt_live_test_begin(&t, i915, __func__, "");
err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_unlock;
@@ -510,9 +524,8 @@ static int igt_ctx_exec(void *arg)
ndwords = 0;
dw = 0;
while (!time_after(jiffies, end_time)) {
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
unsigned int id;
intel_wakeref_t wakeref;

ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
@@ -520,15 +533,6 @@ static int igt_ctx_exec(void *arg)
goto out_unlock;
}
for_each_engine(engine, i915, id) {
intel_wakeref_t wakeref;
if (!engine->context_size)
continue; /* No logical context support in HW */
if (!intel_engine_can_store_dword(engine))
continue;
if (!obj) {
obj = create_test_object(ctx, file, &objects);
if (IS_ERR(obj)) {
@@ -537,7 +541,6 @@ static int igt_ctx_exec(void *arg)
}
}
err = 0;
with_intel_runtime_pm(i915, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
@@ -552,19 +555,20 @@ static int igt_ctx_exec(void *arg)
obj = NULL;
dw = 0;
}
ndwords++;
}
ncontexts++;
}

pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
ncontexts, RUNTIME_INFO(i915)->num_engines, ndwords);
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
ncontexts, engine->name, ndwords);

dw = 0;
ncontexts = dw = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));

err = cpu_check(obj, rem);
err = cpu_check(obj, ncontexts++, rem);
if (err)
break;
@@ -576,6 +580,129 @@ static int igt_ctx_exec(void *arg)
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
if (err)
return err;
}
return 0;
}
static int igt_shared_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *parent;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_live_test t;
struct drm_file *file;
int err = 0;
/*
* Create a few different contexts with the same mm and write
* through each ctx using the GPU making sure those writes end
* up in the expected pages of our obj.
*/
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
parent = live_context(i915, file);
if (IS_ERR(parent)) {
err = PTR_ERR(parent);
goto out_unlock;
}
if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
err = 0;
goto out_unlock;
}
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
for_each_engine(engine, i915, id) {
unsigned long ncontexts, ndwords, dw;
struct drm_i915_gem_object *obj = NULL;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
if (!intel_engine_can_store_dword(engine))
continue;
dw = 0;
ndwords = 0;
ncontexts = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_test;
}
__assign_ppgtt(ctx, parent->ppgtt);
if (!obj) {
obj = create_test_object(parent, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
kernel_context_close(ctx);
goto out_test;
}
}
err = 0;
with_intel_runtime_pm(i915, wakeref)
err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
yesno(!!ctx->ppgtt), err);
kernel_context_close(ctx);
goto out_test;
}
if (++dw == max_dwords(obj)) {
obj = NULL;
dw = 0;
}
ndwords++;
ncontexts++;
kernel_context_close(ctx);
}
pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
ncontexts, engine->name, ndwords);
ncontexts = dw = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));
err = cpu_check(obj, ncontexts++, rem);
if (err)
goto out_test;
dw += rem;
}
}
out_test:
if (igt_live_test_end(&t))
err = -EIO;
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
}
@@ -1046,7 +1173,7 @@ static int igt_ctx_readonly(void *arg)
struct drm_i915_gem_object *obj = NULL;
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
unsigned long ndwords, dw;
unsigned long idx, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
I915_RND_STATE(prng);
@@ -1127,6 +1254,7 @@ static int igt_ctx_readonly(void *arg)
ndwords, RUNTIME_INFO(i915)->num_engines);

dw = 0;
idx = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));
@@ -1136,7 +1264,7 @@ static int igt_ctx_readonly(void *arg)
if (i915_gem_object_is_readonly(obj))
num_writes = 0;

err = cpu_check(obj, num_writes);
err = cpu_check(obj, idx++, num_writes);
if (err)
break;
@@ -1619,7 +1747,8 @@ static int mock_context_barrier(void *arg)
}

counter = 0;
err = context_barrier_task(ctx, 0, mock_barrier_task, &counter);
err = context_barrier_task(ctx, 0,
NULL, mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
@@ -1631,8 +1760,8 @@ static int mock_context_barrier(void *arg)
}

counter = 0;
err = context_barrier_task(ctx,
ALL_ENGINES, mock_barrier_task, &counter);
err = context_barrier_task(ctx, ALL_ENGINES,
NULL, mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
@@ -1655,8 +1784,8 @@ static int mock_context_barrier(void *arg)
counter = 0;
context_barrier_inject_fault = BIT(RCS0);
err = context_barrier_task(ctx,
ALL_ENGINES, mock_barrier_task, &counter);
err = context_barrier_task(ctx, ALL_ENGINES,
NULL, mock_barrier_task, &counter);
context_barrier_inject_fault = 0;
if (err == -ENXIO)
err = 0;
@@ -1670,8 +1799,8 @@ static int mock_context_barrier(void *arg)
goto out;

counter = 0;
err = context_barrier_task(ctx,
ALL_ENGINES, mock_barrier_task, &counter);
err = context_barrier_task(ctx, ALL_ENGINES,
NULL, mock_barrier_task, &counter);
if (err) {
pr_err("Failed at line %d, err=%d\n", __LINE__, err);
goto out;
@@ -1719,6 +1848,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
SUBTEST(igt_ctx_sseu),
SUBTEST(igt_shared_ctx_exec),
SUBTEST(igt_vm_isolation),
};
...
@@ -1020,7 +1020,6 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);

i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);

out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
...
@@ -54,13 +54,17 @@ mock_context(struct drm_i915_private *i915,
goto err_handles;

if (name) {
struct i915_hw_ppgtt *ppgtt;

ctx->name = kstrdup(name, GFP_KERNEL);
if (!ctx->name)
goto err_put;

ctx->ppgtt = mock_ppgtt(i915, name);
if (!ctx->ppgtt)
ppgtt = mock_ppgtt(i915, name);
if (!ppgtt)
goto err_put;

__set_ppgtt(ctx, ppgtt);
}

return ctx;
...
@@ -343,6 +343,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_PERF_ADD_CONFIG 0x37
#define DRM_I915_PERF_REMOVE_CONFIG 0x38
#define DRM_I915_QUERY 0x39
#define DRM_I915_GEM_VM_CREATE 0x3a
#define DRM_I915_GEM_VM_DESTROY 0x3b
/* Must be kept compact -- no holes */

#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -402,6 +404,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)

/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -1453,6 +1457,33 @@ struct drm_i915_gem_context_destroy {
__u32 pad;
};
/*
* DRM_I915_GEM_VM_CREATE -
*
* Create a new virtual memory address space (ppGTT) for use within a context
* on the same file. Extensions can be provided to configure exactly how the
* address space is setup upon creation.
*
* The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
* returned in the outparam @vm_id.
*
* No flags are defined; all bits are reserved and must be zero.
*
* An extension chain may be provided, starting with @extensions, and terminated
* by the @next_extension being 0. Currently, no extensions are defined.
*
* DRM_I915_GEM_VM_DESTROY -
*
* Destroys a previously created VM id, specified in @vm_id.
*
* No extensions or flags are allowed currently, and so must be zero.
*/
struct drm_i915_gem_vm_control {
__u64 extensions;
__u32 flags;
__u32 vm_id;
};
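
For orientation, a minimal userspace sketch of the create/destroy pair using the struct above. The ioctl names and the drm_i915_gem_vm_control fields are the ones added by this patch and drmIoctl() is the standard libdrm wrapper, but the helper names, error handling and the fd argument are assumptions for illustration only (the include path for the uapi header may also differ).

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>    /* drmIoctl() from libdrm */
#include <i915_drm.h>   /* uapi header extended by this patch; path may vary */

/* Hypothetical helper: create a new ppGTT (VM) on this fd. */
static int vm_create(int fd, uint32_t *vm_id)
{
	struct drm_i915_gem_vm_control ctl;

	memset(&ctl, 0, sizeof(ctl)); /* no flags or extensions are defined yet */
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl))
		return -errno;

	*vm_id = ctl.vm_id; /* per-fd handle, usable with I915_CONTEXT_PARAM_VM */
	return 0;
}

/* Hypothetical helper: destroy a previously created VM id. */
static int vm_destroy(int fd, uint32_t vm_id)
{
	struct drm_i915_gem_vm_control ctl;

	memset(&ctl, 0, sizeof(ctl));
	ctl.vm_id = vm_id;
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl) ? -errno : 0;
}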
struct drm_i915_reg_read { struct drm_i915_reg_read {
/* /*
* Register offset. * Register offset.
...@@ -1542,7 +1573,19 @@ struct drm_i915_gem_context_param { ...@@ -1542,7 +1573,19 @@ struct drm_i915_gem_context_param {
* On creation, all new contexts are marked as recoverable. * On creation, all new contexts are marked as recoverable.
*/ */
#define I915_CONTEXT_PARAM_RECOVERABLE 0x8 #define I915_CONTEXT_PARAM_RECOVERABLE 0x8
/*
* The id of the associated virtual memory address space (ppGTT) of
* this context. Can be retrieved and passed to another context
* (on the same fd) for both to use the same ppGTT and so share
* address layouts, and avoid reloading the page tables on context
* switches between themselves.
*
* See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
*/
#define I915_CONTEXT_PARAM_VM 0x9
/* Must be kept compact -- no holes and well documented */

__u64 value;
};
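
Likewise, a sketch of the sharing flow that the I915_CONTEXT_PARAM_VM comment describes: read the VM id out of one context with GETPARAM and install it in a second context with SETPARAM, so both run in the same ppGTT. The parameter and the GETPARAM/SETPARAM ioctls are existing uapi; the helper name, error handling and the fd/context arguments are assumptions for the example.

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Hypothetical helper: make ctx_b run in the same address space as ctx_a. */
static int share_vm(int fd, uint32_t ctx_a, uint32_t ctx_b)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_a,
		.param = I915_CONTEXT_PARAM_VM,
	};

	/* Retrieve the VM id bound to ctx_a ... */
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
		return -errno;

	/* ... and pass it to ctx_b, so both contexts share one ppGTT. */
	p.ctx_id = ctx_b;
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p) ? -errno : 0;
}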
...