Commit 450cede7 authored by Thomas Hellström

drm/i915/gem: Fix the mman selftest

Using the I915_MMAP_TYPE_FIXED mmap type requires the TTM backend, so
for that mmap type, use __i915_gem_object_create_user() instead of
i915_gem_object_create_internal(), as we really want to test objects that
are mmap-able by user-space.
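
Concretely, this boils down to a small helper that takes the user-visible
(TTM-backed) path on LMEM-capable hardware and falls back to an internal
object otherwise. A minimal sketch of that helper, mirroring the
create_sys_or_internal() added in the diff below:

    static struct drm_i915_gem_object *
    create_sys_or_internal(struct drm_i915_private *i915,
                           unsigned long size)
    {
            if (HAS_LMEM(i915)) {
                    /* LMEM-capable: create a TTM-backed, user-visible object
                     * from system memory so it can be mapped with
                     * I915_MMAP_TYPE_FIXED.
                     */
                    struct intel_memory_region *sys_region =
                            i915->mm.regions[INTEL_REGION_SMEM];

                    return __i915_gem_object_create_user(i915, size, &sys_region, 1);
            }

            /* Integrated parts keep using cheap internal objects. */
            return i915_gem_object_create_internal(i915, size);
    }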

This also means that the out-of-space error happens at object creation
and returns -ENXIO rather than -ENOSPC, so fix the code up to expect
-ENXIO on out-of-offset-space errors.
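
In the selftest this shows up as choosing the expected errno up front,
roughly as in the igt_mmap_offset_exhaustion() hunks below:

    /* With the user/TTM path (LMEM-capable), object creation itself fails
     * with -ENXIO when the mmap offset space is exhausted; otherwise the
     * offset insertion fails with -ENOSPC.
     */
    int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;

    if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc))
            /* report the unexpected success */;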

Finally, for now only use I915_MMAP_TYPE_FIXED for LMEM and SMEM when
testing on LMEM-capable devices. For stolen LMEM, we still take the
same path as for integrated, as that hasn't been moved over to TTM yet,
and user-space should not be able to create objects out of stolen LMEM
anyway.

v2:
 - Check the presence of the obj->ops->mmap_offset callback rather than
   hardcoding the supported mmap regions in can_mmap() (Maarten Lankhorst)
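
   For reference, after v2 the check amounts to: an object whose ops table
   provides a mmap_offset hook is TTM-backed and only supports the FIXED
   mmap type. Sketch of the resulting check (full hunk at the end of the
   diff):

       if (obj->ops->mmap_offset)      /* TTM-backed object */
               return type == I915_MMAP_TYPE_FIXED;
       else if (type == I915_MMAP_TYPE_FIXED)
               return false;           /* FIXED needs a TTM-backed object */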

Fixes: 7961c5b6 ("drm/i915: Add TTM offset argument to mmap.")
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210831122931.157536-1-thomas.hellstrom@linux.intel.com
parent 5db18567
@@ -581,6 +581,20 @@ static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
 	return I915_MMAP_TYPE_GTT;
 }
 
+static struct drm_i915_gem_object *
+create_sys_or_internal(struct drm_i915_private *i915,
+		       unsigned long size)
+{
+	if (HAS_LMEM(i915)) {
+		struct intel_memory_region *sys_region =
+			i915->mm.regions[INTEL_REGION_SMEM];
+
+		return __i915_gem_object_create_user(i915, size, &sys_region, 1);
+	}
+
+	return i915_gem_object_create_internal(i915, size);
+}
+
 static bool assert_mmap_offset(struct drm_i915_private *i915,
 			       unsigned long size,
 			       int expected)
@@ -589,7 +603,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
 	u64 offset;
 	int ret;
 
-	obj = i915_gem_object_create_internal(i915, size);
+	obj = create_sys_or_internal(i915, size);
 	if (IS_ERR(obj))
 		return expected && expected == PTR_ERR(obj);
 
@@ -633,6 +647,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	struct drm_mm_node *hole, *next;
 	int loop, err = 0;
 	u64 offset;
+	int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
 
 	/* Disable background reaper */
 	disable_retire_worker(i915);
@@ -683,14 +698,14 @@ static int igt_mmap_offset_exhaustion(void *arg)
 	}
 
 	/* Too large */
-	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
+	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
 		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
 		err = -EINVAL;
 		goto out;
 	}
 
 	/* Fill the hole, further allocation attempts should then fail */
-	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	obj = create_sys_or_internal(i915, PAGE_SIZE);
 	if (IS_ERR(obj)) {
 		err = PTR_ERR(obj);
 		pr_err("Unable to create object for reclaimed hole\n");
@@ -703,7 +718,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 		goto err_obj;
 	}
 
-	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+	if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
 		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
 		err = -EINVAL;
 		goto err_obj;
@@ -839,10 +854,9 @@ static int wc_check(struct drm_i915_gem_object *obj)
 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	bool no_map;
 
-	if (HAS_LMEM(i915))
+	if (obj->ops->mmap_offset)
 		return type == I915_MMAP_TYPE_FIXED;
 	else if (type == I915_MMAP_TYPE_FIXED)
 		return false;