Commit 11d723ce authored by Matthew Auld, committed by Chris Wilson

drm/i915/selftests: prefer random sizes for the huge-GTT-page smoke tests

Ditch the dubious static list of sizes to enumerate, in favour of
choosing a random size within the limits of each backing store. With
repeated CI runs this should give us a wider range of object sizes, and
in turn more page-size combinations, while using less machine time.
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191025153728.23689-6-chris@chris-wilson.co.uk
parent 23741bc8
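
The new helper at the heart of this patch, igt_random_size(), draws a page-aligned size that stays below twice the backend's maximum page size and never drops under its minimum (the minimum is simply OR-ed in). As a rough illustration of the sizes this produces, here is a userspace sketch of the same logic; the 4K PAGE_SIZE, the use of stdlib rand() in place of the kernel's prandom_u32_state(), and the demo main() are stand-ins for this example only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(uint64_t)(PAGE_SIZE - 1))

static int is_power_of_2(uint64_t x)
{
	return x && !(x & (x - 1));
}

/*
 * Userspace mirror of the patch's igt_random_size(): pick a page-aligned
 * size below 2 * max_page_size, then OR in min_page_size so the result
 * never falls under the backend's minimum page size.
 */
static uint32_t random_size(uint32_t min_page_size, uint32_t max_page_size)
{
	uint64_t mask;
	uint32_t size;

	assert(is_power_of_2(min_page_size));
	assert(is_power_of_2(max_page_size));
	assert(min_page_size >= PAGE_SIZE);
	assert(min_page_size <= max_page_size);

	mask = (((uint64_t)max_page_size << 1) - 1) & PAGE_MASK;
	size = (uint32_t)(rand() & mask);
	if (size < min_page_size)
		size |= min_page_size;

	return size;
}

int main(void)
{
	int i;

	/* Same bounds as the patch's shmem backend: SZ_64K..SZ_32M. */
	for (i = 0; i < 5; i++)
		printf("size=%u\n", random_size(64u << 10, 32u << 20));

	return 0;
}

Each run of the selftest therefore exercises a different object size, which is what gives repeated CI runs the wider page-size coverage the message above describes.
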
@@ -1317,204 +1317,137 @@ static int igt_ppgtt_exhaust_huge(void *arg)
 	return err;
 }
 
-static int igt_ppgtt_internal_huge(void *arg)
-{
-	struct i915_gem_context *ctx = arg;
-	struct drm_i915_private *i915 = ctx->i915;
-	struct drm_i915_gem_object *obj;
-	static const unsigned int sizes[] = {
-		SZ_64K,
-		SZ_128K,
-		SZ_256K,
-		SZ_512K,
-		SZ_1M,
-		SZ_2M,
-	};
-	int i;
-	int err;
-
-	/*
-	 * Sanity check that the HW uses huge pages correctly through internal
-	 * -- ensure that our writes land in the right place.
-	 */
-
-	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
-		unsigned int size = sizes[i];
-
-		obj = i915_gem_object_create_internal(i915, size);
-		if (IS_ERR(obj))
-			return PTR_ERR(obj);
-
-		err = i915_gem_object_pin_pages(obj);
-		if (err)
-			goto out_put;
-
-		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
-			pr_info("internal unable to allocate huge-page(s) with size=%u\n",
-				size);
-			goto out_unpin;
-		}
-
-		err = igt_write_huge(ctx, obj);
-		if (err) {
-			pr_err("internal write-huge failed with size=%u\n",
-			       size);
-			goto out_unpin;
-		}
-
-		i915_gem_object_unpin_pages(obj);
-		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-		i915_gem_object_put(obj);
-	}
-
-	return 0;
-
-out_unpin:
-	i915_gem_object_unpin_pages(obj);
-out_put:
-	i915_gem_object_put(obj);
-
-	return err;
-}
+typedef struct drm_i915_gem_object *
+(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);
 
 static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
 {
 	return i915->mm.gemfs && has_transparent_hugepage();
 }
 
-static int igt_ppgtt_gemfs_huge(void *arg)
+static struct drm_i915_gem_object *
+igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
 {
-	struct i915_gem_context *ctx = arg;
-	struct drm_i915_private *i915 = ctx->i915;
-	struct drm_i915_gem_object *obj;
-	static const unsigned int sizes[] = {
-		SZ_2M,
-		SZ_4M,
-		SZ_8M,
-		SZ_16M,
-		SZ_32M,
-	};
-	int i;
-	int err;
-
-	/*
-	 * Sanity check that the HW uses huge pages correctly through gemfs --
-	 * ensure that our writes land in the right place.
-	 */
-
 	if (!igt_can_allocate_thp(i915)) {
-		pr_info("missing THP support, skipping\n");
-		return 0;
+		pr_info("%s missing THP support, skipping\n", __func__);
+		return ERR_PTR(-ENODEV);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
-		unsigned int size = sizes[i];
-
-		obj = i915_gem_object_create_shmem(i915, size);
-		if (IS_ERR(obj))
-			return PTR_ERR(obj);
-
-		err = i915_gem_object_pin_pages(obj);
-		if (err)
-			goto out_put;
-
-		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
-			pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
-				size);
-			goto out_unpin;
-		}
-
-		err = igt_write_huge(ctx, obj);
-		if (err) {
-			pr_err("gemfs write-huge failed with size=%u\n",
-			       size);
-			goto out_unpin;
-		}
-
-		i915_gem_object_unpin_pages(obj);
-		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-		i915_gem_object_put(obj);
-	}
-
-	return 0;
-
-out_unpin:
-	i915_gem_object_unpin_pages(obj);
-out_put:
-	i915_gem_object_put(obj);
-
-	return err;
+	return i915_gem_object_create_shmem(i915, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+	return i915_gem_object_create_internal(i915, size);
+}
+
+static struct drm_i915_gem_object *
+igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
+{
+	return i915_gem_object_create_lmem(i915, size, flags);
+}
+
+static u32 igt_random_size(struct rnd_state *prng,
+			   u32 min_page_size,
+			   u32 max_page_size)
+{
+	u64 mask;
+	u32 size;
+
+	GEM_BUG_ON(!is_power_of_2(min_page_size));
+	GEM_BUG_ON(!is_power_of_2(max_page_size));
+	GEM_BUG_ON(min_page_size < PAGE_SIZE);
+	GEM_BUG_ON(min_page_size > max_page_size);
+
+	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
+	size = prandom_u32_state(prng) & mask;
+	if (size < min_page_size)
+		size |= min_page_size;
+
+	return size;
 }
 
-static int igt_ppgtt_lmem_huge(void *arg)
+static int igt_ppgtt_smoke_huge(void *arg)
 {
 	struct i915_gem_context *ctx = arg;
 	struct drm_i915_private *i915 = ctx->i915;
 	struct drm_i915_gem_object *obj;
-	static const unsigned int sizes[] = {
-		SZ_64K,
-		SZ_512K,
-		SZ_1M,
-		SZ_2M,
+	I915_RND_STATE(prng);
+	struct {
+		igt_create_fn fn;
+		u32 min;
+		u32 max;
+	} backends[] = {
+		{ igt_create_internal, SZ_64K, SZ_2M,  },
+		{ igt_create_shmem,    SZ_64K, SZ_32M, },
+		{ igt_create_local,    SZ_64K, SZ_1G,  },
 	};
-	int i;
 	int err;
-
-	if (!HAS_LMEM(i915)) {
-		pr_info("device lacks LMEM support, skipping\n");
-		return 0;
-	}
+	int i;
 
 	/*
-	 * Sanity check that the HW uses huge pages correctly through LMEM
-	 * -- ensure that our writes land in the right place.
+	 * Sanity check that the HW uses huge pages correctly through our
+	 * various backends -- ensure that our writes land in the right place.
 	 */
 
-	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
-		unsigned int size = sizes[i];
+	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
+		u32 min = backends[i].min;
+		u32 max = backends[i].max;
+		u32 size = max;
+try_again:
+		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));
 
-		obj = i915_gem_object_create_lmem(i915, size,
-						  I915_BO_ALLOC_CONTIGUOUS);
+		obj = backends[i].fn(i915, size, 0);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			if (err == -E2BIG) {
-				pr_info("object too big for region!\n");
-				return 0;
+				size >>= 1;
+				goto try_again;
+			} else if (err == -ENODEV) {
+				err = 0;
+				continue;
 			}
 
 			return err;
 		}
 
 		err = i915_gem_object_pin_pages(obj);
-		if (err)
+		if (err) {
+			if (err == -ENXIO) {
+				i915_gem_object_put(obj);
+				size >>= 1;
+				goto try_again;
+			}
 			goto out_put;
+		}
 
-		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
-			pr_info("LMEM unable to allocate huge-page(s) with size=%u\n",
-				size);
+		if (obj->mm.page_sizes.phys < min) {
+			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
+				__func__, size, i);
 			err = -ENOMEM;
 			goto out_unpin;
 		}
 
 		err = igt_write_huge(ctx, obj);
 		if (err) {
-			pr_err("LMEM write-huge failed with size=%u\n", size);
-			goto out_unpin;
+			pr_err("%s write-huge failed with size=%u, i=%d\n",
+			       __func__, size, i);
 		}
 
+out_unpin:
 		i915_gem_object_unpin_pages(obj);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+out_put:
 		i915_gem_object_put(obj);
-	}
 
-	return 0;
+		if (err == -ENOMEM || err == -ENXIO)
+			err = 0;
 
-out_unpin:
-	i915_gem_object_unpin_pages(obj);
-out_put:
-	i915_gem_object_put(obj);
+		if (err)
+			break;
 
-	if (err == -ENOMEM)
-		err = 0;
+		cond_resched();
+	}
 
 	return err;
 }
@@ -1878,9 +1811,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_ppgtt_pin_update),
 		SUBTEST(igt_tmpfs_fallback),
 		SUBTEST(igt_ppgtt_exhaust_huge),
-		SUBTEST(igt_ppgtt_gemfs_huge),
-		SUBTEST(igt_ppgtt_internal_huge),
-		SUBTEST(igt_ppgtt_lmem_huge),
+		SUBTEST(igt_ppgtt_smoke_huge),
 	};
 	struct drm_file *file;
 	struct i915_gem_context *ctx;
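
For readers outside the i915 tree, the backends[] table in the new igt_ppgtt_smoke_huge() is plain function-pointer dispatch: each backing store is wrapped behind the common igt_create_fn signature so one loop can smoke-test them all. A minimal standalone sketch of that pattern, with dummy types standing in for the real i915 objects and only the min/max bounds copied from the patch:

#include <stdint.h>
#include <stdio.h>

/* Dummy stand-in for the real GEM object; the dispatch is the point. */
struct dummy_obj {
	const char *backend;
	uint32_t size;
};

typedef struct dummy_obj (*create_fn)(uint32_t size, uint32_t flags);

static struct dummy_obj create_internal(uint32_t size, uint32_t flags)
{
	(void)flags;
	return (struct dummy_obj){ "internal", size };
}

static struct dummy_obj create_shmem(uint32_t size, uint32_t flags)
{
	(void)flags;
	return (struct dummy_obj){ "shmem", size };
}

static struct dummy_obj create_local(uint32_t size, uint32_t flags)
{
	(void)flags;
	return (struct dummy_obj){ "local", size };
}

int main(void)
{
	/* Min/max bounds mirror the patch's backends[] table. */
	static const struct {
		create_fn fn;
		uint32_t min, max;
	} backends[] = {
		{ create_internal, 64u << 10,  2u << 20 },
		{ create_shmem,    64u << 10, 32u << 20 },
		{ create_local,    64u << 10,  1u << 30 },
	};
	unsigned int i;

	/* One loop drives every backend through the same signature. */
	for (i = 0; i < sizeof(backends) / sizeof(backends[0]); i++) {
		struct dummy_obj obj = backends[i].fn(backends[i].max, 0);

		printf("%s: created %u bytes\n", obj.backend, obj.size);
	}

	return 0;
}

The payoff of this shape in the patch itself is that per-backend quirks (THP availability, lmem region limits) are handled inside the create wrappers via -ENODEV/-E2BIG, leaving the smoke loop generic.
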