Commit 85a9c0bc authored by Chris Wilson

drm/i915/selftests: Tweak igt_ggtt_page to speed it up

Reduce the number of GGTT PTE operations to speed the test up, but this
reduces the likelihood of spotting a coherency error in those operations.
However, Broxton is sporadically timing out on this test, presumably because
its GGTT operations are all uncached.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171223110407.21402-1-chris@chris-wilson.co.uk
parent ce6e2137
...@@ -1052,35 +1052,38 @@ static int igt_ggtt_page(void *arg) ...@@ -1052,35 +1052,38 @@ static int igt_ggtt_page(void *arg)
memset(&tmp, 0, sizeof(tmp)); memset(&tmp, 0, sizeof(tmp));
err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp, err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
1024 * PAGE_SIZE, 0, count * PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end, 0, ggtt->mappable_end,
DRM_MM_INSERT_LOW); DRM_MM_INSERT_LOW);
if (err) if (err)
goto out_unpin; goto out_unpin;
intel_runtime_pm_get(i915);
for (n = 0; n < count; n++) {
u64 offset = tmp.start + n * PAGE_SIZE;
ggtt->base.insert_page(&ggtt->base,
i915_gem_object_get_dma_address(obj, 0),
offset, I915_CACHE_NONE, 0);
}
order = i915_random_order(count, &prng); order = i915_random_order(count, &prng);
if (!order) { if (!order) {
err = -ENOMEM; err = -ENOMEM;
goto out_remove; goto out_remove;
} }
intel_runtime_pm_get(i915);
for (n = 0; n < count; n++) { for (n = 0; n < count; n++) {
u64 offset = tmp.start + order[n] * PAGE_SIZE; u64 offset = tmp.start + order[n] * PAGE_SIZE;
u32 __iomem *vaddr; u32 __iomem *vaddr;
ggtt->base.insert_page(&ggtt->base,
i915_gem_object_get_dma_address(obj, 0),
offset, I915_CACHE_NONE, 0);
vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset); vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
iowrite32(n, vaddr + n); iowrite32(n, vaddr + n);
io_mapping_unmap_atomic(vaddr); io_mapping_unmap_atomic(vaddr);
wmb();
ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
} }
i915_gem_flush_ggtt_writes(i915);
i915_random_reorder(order, count, &prng); i915_random_reorder(order, count, &prng);
for (n = 0; n < count; n++) { for (n = 0; n < count; n++) {
...@@ -1088,16 +1091,10 @@ static int igt_ggtt_page(void *arg) ...@@ -1088,16 +1091,10 @@ static int igt_ggtt_page(void *arg)
u32 __iomem *vaddr; u32 __iomem *vaddr;
u32 val; u32 val;
ggtt->base.insert_page(&ggtt->base,
i915_gem_object_get_dma_address(obj, 0),
offset, I915_CACHE_NONE, 0);
vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset); vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
val = ioread32(vaddr + n); val = ioread32(vaddr + n);
io_mapping_unmap_atomic(vaddr); io_mapping_unmap_atomic(vaddr);
ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
if (val != n) { if (val != n) {
pr_err("insert page failed: found %d, expected %d\n", pr_err("insert page failed: found %d, expected %d\n",
val, n); val, n);
...@@ -1105,10 +1102,11 @@ static int igt_ggtt_page(void *arg) ...@@ -1105,10 +1102,11 @@ static int igt_ggtt_page(void *arg)
break; break;
} }
} }
intel_runtime_pm_put(i915);
kfree(order); kfree(order);
out_remove: out_remove:
ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size);
intel_runtime_pm_put(i915);
drm_mm_remove_node(&tmp); drm_mm_remove_node(&tmp);
out_unpin: out_unpin:
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment