Commit f6844a85 authored by Chris Wilson

drm/i915/selftests: Replace opencoded clflush with drm_clflush_virt_range

We occasionally see that the clflush prior to a read of GPU data is
returning stale data, reminiscent of much earlier bugs fixed by adding a
second clflush for serialisation. As drm_clflush_virt_range() already
supplies the workaround, use it rather than open code the clflush
instruction.

References: 396f5d62 ("drm: Restore double clflush on the last partial cacheline")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180730075351.15569-3-chris@chris-wilson.co.uk
parent 39f3be16
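
For context, the serialisation workaround the commit message refers to lives in drm_clflush_virt_range() (drivers/gpu/drm/drm_cache.c), as restored by the referenced commit 396f5d62. Below is a minimal sketch of its x86 path, paraphrased rather than copied verbatim; the helper name is illustrative, and details are simplified. The point is that the range is aligned down to a cacheline boundary, flushed line by line between memory barriers, and the last (potentially partial) cacheline is flushed a second time, which is exactly the double-clflush the open-coded selftest version lacked.

	/*
	 * Simplified sketch of the x86 path of drm_clflush_virt_range(),
	 * paraphrased from drivers/gpu/drm/drm_cache.c of this era; not a
	 * verbatim copy of the kernel source.
	 */
	static void clflush_virt_range_sketch(void *addr, unsigned long length)
	{
		const int size = boot_cpu_data.x86_clflush_size; /* cacheline size */
		void *end = addr + length;

		/* Align down to the start of the first cacheline in the range. */
		addr = (void *)((unsigned long)addr & -size);

		mb();
		for (; addr < end; addr += size)
			clflushopt(addr);
		/*
		 * Flush the last cacheline a second time: the double clflush
		 * on the final (possibly partial) cacheline forces
		 * serialisation, avoiding the stale reads described above
		 * (see 396f5d62).
		 */
		clflushopt(end - 1);
		mb();
	}

Using this helper, the selftest's before/after flushes reduce to a single call on the word being accessed, as the diff below shows.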
@@ -33,7 +33,8 @@ static int cpu_set(struct drm_i915_gem_object *obj,
 {
 	unsigned int needs_clflush;
 	struct page *page;
-	u32 *map;
+	void *map;
+	u32 *cpu;
 	int err;
 
 	err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
@@ -42,24 +43,19 @@ static int cpu_set(struct drm_i915_gem_object *obj,
 
 	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
 	map = kmap_atomic(page);
-	if (needs_clflush & CLFLUSH_BEFORE) {
-		mb();
-		clflush(map+offset_in_page(offset) / sizeof(*map));
-		mb();
-	}
+	cpu = map + offset_in_page(offset);
 
-	map[offset_in_page(offset) / sizeof(*map)] = v;
+	if (needs_clflush & CLFLUSH_BEFORE)
+		drm_clflush_virt_range(cpu, sizeof(*cpu));
 
-	if (needs_clflush & CLFLUSH_AFTER) {
-		mb();
-		clflush(map+offset_in_page(offset) / sizeof(*map));
-		mb();
-	}
+	*cpu = v;
+
+	if (needs_clflush & CLFLUSH_AFTER)
+		drm_clflush_virt_range(cpu, sizeof(*cpu));
 
 	kunmap_atomic(map);
-
 	i915_gem_obj_finish_shmem_access(obj);
 	return 0;
 }
@@ -69,7 +65,8 @@ static int cpu_get(struct drm_i915_gem_object *obj,
 {
 	unsigned int needs_clflush;
 	struct page *page;
-	u32 *map;
+	void *map;
+	u32 *cpu;
 	int err;
 
 	err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
@@ -78,17 +75,16 @@ static int cpu_get(struct drm_i915_gem_object *obj,
 
 	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
 	map = kmap_atomic(page);
-	if (needs_clflush & CLFLUSH_BEFORE) {
-		mb();
-		clflush(map+offset_in_page(offset) / sizeof(*map));
-		mb();
-	}
+	cpu = map + offset_in_page(offset);
+
+	if (needs_clflush & CLFLUSH_BEFORE)
+		drm_clflush_virt_range(cpu, sizeof(*cpu));
 
-	*v = map[offset_in_page(offset) / sizeof(*map)];
+	*v = *cpu;
 
 	kunmap_atomic(map);
-
 	i915_gem_obj_finish_shmem_access(obj);
 	return 0;
 }