Commit 9bad40a2 authored by Chris Wilson

drm/i915/selftests: Always flush before unpinning after writing

Be consistent, and even when we know we used a WC mapping, flush the mapped
object after writing into it. The flush understands the mapping type and
will only clflush if !I915_MAP_WC, but will always insert a wmb [sfence]
so that we can be sure that all writes are visible.

v2: Add the unconditional wmb so we know that we always flush the
writes to memory/HW at that point.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200511141304.599-1-chris@chris-wilson.co.uk
parent b0a997ae
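Every call site in the diff below converges on the same ordering the commit message describes: write through the pinned CPU mapping, flush the map (which always issues a wmb() and only clflushes for non-WC mappings), drop the mapping, and finally flush the chipset so the GPU observes the writes. A minimal sketch of that pattern for reference, using the same i915 helpers as the hunks; the wrapper name emit_bbe() is invented for illustration and error paths are trimmed:

/* Illustrative sketch only: emit_bbe() is a hypothetical helper, not part of the patch. */
static int emit_bbe(struct drm_i915_gem_object *obj, struct intel_gt *gt)
{
        u32 *cmd;

        cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        *cmd = MI_BATCH_BUFFER_END;

        i915_gem_object_flush_map(obj);  /* wmb(); clflush only if the map is not I915_MAP_WC */
        i915_gem_object_unpin_map(obj);

        intel_gt_chipset_flush(gt);      /* make the writes visible to the HW */

        return 0;
}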
@@ -78,10 +78,12 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
         } while (rem);
 
         *cmd = MI_BATCH_BUFFER_END;
-        intel_gt_chipset_flush(ce->vm->gt);
 
+        i915_gem_object_flush_map(pool->obj);
         i915_gem_object_unpin_map(pool->obj);
 
+        intel_gt_chipset_flush(ce->vm->gt);
+
         batch = i915_vma_instance(pool->obj, ce->vm, NULL);
         if (IS_ERR(batch)) {
                 err = PTR_ERR(batch);
@@ -289,10 +291,12 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
         } while (rem);
 
         *cmd = MI_BATCH_BUFFER_END;
-        intel_gt_chipset_flush(ce->vm->gt);
 
+        i915_gem_object_flush_map(pool->obj);
         i915_gem_object_unpin_map(pool->obj);
 
+        intel_gt_chipset_flush(ce->vm->gt);
+
         batch = i915_vma_instance(pool->obj, ce->vm, NULL);
         if (IS_ERR(batch)) {
                 err = PTR_ERR(batch);
@@ -391,6 +391,7 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
         GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
                                      offset, size, obj->base.size));
 
+        wmb(); /* let all previous writes be visible to coherent partners */
         obj->mm.dirty = true;
 
         if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
@@ -158,6 +158,8 @@ static int wc_set(struct context *ctx, unsigned long offset, u32 v)
                 return PTR_ERR(map);
 
         map[offset / sizeof(*map)] = v;
+
+        __i915_gem_object_flush_map(ctx->obj, offset, sizeof(*map));
         i915_gem_object_unpin_map(ctx->obj);
 
         return 0;
@@ -84,6 +84,7 @@ igt_emit_store_dw(struct i915_vma *vma,
         }
         *cmd = MI_BATCH_BUFFER_END;
 
+        i915_gem_object_flush_map(obj);
         i915_gem_object_unpin_map(obj);
 
         intel_gt_chipset_flush(vma->vm->gt);
@@ -54,6 +54,8 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
         *cs++ = STACK_MAGIC;
 
         *cs++ = MI_BATCH_BUFFER_END;
+
+        i915_gem_object_flush_map(obj);
         i915_gem_object_unpin_map(obj);
 
         vma->private = intel_context_create(engine); /* dummy residuals */
@@ -727,6 +727,7 @@ int live_rps_frequency_cs(void *arg)
 
 err_vma:
         *cancel = MI_BATCH_BUFFER_END;
+        i915_gem_object_flush_map(vma->obj);
         i915_gem_object_unpin_map(vma->obj);
         i915_vma_unpin(vma);
         i915_vma_put(vma);
@@ -868,6 +869,7 @@ int live_rps_frequency_srm(void *arg)
 
 err_vma:
         *cancel = MI_BATCH_BUFFER_END;
+        i915_gem_object_flush_map(vma->obj);
         i915_gem_object_unpin_map(vma->obj);
         i915_vma_unpin(vma);
         i915_vma_put(vma);
@@ -816,10 +816,12 @@ static int recursive_batch_resolve(struct i915_vma *batch)
                 return PTR_ERR(cmd);
 
         *cmd = MI_BATCH_BUFFER_END;
-        intel_gt_chipset_flush(batch->vm->gt);
 
+        __i915_gem_object_flush_map(batch->obj, 0, sizeof(*cmd));
         i915_gem_object_unpin_map(batch->obj);
 
+        intel_gt_chipset_flush(batch->vm->gt);
+
         return 0;
 }
@@ -1060,9 +1062,12 @@ static int live_sequential_engines(void *arg)
                                                       I915_MAP_WC);
                 if (!IS_ERR(cmd)) {
                         *cmd = MI_BATCH_BUFFER_END;
-                        intel_gt_chipset_flush(engine->gt);
 
+                        __i915_gem_object_flush_map(request[idx]->batch->obj,
+                                                    0, sizeof(*cmd));
                         i915_gem_object_unpin_map(request[idx]->batch->obj);
+
+                        intel_gt_chipset_flush(engine->gt);
                 }
 
                 i915_vma_put(request[idx]->batch);