Commit 70d6894d authored by Chris Wilson

drm/i915: Serialize against vma moves

Make sure that when submitting requests, we always serialize against
potential vma moves and clflushes.

Time for a i915_request_await_vma() interface!
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190819112033.30638-1-chris@chris-wilson.co.uk
parent a1e37b02
...@@ -1192,8 +1192,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, ...@@ -1192,8 +1192,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto skip_request; goto skip_request;
i915_vma_lock(batch); i915_vma_lock(batch);
GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); err = i915_request_await_object(rq, batch->obj, false);
err = i915_vma_move_to_active(batch, rq, 0); if (err == 0)
err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch); i915_vma_unlock(batch);
if (err) if (err)
goto skip_request; goto skip_request;
......
...@@ -106,7 +106,9 @@ int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq) ...@@ -106,7 +106,9 @@ int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
int err; int err;
i915_vma_lock(vma); i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, 0); err = i915_request_await_object(rq, vma->obj, false);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, 0);
i915_vma_unlock(vma); i915_vma_unlock(vma);
if (unlikely(err)) if (unlikely(err))
return err; return err;
...@@ -171,7 +173,9 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj, ...@@ -171,7 +173,9 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
} }
i915_vma_lock(vma); i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma); i915_vma_unlock(vma);
if (unlikely(err)) if (unlikely(err))
goto out_request; goto out_request;
......
...@@ -228,7 +228,9 @@ static int gpu_set(struct drm_i915_gem_object *obj, ...@@ -228,7 +228,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
intel_ring_advance(rq, cs); intel_ring_advance(rq, cs);
i915_vma_lock(vma); i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma); i915_vma_unlock(vma);
i915_vma_unpin(vma); i915_vma_unpin(vma);
......
...@@ -666,13 +666,17 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, ...@@ -666,13 +666,17 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
goto err_request; goto err_request;
i915_vma_lock(batch); i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0); err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch); i915_vma_unlock(batch);
if (err) if (err)
goto skip_request; goto skip_request;
i915_vma_lock(vma); i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma); i915_vma_unlock(vma);
if (err) if (err)
goto skip_request; goto skip_request;
...@@ -1218,7 +1222,9 @@ static int write_to_scratch(struct i915_gem_context *ctx, ...@@ -1218,7 +1222,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto err_request; goto err_request;
i915_vma_lock(vma); i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, 0); err = i915_request_await_object(rq, vma->obj, false);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, 0);
i915_vma_unlock(vma); i915_vma_unlock(vma);
if (err) if (err)
goto skip_request; goto skip_request;
...@@ -1315,7 +1321,9 @@ static int read_from_scratch(struct i915_gem_context *ctx, ...@@ -1315,7 +1321,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto err_request; goto err_request;
i915_vma_lock(vma); i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma); i915_vma_unlock(vma);
if (err) if (err)
goto skip_request; goto skip_request;
......
...@@ -351,7 +351,10 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) ...@@ -351,7 +351,10 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
} }
i915_vma_lock(vma); i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq,
EXEC_OBJECT_WRITE);
i915_vma_unlock(vma); i915_vma_unlock(vma);
i915_request_add(rq); i915_request_add(rq);
......
...@@ -139,13 +139,17 @@ int igt_gpu_fill_dw(struct i915_vma *vma, ...@@ -139,13 +139,17 @@ int igt_gpu_fill_dw(struct i915_vma *vma,
goto err_request; goto err_request;
i915_vma_lock(batch); i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0); err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch); i915_vma_unlock(batch);
if (err) if (err)
goto skip_request; goto skip_request;
i915_vma_lock(vma); i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma); i915_vma_unlock(vma);
if (err) if (err)
goto skip_request; goto skip_request;
......
...@@ -222,7 +222,9 @@ int intel_renderstate_emit(struct i915_request *rq) ...@@ -222,7 +222,9 @@ int intel_renderstate_emit(struct i915_request *rq)
} }
i915_vma_lock(so.vma); i915_vma_lock(so.vma);
err = i915_vma_move_to_active(so.vma, rq, 0); err = i915_request_await_object(rq, so.vma->obj, false);
if (err == 0)
err = i915_vma_move_to_active(so.vma, rq, 0);
i915_vma_unlock(so.vma); i915_vma_unlock(so.vma);
err_unpin: err_unpin:
i915_vma_unpin(so.vma); i915_vma_unpin(so.vma);
......
...@@ -118,7 +118,10 @@ static int move_to_active(struct i915_vma *vma, ...@@ -118,7 +118,10 @@ static int move_to_active(struct i915_vma *vma,
int err; int err;
i915_vma_lock(vma); i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, flags); err = i915_request_await_object(rq, vma->obj,
flags & EXEC_OBJECT_WRITE);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, flags);
i915_vma_unlock(vma); i915_vma_unlock(vma);
return err; return err;
...@@ -1237,7 +1240,10 @@ static int __igt_reset_evict_vma(struct intel_gt *gt, ...@@ -1237,7 +1240,10 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
} }
i915_vma_lock(arg.vma); i915_vma_lock(arg.vma);
err = i915_vma_move_to_active(arg.vma, rq, flags); err = i915_request_await_object(rq, arg.vma->obj,
flags & EXEC_OBJECT_WRITE);
if (err == 0)
err = i915_vma_move_to_active(arg.vma, rq, flags);
i915_vma_unlock(arg.vma); i915_vma_unlock(arg.vma);
if (flags & EXEC_OBJECT_NEEDS_FENCE) if (flags & EXEC_OBJECT_NEEDS_FENCE)
......
...@@ -1459,11 +1459,13 @@ static int smoke_submit(struct preempt_smoke *smoke, ...@@ -1459,11 +1459,13 @@ static int smoke_submit(struct preempt_smoke *smoke,
if (vma) { if (vma) {
i915_vma_lock(vma); i915_vma_lock(vma);
err = rq->engine->emit_bb_start(rq, err = i915_request_await_object(rq, vma->obj, false);
vma->node.start,
PAGE_SIZE, 0);
if (!err) if (!err)
err = i915_vma_move_to_active(vma, rq, 0); err = i915_vma_move_to_active(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
vma->node.start,
PAGE_SIZE, 0);
i915_vma_unlock(vma); i915_vma_unlock(vma);
} }
......
...@@ -113,7 +113,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) ...@@ -113,7 +113,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
} }
i915_vma_lock(vma); i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma); i915_vma_unlock(vma);
if (err) if (err)
goto err_req; goto err_req;
......
...@@ -876,7 +876,9 @@ static int live_all_engines(void *arg) ...@@ -876,7 +876,9 @@ static int live_all_engines(void *arg)
request[id]->batch = batch; request[id]->batch = batch;
i915_vma_lock(batch); i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, request[id], 0); err = i915_request_await_object(request[id], batch->obj, 0);
if (err == 0)
err = i915_vma_move_to_active(batch, request[id], 0);
i915_vma_unlock(batch); i915_vma_unlock(batch);
GEM_BUG_ON(err); GEM_BUG_ON(err);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment