Commit 15b6c924 authored by Maarten Lankhorst, committed by Joonas Lahtinen

drm/i915: Move i915_vma_lock in the selftests to avoid lock inversion, v3.

Make sure vma_lock is not used as inner lock when kernel context is used,
and add ww handling where appropriate.

Ensure that execbuf selftests keep passing by using ww handling.

Changes since v2:
- Fix i915_gem_context finally.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200819140904.1708856-22-maarten.lankhorst@linux.intel.com
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 8a929c9e
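
The ww (wound/wait) acquire-context pattern applied throughout the hunks below has this general shape. This is a minimal sketch only, with the obj/vma setup elided; the calls are the ones used in the diff. The object lock is taken through the acquire context, so on contention (-EDEADLK) every held lock is dropped and the whole transaction retried, instead of nesting vma_lock inside other locks:

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	/* Take the object lock through the ww acquire context. */
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
	if (!err) {
		/* ... submit work against the pinned vma ... */
		i915_vma_unpin(vma);
	}
	if (err == -EDEADLK) {
		/* Contention: drop all held ww locks, wait, then retry. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);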
...
@@ -201,25 +201,25 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
 
 	i915_gem_object_lock(ctx->obj, NULL);
 	err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
-	i915_gem_object_unlock(ctx->obj);
 	if (err)
-		return err;
+		goto out_unlock;
 
 	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto out_unlock;
+	}
 
 	rq = intel_engine_create_kernel_request(ctx->engine);
 	if (IS_ERR(rq)) {
-		i915_vma_unpin(vma);
-		return PTR_ERR(rq);
+		err = PTR_ERR(rq);
+		goto out_unpin;
 	}
 
 	cs = intel_ring_begin(rq, 4);
 	if (IS_ERR(cs)) {
-		i915_request_add(rq);
-		i915_vma_unpin(vma);
-		return PTR_ERR(cs);
+		err = PTR_ERR(cs);
+		goto out_rq;
 	}
 
 	if (INTEL_GEN(ctx->engine->i915) >= 8) {
@@ -240,14 +240,16 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
 	}
 	intel_ring_advance(rq, cs);
 
-	i915_vma_lock(vma);
 	err = i915_request_await_object(rq, vma->obj, true);
 	if (err == 0)
 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
-	i915_vma_unpin(vma);
 
+out_rq:
 	i915_request_add(rq);
+out_unpin:
+	i915_vma_unpin(vma);
+out_unlock:
+	i915_gem_object_unlock(ctx->obj);
 
 	return err;
 }
...
...
@@ -893,24 +893,15 @@ static int igt_shared_ctx_exec(void *arg)
 	return err;
 }
 
-static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
+static int rpcs_query_batch(struct drm_i915_gem_object *rpcs, struct i915_vma *vma)
 {
-	struct drm_i915_gem_object *obj;
 	u32 *cmd;
-	int err;
-
-	if (INTEL_GEN(vma->vm->i915) < 8)
-		return ERR_PTR(-EINVAL);
 
-	obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
+	GEM_BUG_ON(INTEL_GEN(vma->vm->i915) < 8);
 
-	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
-	if (IS_ERR(cmd)) {
-		err = PTR_ERR(cmd);
-		goto err;
-	}
+	cmd = i915_gem_object_pin_map(rpcs, I915_MAP_WB);
+	if (IS_ERR(cmd))
+		return PTR_ERR(cmd);
 
 	*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
 	*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
@@ -918,26 +909,12 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
 	*cmd++ = upper_32_bits(vma->node.start);
 	*cmd = MI_BATCH_BUFFER_END;
 
-	__i915_gem_object_flush_map(obj, 0, 64);
-	i915_gem_object_unpin_map(obj);
+	__i915_gem_object_flush_map(rpcs, 0, 64);
+	i915_gem_object_unpin_map(rpcs);
 
 	intel_gt_chipset_flush(vma->vm->gt);
 
-	vma = i915_vma_instance(obj, vma->vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		goto err;
-
-	return vma;
-
-err:
-	i915_gem_object_put(obj);
-	return ERR_PTR(err);
+	return 0;
 }
 
 static int
@@ -945,32 +922,52 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 		struct intel_context *ce,
 		struct i915_request **rq_out)
 {
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_request *rq;
+	struct i915_gem_ww_ctx ww;
 	struct i915_vma *batch;
 	struct i915_vma *vma;
+	struct drm_i915_gem_object *rpcs;
 	int err;
 
 	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
 
+	if (INTEL_GEN(i915) < 8)
+		return -EINVAL;
+
 	vma = i915_vma_instance(obj, ce->vm, NULL);
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
-	i915_gem_object_lock(obj, NULL);
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	i915_gem_object_unlock(obj);
-	if (err)
-		return err;
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		return err;
+	rpcs = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(rpcs))
+		return PTR_ERR(rpcs);
 
-	batch = rpcs_query_batch(vma);
+	batch = i915_vma_instance(rpcs, ce->vm, NULL);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
-		goto err_vma;
+		goto err_put;
 	}
 
+	i915_gem_ww_ctx_init(&ww, false);
+retry:
+	err = i915_gem_object_lock(obj, &ww);
+	if (!err)
+		err = i915_gem_object_lock(rpcs, &ww);
+	if (!err)
+		err = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (!err)
+		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
+	if (err)
+		goto err_put;
+
+	err = i915_vma_pin_ww(batch, &ww, 0, 0, PIN_USER);
+	if (err)
+		goto err_vma;
+
+	err = rpcs_query_batch(rpcs, vma);
+	if (err)
+		goto err_batch;
+
 	rq = i915_request_create(ce);
 	if (IS_ERR(rq)) {
@@ -978,19 +975,15 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 		goto err_batch;
 	}
 
-	i915_vma_lock(batch);
 	err = i915_request_await_object(rq, batch->obj, false);
 	if (err == 0)
 		err = i915_vma_move_to_active(batch, rq, 0);
-	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
 
-	i915_vma_lock(vma);
 	err = i915_request_await_object(rq, vma->obj, true);
 	if (err == 0)
 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
 	if (err)
 		goto skip_request;
 
@@ -1006,23 +999,24 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 	if (err)
 		goto skip_request;
 
-	i915_vma_unpin_and_release(&batch, 0);
-	i915_vma_unpin(vma);
-
 	*rq_out = i915_request_get(rq);
 
-	i915_request_add(rq);
-
-	return 0;
-
 skip_request:
-	i915_request_set_error_once(rq, err);
+	if (err)
+		i915_request_set_error_once(rq, err);
 	i915_request_add(rq);
+
 err_batch:
-	i915_vma_unpin_and_release(&batch, 0);
+	i915_vma_unpin(batch);
 err_vma:
 	i915_vma_unpin(vma);
+err_put:
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+
+	i915_gem_object_put(rpcs);
 	return err;
 }
...
...
@@ -528,31 +528,42 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 	for_each_uabi_engine(engine, i915) {
 		struct i915_request *rq;
 		struct i915_vma *vma;
+		struct i915_gem_ww_ctx ww;
 		int err;
 
 		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
 
-		err = i915_vma_pin(vma, 0, 0, PIN_USER);
+		i915_gem_ww_ctx_init(&ww, false);
+retry:
+		err = i915_gem_object_lock(obj, &ww);
+		if (!err)
+			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
 		if (err)
-			return err;
+			goto err;
 
 		rq = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(rq)) {
-			i915_vma_unpin(vma);
-			return PTR_ERR(rq);
+			err = PTR_ERR(rq);
+			goto err_unpin;
 		}
 
-		i915_vma_lock(vma);
 		err = i915_request_await_object(rq, vma->obj, true);
 		if (err == 0)
 			err = i915_vma_move_to_active(vma, rq,
 						      EXEC_OBJECT_WRITE);
-		i915_vma_unlock(vma);
 
 		i915_request_add(rq);
+err_unpin:
 		i915_vma_unpin(vma);
+err:
+		if (err == -EDEADLK) {
+			err = i915_gem_ww_ctx_backoff(&ww);
+			if (!err)
+				goto retry;
+		}
+		i915_gem_ww_ctx_fini(&ww);
 		if (err)
 			return err;
 	}
@@ -1123,6 +1134,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 	for_each_uabi_engine(engine, i915) {
 		struct i915_request *rq;
 		struct i915_vma *vma;
+		struct i915_gem_ww_ctx ww;
 
 		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
 		if (IS_ERR(vma)) {
@@ -1130,9 +1142,13 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 			goto out_unmap;
 		}
 
-		err = i915_vma_pin(vma, 0, 0, PIN_USER);
+		i915_gem_ww_ctx_init(&ww, false);
+retry:
+		err = i915_gem_object_lock(obj, &ww);
+		if (!err)
+			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
 		if (err)
-			goto out_unmap;
+			goto out_ww;
 
 		rq = i915_request_create(engine->kernel_context);
 		if (IS_ERR(rq)) {
@@ -1140,11 +1156,9 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 			goto out_unpin;
 		}
 
-		i915_vma_lock(vma);
 		err = i915_request_await_object(rq, vma->obj, false);
 		if (err == 0)
 			err = i915_vma_move_to_active(vma, rq, 0);
-		i915_vma_unlock(vma);
 
 		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
 		i915_request_get(rq);
@@ -1166,6 +1180,13 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 
 out_unpin:
 		i915_vma_unpin(vma);
+out_ww:
+		if (err == -EDEADLK) {
+			err = i915_gem_ww_ctx_backoff(&ww);
+			if (!err)
+				goto retry;
+		}
+		i915_gem_ww_ctx_fini(&ww);
 		if (err)
 			goto out_unmap;
 	}
...
...
@@ -77,20 +77,20 @@ create_spin_counter(struct intel_engine_cs *engine,
 
 	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {
-		i915_gem_object_put(obj);
-		return vma;
+		err = PTR_ERR(vma);
+		goto err_put;
 	}
 
 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err) {
-		i915_vma_put(vma);
-		return ERR_PTR(err);
-	}
+	if (err)
+		goto err_unlock;
+
+	i915_vma_lock(vma);
 
 	base = i915_gem_object_pin_map(obj, I915_MAP_WC);
 	if (IS_ERR(base)) {
-		i915_gem_object_put(obj);
-		return ERR_CAST(base);
+		err = PTR_ERR(base);
+		goto err_unpin;
 	}
 
 	cs = base;
@@ -134,6 +134,14 @@ create_spin_counter(struct intel_engine_cs *engine,
 	*cancel = base + loop;
 	*counter = srm ? memset32(base + end, 0, 1) : NULL;
 	return vma;
+
+err_unpin:
+	i915_vma_unpin(vma);
+err_unlock:
+	i915_vma_unlock(vma);
+err_put:
+	i915_gem_object_put(obj);
+	return ERR_PTR(err);
 }
 
 static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms)
@@ -639,7 +647,6 @@ int live_rps_frequency_cs(void *arg)
 			goto err_vma;
 		}
 
-		i915_vma_lock(vma);
 		err = i915_request_await_object(rq, vma->obj, false);
 		if (!err)
 			err = i915_vma_move_to_active(vma, rq, 0);
@@ -647,7 +654,6 @@ int live_rps_frequency_cs(void *arg)
 			err = rq->engine->emit_bb_start(rq,
 							vma->node.start,
 							PAGE_SIZE, 0);
-		i915_vma_unlock(vma);
 		i915_request_add(rq);
 		if (err)
 			goto err_vma;
@@ -708,6 +714,7 @@ int live_rps_frequency_cs(void *arg)
 		i915_gem_object_flush_map(vma->obj);
 		i915_gem_object_unpin_map(vma->obj);
 		i915_vma_unpin(vma);
+		i915_vma_unlock(vma);
 		i915_vma_put(vma);
 
 		st_engine_heartbeat_enable(engine);
@@ -781,7 +788,6 @@ int live_rps_frequency_srm(void *arg)
 			goto err_vma;
 		}
 
-		i915_vma_lock(vma);
 		err = i915_request_await_object(rq, vma->obj, false);
 		if (!err)
 			err = i915_vma_move_to_active(vma, rq, 0);
@@ -789,7 +795,6 @@ int live_rps_frequency_srm(void *arg)
 			err = rq->engine->emit_bb_start(rq,
 							vma->node.start,
 							PAGE_SIZE, 0);
-		i915_vma_unlock(vma);
 		i915_request_add(rq);
 		if (err)
 			goto err_vma;
@@ -849,6 +854,7 @@ int live_rps_frequency_srm(void *arg)
 		i915_gem_object_flush_map(vma->obj);
 		i915_gem_object_unpin_map(vma->obj);
 		i915_vma_unpin(vma);
+		i915_vma_unlock(vma);
 		i915_vma_put(vma);
 
 		st_engine_heartbeat_enable(engine);
...
...
@@ -862,6 +862,8 @@ static int live_all_engines(void *arg)
 		goto out_free;
 	}
 
+	i915_vma_lock(batch);
+
 	idx = 0;
 	for_each_uabi_engine(engine, i915) {
 		request[idx] = intel_engine_create_kernel_request(engine);
@@ -872,11 +874,9 @@ static int live_all_engines(void *arg)
 			goto out_request;
 		}
 
-		i915_vma_lock(batch);
 		err = i915_request_await_object(request[idx], batch->obj, 0);
 		if (err == 0)
 			err = i915_vma_move_to_active(batch, request[idx], 0);
-		i915_vma_unlock(batch);
 		GEM_BUG_ON(err);
 
 		err = engine->emit_bb_start(request[idx],
@@ -891,6 +891,8 @@ static int live_all_engines(void *arg)
 		idx++;
 	}
 
+	i915_vma_unlock(batch);
+
 	idx = 0;
 	for_each_uabi_engine(engine, i915) {
 		if (i915_request_completed(request[idx])) {
@@ -981,12 +983,13 @@ static int live_sequential_engines(void *arg)
 			goto out_free;
 		}
 
+		i915_vma_lock(batch);
+
 		request[idx] = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(request[idx])) {
 			err = PTR_ERR(request[idx]);
 			pr_err("%s: Request allocation failed for %s with err=%d\n",
 			       __func__, engine->name, err);
-			goto out_request;
+			goto out_unlock;
 		}
 
 		if (prev) {
@@ -996,16 +999,14 @@ static int live_sequential_engines(void *arg)
 				i915_request_add(request[idx]);
 				pr_err("%s: Request await failed for %s with err=%d\n",
 				       __func__, engine->name, err);
-				goto out_request;
+				goto out_unlock;
 			}
 		}
 
-		i915_vma_lock(batch);
 		err = i915_request_await_object(request[idx],
 						batch->obj, false);
 		if (err == 0)
 			err = i915_vma_move_to_active(batch, request[idx], 0);
-		i915_vma_unlock(batch);
 		GEM_BUG_ON(err);
 
 		err = engine->emit_bb_start(request[idx],
@@ -1020,6 +1021,11 @@ static int live_sequential_engines(void *arg)
 		prev = request[idx];
 		idx++;
+
+out_unlock:
+		i915_vma_unlock(batch);
+		if (err)
+			goto out_request;
 	}
 
 	idx = 0;
...