Commit 8f98d4ba authored by Chris Wilson

drm/i915/selftests: Reorder request allocation vs vma pinning

Impose the constraint that all vmas are pinned before a request is allocated.
This simplifies request construction and should help unravel the lock
interdependencies later; the reordered construction flow is sketched below.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181204141522.13640-3-chris@chris-wilson.co.uk
parent 4377d4e0
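The change makes each selftest request constructor follow the same order: pin every vma first, allocate the request only after pinning has succeeded, attach the vmas via move_to_active(), and on any later failure cancel the request with i915_request_skip() before i915_request_add(), unpinning only afterwards. Below is a minimal sketch of that shape, condensed from the three functions touched in this diff; it is a fragment (vma, engine and ctx come from the surrounding test), and the single unpin/cancel labels are a simplification of the per-vma labels (unpin_hws/unpin_vma) used in the real code:

	err = i915_vma_pin(vma, 0, 0, PIN_USER);	/* pin all vma up front */
	if (err)
		return ERR_PTR(err);

	rq = i915_request_alloc(engine, ctx);		/* only now allocate the request */
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	err = move_to_active(vma, rq, 0);		/* track the vma on the request */
	if (err)
		goto cancel_rq;

	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

cancel_rq:
	if (err)
		i915_request_skip(rq, err);		/* a failed request is still added... */
	i915_request_add(rq);				/* ...so it is retired cleanly */
unpin:
	i915_vma_unpin(vma);				/* safe: the vma is now tracked as active */
	return err ? ERR_PTR(err) : rq;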
@@ -972,7 +972,6 @@ static int gpu_write(struct i915_vma *vma,
 {
 	struct i915_request *rq;
 	struct i915_vma *batch;
-	int flags = 0;
 	int err;
 	GEM_BUG_ON(!intel_engine_can_store_dword(engine));
@@ -981,14 +980,14 @@ static int gpu_write(struct i915_vma *vma,
 	if (err)
 		return err;
-	rq = i915_request_alloc(engine, ctx);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
 	batch = gpu_write_dw(vma, dword * sizeof(u32), value);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto err_request;
+	if (IS_ERR(batch))
+		return PTR_ERR(batch);
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_batch;
 	}
 	err = i915_vma_move_to_active(batch, rq, 0);
@@ -996,21 +995,21 @@ static int gpu_write(struct i915_vma *vma,
 		goto err_request;
 	i915_gem_object_set_active_reference(batch->obj);
-	i915_vma_unpin(batch);
-	i915_vma_close(batch);
-	err = engine->emit_bb_start(rq,
-				    batch->node.start, batch->node.size,
-				    flags);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	if (err)
 		goto err_request;
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = engine->emit_bb_start(rq,
+				    batch->node.start, batch->node.size,
+				    0);
+err_request:
 	if (err)
 		i915_request_skip(rq, err);
-err_request:
 	i915_request_add(rq);
+err_batch:
+	i915_vma_unpin(batch);
+	i915_vma_close(batch);
 	return err;
 }
@@ -68,48 +68,65 @@ static u64 hws_address(const struct i915_vma *hws,
 	return hws->node.start + seqno_offset(rq->fence.context);
 }
-static int emit_recurse_batch(struct igt_spinner *spin,
-			      struct i915_request *rq,
-			      u32 arbitration_command)
+static int move_to_active(struct i915_vma *vma,
+			  struct i915_request *rq,
+			  unsigned int flags)
 {
-	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+	int err;
+	err = i915_vma_move_to_active(vma, rq, flags);
+	if (err)
+		return err;
+	if (!i915_gem_object_has_active_reference(vma->obj)) {
+		i915_gem_object_get(vma->obj);
+		i915_gem_object_set_active_reference(vma->obj);
+	}
+	return 0;
+}
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+			   struct i915_gem_context *ctx,
+			   struct intel_engine_cs *engine,
+			   u32 arbitration_command)
+{
+	struct i915_address_space *vm = &ctx->ppgtt->vm;
+	struct i915_request *rq = NULL;
 	struct i915_vma *hws, *vma;
 	u32 *batch;
 	int err;
 	vma = i915_vma_instance(spin->obj, vm, NULL);
 	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+		return ERR_CAST(vma);
 	hws = i915_vma_instance(spin->hws, vm, NULL);
 	if (IS_ERR(hws))
-		return PTR_ERR(hws);
+		return ERR_CAST(hws);
 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 	if (err)
-		return err;
+		return ERR_PTR(err);
 	err = i915_vma_pin(hws, 0, 0, PIN_USER);
 	if (err)
 		goto unpin_vma;
-	err = i915_vma_move_to_active(vma, rq, 0);
-	if (err)
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
 		goto unpin_hws;
-	if (!i915_gem_object_has_active_reference(vma->obj)) {
-		i915_gem_object_get(vma->obj);
-		i915_gem_object_set_active_reference(vma->obj);
 	}
-	err = i915_vma_move_to_active(hws, rq, 0);
+	err = move_to_active(vma, rq, 0);
 	if (err)
-		goto unpin_hws;
+		goto cancel_rq;
-	if (!i915_gem_object_has_active_reference(hws->obj)) {
-		i915_gem_object_get(hws->obj);
-		i915_gem_object_set_active_reference(hws->obj);
-	}
+	err = move_to_active(hws, rq, 0);
+	if (err)
+		goto cancel_rq;
 	batch = spin->batch;
@@ -127,35 +144,18 @@ static int emit_recurse_batch(struct igt_spinner *spin,
 	i915_gem_chipset_flush(spin->i915);
-	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+cancel_rq:
+	if (err) {
+		i915_request_skip(rq, err);
+		i915_request_add(rq);
+	}
 unpin_hws:
 	i915_vma_unpin(hws);
 unpin_vma:
 	i915_vma_unpin(vma);
-	return err;
-}
-struct i915_request *
-igt_spinner_create_request(struct igt_spinner *spin,
-			   struct i915_gem_context *ctx,
-			   struct intel_engine_cs *engine,
-			   u32 arbitration_command)
-{
-	struct i915_request *rq;
-	int err;
-	rq = i915_request_alloc(engine, ctx);
-	if (IS_ERR(rq))
-		return rq;
-	err = emit_recurse_batch(spin, rq, arbitration_command);
-	if (err) {
-		i915_request_add(rq);
-		return ERR_PTR(err);
-	}
-	return rq;
+	return err ? ERR_PTR(err) : rq;
 }
 static u32
@@ -103,52 +103,87 @@ static u64 hws_address(const struct i915_vma *hws,
 	return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
 }
-static int emit_recurse_batch(struct hang *h,
-			      struct i915_request *rq)
+static int move_to_active(struct i915_vma *vma,
+			  struct i915_request *rq,
+			  unsigned int flags)
 {
+	int err;
+	err = i915_vma_move_to_active(vma, rq, flags);
+	if (err)
+		return err;
+	if (!i915_gem_object_has_active_reference(vma->obj)) {
+		i915_gem_object_get(vma->obj);
+		i915_gem_object_set_active_reference(vma->obj);
+	}
+	return 0;
+}
+static struct i915_request *
+hang_create_request(struct hang *h, struct intel_engine_cs *engine)
+{
 	struct drm_i915_private *i915 = h->i915;
 	struct i915_address_space *vm =
-		rq->gem_context->ppgtt ?
-		&rq->gem_context->ppgtt->vm :
-		&i915->ggtt.vm;
+		h->ctx->ppgtt ? &h->ctx->ppgtt->vm : &i915->ggtt.vm;
+	struct i915_request *rq = NULL;
 	struct i915_vma *hws, *vma;
 	unsigned int flags;
 	u32 *batch;
 	int err;
+	if (i915_gem_object_is_active(h->obj)) {
+		struct drm_i915_gem_object *obj;
+		void *vaddr;
+		obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
+		if (IS_ERR(obj))
+			return ERR_CAST(obj);
+		vaddr = i915_gem_object_pin_map(obj,
+						i915_coherent_map_type(h->i915));
+		if (IS_ERR(vaddr)) {
+			i915_gem_object_put(obj);
+			return ERR_CAST(vaddr);
+		}
+		i915_gem_object_unpin_map(h->obj);
+		i915_gem_object_put(h->obj);
+		h->obj = obj;
+		h->batch = vaddr;
+	}
 	vma = i915_vma_instance(h->obj, vm, NULL);
 	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+		return ERR_CAST(vma);
 	hws = i915_vma_instance(h->hws, vm, NULL);
 	if (IS_ERR(hws))
-		return PTR_ERR(hws);
+		return ERR_CAST(hws);
 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 	if (err)
-		return err;
+		return ERR_PTR(err);
 	err = i915_vma_pin(hws, 0, 0, PIN_USER);
 	if (err)
 		goto unpin_vma;
-	err = i915_vma_move_to_active(vma, rq, 0);
-	if (err)
+	rq = i915_request_alloc(engine, h->ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
 		goto unpin_hws;
-	if (!i915_gem_object_has_active_reference(vma->obj)) {
-		i915_gem_object_get(vma->obj);
-		i915_gem_object_set_active_reference(vma->obj);
 	}
-	err = i915_vma_move_to_active(hws, rq, 0);
+	err = move_to_active(vma, rq, 0);
 	if (err)
-		goto unpin_hws;
+		goto cancel_rq;
-	if (!i915_gem_object_has_active_reference(hws->obj)) {
-		i915_gem_object_get(hws->obj);
-		i915_gem_object_set_active_reference(hws->obj);
-	}
+	err = move_to_active(hws, rq, 0);
+	if (err)
+		goto cancel_rq;
 	batch = h->batch;
 	if (INTEL_GEN(i915) >= 8) {
@@ -213,52 +248,16 @@ static int emit_recurse_batch(struct hang *h,
 	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+cancel_rq:
+	if (err) {
+		i915_request_skip(rq, err);
+		i915_request_add(rq);
+	}
 unpin_hws:
 	i915_vma_unpin(hws);
 unpin_vma:
 	i915_vma_unpin(vma);
-	return err;
-}
-static struct i915_request *
-hang_create_request(struct hang *h, struct intel_engine_cs *engine)
-{
-	struct i915_request *rq;
-	int err;
-	if (i915_gem_object_is_active(h->obj)) {
-		struct drm_i915_gem_object *obj;
-		void *vaddr;
-		obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
-		if (IS_ERR(obj))
-			return ERR_CAST(obj);
-		vaddr = i915_gem_object_pin_map(obj,
-						i915_coherent_map_type(h->i915));
-		if (IS_ERR(vaddr)) {
-			i915_gem_object_put(obj);
-			return ERR_CAST(vaddr);
-		}
-		i915_gem_object_unpin_map(h->obj);
-		i915_gem_object_put(h->obj);
-		h->obj = obj;
-		h->batch = vaddr;
-	}
-	rq = i915_request_alloc(engine, h->ctx);
-	if (IS_ERR(rq))
-		return rq;
-	err = emit_recurse_batch(h, rq);
-	if (err) {
-		i915_request_add(rq);
-		return ERR_PTR(err);
-	}
-	return rq;
+	return err ? ERR_PTR(err) : rq;
 }
 static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
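For orientation, here is a hedged sketch of how a selftest might consume one of the reworked constructors after this change: on success the caller receives a live request and is responsible for i915_request_add(); on failure the constructor has already skipped and added the request (the cancel_rq path) and unpinned its vmas, so only an ERR_PTR escapes. The fragment assumes the spinner helpers (igt_spinner_init(), igt_wait_for_spinner(), igt_spinner_end(), igt_spinner_fini()) from the selftest library of this era, and that i915, ctx and engine are supplied by the test harness; none of this is part of the diff above.

	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	/* Caller is assumed to hold i915->drm.struct_mutex, as in the other selftests. */
	err = igt_spinner_init(&spin, i915);
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ctx, engine, MI_ARB_CHECK);
	if (IS_ERR(rq)) {
		/* vma already unpinned and any request already cancelled internally */
		err = PTR_ERR(rq);
		goto out_spin;
	}

	i915_request_get(rq);
	i915_request_add(rq);		/* submit; the batch spins until told to stop */

	if (!igt_wait_for_spinner(&spin, rq)) {
		err = -ETIME;
		goto out_rq;
	}

	/* ... exercise preemption, resets, etc. against the spinning request ... */

out_rq:
	igt_spinner_end(&spin);		/* ask the batch to terminate */
	i915_request_put(rq);
out_spin:
	igt_spinner_fini(&spin);
	return err;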