Commit 56d7bd74 authored by Andrzej Hajda, committed by Matthew Auld

drm/i915/selftests: add igt_vma_move_to_active_unlocked

All calls to i915_vma_move_to_active are surrounded by a vma lock/unlock pair,
and several tests carry their own local helper for this. Let's replace them
with a common helper.
The patch should not introduce any functional changes.
Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221019215906.295296-3-andrzej.hajda@intel.com
parent 2a76fc89
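
At a glance, the change swaps the open-coded lock/call/unlock sequence that each selftest duplicated for one shared helper. A minimal sketch of the before/after pattern, using placeholder vma, rq, flags and err names (the helper itself is the one added to igt_gem_utils.h in the diff below):

	/* Before: each selftest wrapped the call in the vma lock itself. */
	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	/* After: the shared helper takes and drops the lock internally. */
	err = igt_vma_move_to_active_unlocked(vma, rq, flags);
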
@@ -13,6 +13,7 @@
 #include "gt/intel_gt_regs.h"
 #include "gem/i915_gem_lmem.h"
 
+#include "gem/selftests/igt_gem_utils.h"
 #include "selftests/igt_flush_test.h"
 #include "selftests/mock_drm.h"
 #include "selftests/i915_random.h"
@@ -457,19 +458,6 @@ static int verify_buffer(const struct tiled_blits *t,
 	return ret;
 }
 
-static int move_to_active(struct i915_vma *vma,
-			  struct i915_request *rq,
-			  unsigned int flags)
-{
-	int err;
-
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, flags);
-	i915_vma_unlock(vma);
-
-	return err;
-}
-
 static int pin_buffer(struct i915_vma *vma, u64 addr)
 {
 	int err;
@@ -523,11 +511,11 @@ tiled_blit(struct tiled_blits *t,
 		goto err_bb;
 	}
 
-	err = move_to_active(t->batch, rq, 0);
+	err = igt_vma_move_to_active_unlocked(t->batch, rq, 0);
 	if (!err)
-		err = move_to_active(src->vma, rq, 0);
+		err = igt_vma_move_to_active_unlocked(src->vma, rq, 0);
 	if (!err)
-		err = move_to_active(dst->vma, rq, 0);
+		err = igt_vma_move_to_active_unlocked(dst->vma, rq, 0);
 	if (!err)
 		err = rq->engine->emit_bb_start(rq,
 						t->batch->node.start,
...
@@ -9,6 +9,8 @@
 #include <linux/types.h>
 
+#include "i915_vma.h"
+
 struct i915_request;
 struct i915_gem_context;
 struct i915_vma;
@@ -29,4 +31,16 @@ int igt_gpu_fill_dw(struct intel_context *ce,
 		    struct i915_vma *vma, u64 offset,
 		    unsigned long count, u32 val);
 
+static inline int __must_check
+igt_vma_move_to_active_unlocked(struct i915_vma *vma, struct i915_request *rq,
+				unsigned int flags)
+{
+	int err;
+
+	i915_vma_lock(vma);
+	err = _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
+	i915_vma_unlock(vma);
+	return err;
+}
+
 #endif /* __IGT_GEM_UTILS_H__ */
@@ -99,19 +99,6 @@ static u64 hws_address(const struct i915_vma *hws,
 	return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
 }
 
-static int move_to_active(struct i915_vma *vma,
-			  struct i915_request *rq,
-			  unsigned int flags)
-{
-	int err;
-
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, flags);
-	i915_vma_unlock(vma);
-
-	return err;
-}
-
 static struct i915_request *
 hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 {
@@ -172,11 +159,11 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 		goto unpin_hws;
 	}
 
-	err = move_to_active(vma, rq, 0);
+	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 	if (err)
 		goto cancel_rq;
 
-	err = move_to_active(hws, rq, 0);
+	err = igt_vma_move_to_active_unlocked(hws, rq, 0);
 	if (err)
 		goto cancel_rq;
@@ -1516,13 +1503,10 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
 		}
 	}
 
-	i915_vma_lock(arg.vma);
-	err = i915_vma_move_to_active(arg.vma, rq, flags);
+	err = igt_vma_move_to_active_unlocked(arg.vma, rq, flags);
 	if (err)
 		pr_err("[%s] Move to active failed: %d!\n", engine->name, err);
-	i915_vma_unlock(arg.vma);
 
 	if (flags & EXEC_OBJECT_NEEDS_FENCE)
 		i915_vma_unpin_fence(arg.vma);
 	i915_vma_unpin(arg.vma);
...
@@ -1049,19 +1049,6 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
 	return batch;
 }
 
-static int move_to_active(struct i915_request *rq,
-			  struct i915_vma *vma,
-			  unsigned int flags)
-{
-	int err;
-
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, flags);
-	i915_vma_unlock(vma);
-
-	return err;
-}
-
 static struct i915_request *
 record_registers(struct intel_context *ce,
 		 struct i915_vma *before,
@@ -1087,19 +1074,19 @@ record_registers(struct intel_context *ce,
 	if (IS_ERR(rq))
 		goto err_after;
 
-	err = move_to_active(rq, before, EXEC_OBJECT_WRITE);
+	err = igt_vma_move_to_active_unlocked(before, rq, EXEC_OBJECT_WRITE);
 	if (err)
 		goto err_rq;
 
-	err = move_to_active(rq, b_before, 0);
+	err = igt_vma_move_to_active_unlocked(b_before, rq, 0);
 	if (err)
 		goto err_rq;
 
-	err = move_to_active(rq, after, EXEC_OBJECT_WRITE);
+	err = igt_vma_move_to_active_unlocked(after, rq, EXEC_OBJECT_WRITE);
 	if (err)
 		goto err_rq;
 
-	err = move_to_active(rq, b_after, 0);
+	err = igt_vma_move_to_active_unlocked(b_after, rq, 0);
 	if (err)
 		goto err_rq;
@@ -1237,7 +1224,7 @@ static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
 		goto err_batch;
 	}
 
-	err = move_to_active(rq, batch, 0);
+	err = igt_vma_move_to_active_unlocked(batch, rq, 0);
 	if (err)
 		goto err_rq;
...
@@ -7,6 +7,7 @@
 #include "gt/intel_gpu_commands.h"
 #include "i915_selftest.h"
 
+#include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
 #include "selftests/igt_reset.h"
 #include "selftests/igt_spinner.h"
...
@@ -119,19 +119,6 @@ static u64 hws_address(const struct i915_vma *hws,
 	return hws->node.start + seqno_offset(rq->fence.context);
 }
 
-static int move_to_active(struct i915_vma *vma,
-			  struct i915_request *rq,
-			  unsigned int flags)
-{
-	int err;
-
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, flags);
-	i915_vma_unlock(vma);
-
-	return err;
-}
-
 struct i915_request *
 igt_spinner_create_request(struct igt_spinner *spin,
 			   struct intel_context *ce,
@@ -162,11 +149,11 @@ igt_spinner_create_request(struct igt_spinner *spin,
 	if (IS_ERR(rq))
 		return ERR_CAST(rq);
 
-	err = move_to_active(vma, rq, 0);
+	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
 	if (err)
 		goto cancel_rq;
 
-	err = move_to_active(hws, rq, 0);
+	err = igt_vma_move_to_active_unlocked(hws, rq, 0);
 	if (err)
 		goto cancel_rq;
...