Commit de5825be authored by Chris Wilson

drm/i915: Serialise with engine-pm around requests on the kernel_context

As the engine->kernel_context is used within the engine-pm barrier, we
have to be careful when emitting requests outside of the barrier, where
the strict timeline locking rules do not apply. Instead, we must ensure
that engine_park() cannot be entered as we build the request, which is
most simply done by taking an explicit engine-pm wakeref around the
request construction.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191125105858.1718307-1-chris@chris-wilson.co.uk
parent da0ef77e
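
For reference, the serialisation pattern this patch converges on, mirroring the new intel_engine_create_kernel_request() helper added to intel_engine_pm.h below (a minimal caller-side sketch; the surrounding function and error handling are illustrative only):

	struct i915_request *rq;

	/*
	 * Hold an engine-pm wakeref across request construction on the
	 * engine->kernel_context so that __engine_park() cannot run
	 * concurrently while we build the request.
	 */
	intel_engine_pm_get(engine);
	rq = i915_request_create(engine->kernel_context);
	intel_engine_pm_put(engine);

	if (IS_ERR(rq))
		return PTR_ERR(rq);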
@@ -70,6 +70,7 @@
 #include <drm/i915_drm.h>

 #include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_engine_pm.h"
 #include "gt/intel_engine_user.h"
 #include "gt/intel_lrc_reg.h"
 #include "gt/intel_ring.h"
@@ -1265,7 +1266,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 	if (!intel_context_is_pinned(ce))
 		return 0;

-	rq = i915_request_create(ce->engine->kernel_context);
+	rq = intel_engine_create_kernel_request(ce->engine);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
...
@@ -24,6 +24,7 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
 	prandom_seed_state(&prng, i915_selftest.random_seed);

+	intel_engine_pm_get(engine);
 	do {
 		const u32 max_block_size = S16_MAX * PAGE_SIZE;
 		u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
@@ -99,6 +100,7 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
 err_flush:
 	if (err == -ENOMEM)
 		err = 0;
+	intel_engine_pm_put(engine);

 	return err;
 }
...
@@ -6,6 +6,7 @@
 #include <linux/prime_numbers.h>

+#include "gt/intel_engine_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_ring.h"
@@ -200,7 +201,7 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);

-	rq = i915_request_create(ctx->engine->kernel_context);
+	rq = intel_engine_create_kernel_request(ctx->engine);
 	if (IS_ERR(rq)) {
 		i915_vma_unpin(vma);
 		return PTR_ERR(rq);
...
@@ -7,6 +7,7 @@
 #include <linux/prime_numbers.h>

 #include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_requests.h"
 #include "gt/intel_reset.h"
@@ -1190,9 +1191,11 @@ __sseu_test(const char *name,
 	struct igt_spinner *spin = NULL;
 	int ret;

+	intel_engine_pm_get(ce->engine);
+
 	ret = __sseu_prepare(name, flags, ce, &spin);
 	if (ret)
-		return ret;
+		goto out_pm;

 	ret = intel_context_reconfigure_sseu(ce, sseu);
 	if (ret)
@@ -1207,6 +1210,8 @@ __sseu_test(const char *name,
 		igt_spinner_fini(spin);
 		kfree(spin);
 	}
+out_pm:
+	intel_engine_pm_put(ce->engine);

 	return ret;
 }
...
@@ -6,6 +6,7 @@
 #include <linux/prime_numbers.h>

+#include "gt/intel_engine_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"

 #include "huge_gem_object.h"
@@ -536,7 +537,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 		if (err)
 			return err;

-		rq = i915_request_create(engine->kernel_context);
+		rq = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(rq)) {
 			i915_vma_unpin(vma);
 			return PTR_ERR(rq);
...
@@ -41,6 +41,7 @@ static int __perf_fill_blt(struct drm_i915_gem_object *obj)
 		if (!engine)
 			return 0;

+		intel_engine_pm_get(engine);
 		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
 			struct intel_context *ce = engine->kernel_context;
 			ktime_t t0, t1;
@@ -49,17 +50,20 @@ static int __perf_fill_blt(struct drm_i915_gem_object *obj)
 			err = i915_gem_object_fill_blt(obj, ce, 0);
 			if (err)
-				return err;
+				break;

 			err = i915_gem_object_wait(obj,
 						   I915_WAIT_ALL,
 						   MAX_SCHEDULE_TIMEOUT);
 			if (err)
-				return err;
+				break;

 			t1 = ktime_get();
 			t[pass] = ktime_sub(t1, t0);
 		}
+		intel_engine_pm_put(engine);
+		if (err)
+			return err;

 		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
 		pr_info("%s: blt %zd KiB fill: %lld MiB/s\n",
@@ -109,6 +113,7 @@ static int __perf_copy_blt(struct drm_i915_gem_object *src,
 		struct intel_engine_cs *engine;
 		ktime_t t[5];
 		int pass;
+		int err = 0;

 		engine = intel_engine_lookup_user(i915,
 						  I915_ENGINE_CLASS_COPY,
@@ -116,26 +121,29 @@ static int __perf_copy_blt(struct drm_i915_gem_object *src,
 		if (!engine)
 			return 0;

+		intel_engine_pm_get(engine);
 		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
 			struct intel_context *ce = engine->kernel_context;
 			ktime_t t0, t1;
-			int err;

 			t0 = ktime_get();

 			err = i915_gem_object_copy_blt(src, dst, ce);
 			if (err)
-				return err;
+				break;

 			err = i915_gem_object_wait(dst,
 						   I915_WAIT_ALL,
 						   MAX_SCHEDULE_TIMEOUT);
 			if (err)
-				return err;
+				break;

 			t1 = ktime_get();
 			t[pass] = ktime_sub(t1, t0);
 		}
+		intel_engine_pm_put(engine);
+		if (err)
+			return err;

 		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
 		pr_info("%s: blt %zd KiB copy: %lld MiB/s\n",
...
@@ -215,18 +215,26 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
 int intel_engine_flush_barriers(struct intel_engine_cs *engine)
 {
 	struct i915_request *rq;
+	int err = 0;

 	if (llist_empty(&engine->barrier_tasks))
 		return 0;

+	if (!intel_engine_pm_get_if_awake(engine))
+		return 0;
+
 	rq = i915_request_create(engine->kernel_context);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_rpm;
+	}

 	idle_pulse(engine, rq);
 	i915_request_add(rq);

-	return 0;
+out_rpm:
+	intel_engine_pm_put(engine);
+	return err;
 }

 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
...
@@ -7,6 +7,7 @@
 #ifndef INTEL_ENGINE_PM_H
 #define INTEL_ENGINE_PM_H

+#include "i915_request.h"
 #include "intel_engine_types.h"
 #include "intel_wakeref.h"
@@ -41,6 +42,26 @@ static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
 	intel_wakeref_unlock_wait(&engine->wakeref);
 }

+static inline struct i915_request *
+intel_engine_create_kernel_request(struct intel_engine_cs *engine)
+{
+	struct i915_request *rq;
+
+	/*
+	 * The engine->kernel_context is special as it is used inside
+	 * the engine-pm barrier (see __engine_park()), circumventing
+	 * the usual mutexes and relying on the engine-pm barrier
+	 * instead. So whenever we use the engine->kernel_context
+	 * outside of the barrier, we must manually handle the
+	 * engine wakeref to serialise with the use inside.
+	 */
+	intel_engine_pm_get(engine);
+	rq = i915_request_create(engine->kernel_context);
+	intel_engine_pm_put(engine);
+
+	return rq;
+}
+
 void intel_engine_init__pm(struct intel_engine_cs *engine);

 #endif /* INTEL_ENGINE_PM_H */
...
@@ -6,6 +6,7 @@
 #include "i915_drv.h"
 #include "intel_context.h"
+#include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_ring.h"
 #include "intel_workarounds.h"
@@ -1582,7 +1583,9 @@ static int engine_wa_list_verify(struct intel_context *ce,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);

+	intel_engine_pm_get(ce->engine);
 	rq = intel_context_create_request(ce);
+	intel_engine_pm_put(ce->engine);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_vma;
...
@@ -121,7 +121,7 @@ static int __live_context_size(struct intel_engine_cs *engine,
 		goto err_unpin;

 	/* Force the context switch */
-	rq = i915_request_create(engine->kernel_context);
+	rq = intel_engine_create_kernel_request(engine);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_unpin;
...
@@ -132,14 +132,18 @@ static int perf_mi_bb_start(void *arg)
 		u32 cycles[COUNT];
 		int i;

+		intel_engine_pm_get(engine);
 		batch = create_empty_batch(ce);
 		if (IS_ERR(batch)) {
 			err = PTR_ERR(batch);
+			intel_engine_pm_put(engine);
 			break;
 		}

 		err = i915_vma_sync(batch);
 		if (err) {
+			intel_engine_pm_put(engine);
 			i915_vma_put(batch);
 			break;
 		}
@@ -180,6 +184,7 @@ static int perf_mi_bb_start(void *arg)
 			cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
 		}
 		i915_vma_put(batch);
+		intel_engine_pm_put(engine);
 		if (err)
 			break;
@@ -251,15 +256,19 @@ static int perf_mi_noop(void *arg)
 		u32 cycles[COUNT];
 		int i;

+		intel_engine_pm_get(engine);
 		base = create_empty_batch(ce);
 		if (IS_ERR(base)) {
 			err = PTR_ERR(base);
+			intel_engine_pm_put(engine);
 			break;
 		}

 		err = i915_vma_sync(base);
 		if (err) {
 			i915_vma_put(base);
+			intel_engine_pm_put(engine);
 			break;
 		}
@@ -267,6 +276,7 @@ static int perf_mi_noop(void *arg)
 		if (IS_ERR(nop)) {
 			err = PTR_ERR(nop);
 			i915_vma_put(base);
+			intel_engine_pm_put(engine);
 			break;
 		}
@@ -274,6 +284,7 @@ static int perf_mi_noop(void *arg)
 		if (err) {
 			i915_vma_put(nop);
 			i915_vma_put(base);
+			intel_engine_pm_put(engine);
 			break;
 		}
@@ -327,6 +338,7 @@ static int perf_mi_noop(void *arg)
 		}
 		i915_vma_put(nop);
 		i915_vma_put(base);
+		intel_engine_pm_put(engine);
 		if (err)
 			break;
...
@@ -348,7 +348,7 @@ release_queue(struct intel_engine_cs *engine,
 	struct i915_request *rq;
 	u32 *cs;

-	rq = i915_request_create(engine->kernel_context);
+	rq = intel_engine_create_kernel_request(engine);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
@@ -497,7 +497,7 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine)
 {
 	struct i915_request *rq;

-	rq = i915_request_create(engine->kernel_context);
+	rq = intel_engine_create_kernel_request(engine);
 	if (IS_ERR(rq))
 		return rq;
@@ -3698,7 +3698,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
 	u32 *cs;
 	int n;

-	rq = i915_request_create(engine->kernel_context);
+	rq = intel_engine_create_kernel_request(engine);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
...
@@ -261,7 +261,9 @@ static int live_mocs_kernel(void *arg)
 		return err;

 	for_each_engine(engine, gt, id) {
+		intel_engine_pm_get(engine);
 		err = check_mocs_engine(&mocs, engine->kernel_context);
+		intel_engine_pm_put(engine);
 		if (err)
 			break;
 	}
...
@@ -458,7 +458,7 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
 		goto out;
 	}

-	rq = i915_request_create(engine->kernel_context);
+	rq = intel_engine_create_kernel_request(engine);
 	if (IS_ERR(rq))
 		goto out_unpin;
@@ -675,9 +675,7 @@ static int live_hwsp_wrap(void *arg)
 		if (!intel_engine_can_store_dword(engine))
 			continue;

-		intel_engine_pm_get(engine);
-		rq = i915_request_create(engine->kernel_context);
-		intel_engine_pm_put(engine);
+		rq = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto out;
...
@@ -1968,7 +1968,9 @@ static int emit_oa_config(struct i915_perf_stream *stream,
 	if (err)
 		goto err_vma_put;

+	intel_engine_pm_get(ce->engine);
 	rq = i915_request_create(ce);
+	intel_engine_pm_put(ce->engine);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_vma_unpin;
@@ -2165,7 +2167,7 @@ static int gen8_modify_context(struct intel_context *ce,
 	lockdep_assert_held(&ce->pin_mutex);

-	rq = i915_request_create(ce->engine->kernel_context);
+	rq = intel_engine_create_kernel_request(ce->engine);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
...
@@ -99,7 +99,7 @@ __live_active_setup(struct drm_i915_private *i915)
 	for_each_uabi_engine(engine, i915) {
 		struct i915_request *rq;

-		rq = i915_request_create(engine->kernel_context);
+		rq = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			break;
...
@@ -132,7 +132,7 @@ static int live_noa_delay(void *arg)
 	for (i = 0; i < 4; i++)
 		intel_write_status_page(stream->engine, 0x100 + i, 0);

-	rq = i915_request_create(stream->engine->kernel_context);
+	rq = intel_engine_create_kernel_request(stream->engine);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto out;
...
@@ -27,6 +27,7 @@
 #include "gem/i915_gem_pm.h"
 #include "gem/selftests/mock_context.h"

+#include "gt/intel_engine_pm.h"
 #include "gt/intel_gt.h"

 #include "i915_random.h"
@@ -541,6 +542,7 @@ static int live_nop_request(void *arg)
 		if (err)
 			return err;

+		intel_engine_pm_get(engine);
 		for_each_prime_number_from(prime, 1, 8192) {
 			struct i915_request *request = NULL;
@@ -579,6 +581,7 @@ static int live_nop_request(void *arg)
 			if (__igt_timeout(end_time, NULL))
 				break;
 		}
+		intel_engine_pm_put(engine);

 		err = igt_live_test_end(&t);
 		if (err)
@@ -693,10 +696,13 @@ static int live_empty_request(void *arg)
 		if (err)
 			goto out_batch;

+		intel_engine_pm_get(engine);
+
 		/* Warmup / preload */
 		request = empty_request(engine, batch);
 		if (IS_ERR(request)) {
 			err = PTR_ERR(request);
+			intel_engine_pm_put(engine);
 			goto out_batch;
 		}
 		i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
@@ -709,6 +715,7 @@ static int live_empty_request(void *arg)
 				request = empty_request(engine, batch);
 				if (IS_ERR(request)) {
 					err = PTR_ERR(request);
+					intel_engine_pm_put(engine);
 					goto out_batch;
 				}
 			}
@@ -722,6 +729,7 @@ static int live_empty_request(void *arg)
 				break;
 		}
 		i915_request_put(request);
+		intel_engine_pm_put(engine);

 		err = igt_live_test_end(&t);
 		if (err)
@@ -846,7 +854,7 @@ static int live_all_engines(void *arg)
 	idx = 0;
 	for_each_uabi_engine(engine, i915) {
-		request[idx] = i915_request_create(engine->kernel_context);
+		request[idx] = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(request[idx])) {
 			err = PTR_ERR(request[idx]);
 			pr_err("%s: Request allocation failed with err=%d\n",
@@ -963,7 +971,7 @@ static int live_sequential_engines(void *arg)
 			goto out_free;
 		}

-		request[idx] = i915_request_create(engine->kernel_context);
+		request[idx] = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(request[idx])) {
 			err = PTR_ERR(request[idx]);
 			pr_err("%s: Request allocation failed for %s with err=%d\n",
@@ -1068,15 +1076,19 @@ static int __live_parallel_engine1(void *arg)
 	struct intel_engine_cs *engine = arg;
 	IGT_TIMEOUT(end_time);
 	unsigned long count;
+	int err = 0;

 	count = 0;
+	intel_engine_pm_get(engine);
 	do {
 		struct i915_request *rq;
-		int err;

 		rq = i915_request_create(engine->kernel_context);
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			if (err)
+				break;
+		}

 		i915_request_get(rq);
 		i915_request_add(rq);
@@ -1086,13 +1098,14 @@ static int __live_parallel_engine1(void *arg)
 			err = -ETIME;
 		i915_request_put(rq);
 		if (err)
-			return err;
+			break;

 		count++;
 	} while (!__igt_timeout(end_time, NULL));
+	intel_engine_pm_put(engine);

 	pr_info("%s: %lu request + sync\n", engine->name, count);
-	return 0;
+	return err;
 }

 static int __live_parallel_engineN(void *arg)
@@ -1100,21 +1113,26 @@ static int __live_parallel_engineN(void *arg)
 	struct intel_engine_cs *engine = arg;
 	IGT_TIMEOUT(end_time);
 	unsigned long count;
+	int err = 0;

 	count = 0;
+	intel_engine_pm_get(engine);
 	do {
 		struct i915_request *rq;

 		rq = i915_request_create(engine->kernel_context);
-		if (IS_ERR(rq))
-			return PTR_ERR(rq);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			break;
+		}

 		i915_request_add(rq);
 		count++;
 	} while (!__igt_timeout(end_time, NULL));
+	intel_engine_pm_put(engine);

 	pr_info("%s: %lu requests\n", engine->name, count);
-	return 0;
+	return err;
 }

 static bool wake_all(struct drm_i915_private *i915)
@@ -1158,9 +1176,11 @@ static int __live_parallel_spin(void *arg)
 		return -ENOMEM;
 	}

+	intel_engine_pm_get(engine);
 	rq = igt_spinner_create_request(&spin,
 					engine->kernel_context,
 					MI_NOOP); /* no preemption */
+	intel_engine_pm_put(engine);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		if (err == -ENODEV)
...
@@ -506,7 +506,9 @@ static int igt_lmem_write_cpu(void *arg)
 	}

 	/* Put the pages into a known state -- from the gpu for added fun */
+	intel_engine_pm_get(engine);
 	err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
+	intel_engine_pm_put(engine);
 	if (err)
 		goto out_unpin;
...