Commit 2ccdf6a1 authored by Chris Wilson

drm/i915: Pass intel_context to i915_request_create()

Start acquiring the logical intel_context and using that as our primary
means for request allocation. This is the initial step towards allowing
us to avoid requiring struct_mutex for request allocation with the
perma-pinned kernel context, but it also provides a foundation for
breaking up the complex request allocation to handle the different
scenarios inside execbuf.

For the purpose of emitting a request from inside retirement (see the
next patch for engine power management), we also need to lift control
over the timeline mutex to the caller.
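
As a rough sketch of the caller-managed pattern this enables
(illustrative only: the function names come from the headers changed
below, while the GFP_NOWAIT choice and the exact calling convention are
assumptions drawn from this commit message, not from the collapsed
implementation diff):

        struct i915_request *rq;

        /* The caller, not the core, now brackets request construction. */
        intel_context_timeline_lock(ce);

        /* Assumed: __i915_request_create() expects ce's timeline mutex held. */
        rq = __i915_request_create(ce, GFP_NOWAIT);
        if (!IS_ERR(rq))
                /* Assumed: commits the request without dropping the lock. */
                __i915_request_commit(rq);

        intel_context_timeline_unlock(ce);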

v2: Note that the request carries the active reference upon construction.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190424200717.1686-4-chris@chris-wilson.co.uk
parent 6eee33e8
@@ -105,4 +105,16 @@ static inline void intel_context_put(struct intel_context *ce)
         kref_put(&ce->ref, ce->ops->destroy);
 }
 
+static inline void intel_context_timeline_lock(struct intel_context *ce)
+        __acquires(&ce->ring->timeline->mutex)
+{
+        mutex_lock(&ce->ring->timeline->mutex);
+}
+
+static inline void intel_context_timeline_unlock(struct intel_context *ce)
+        __releases(&ce->ring->timeline->mutex)
+{
+        mutex_unlock(&ce->ring->timeline->mutex);
+}
+
 #endif /* __INTEL_CONTEXT_H__ */
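
The __acquires() and __releases() markers on these new helpers are
annotations for the sparse static checker: they declare that the
functions take and drop ce->ring->timeline->mutex, so unbalanced
lock/unlock pairs at call sites can be flagged during static analysis.
Routing the lock through helpers also means callers never have to name
the backing mutex directly.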
@@ -783,7 +783,7 @@ static void restart_work(struct work_struct *work)
                 if (!intel_engine_is_idle(engine))
                         continue;
 
-                rq = i915_request_alloc(engine, i915->kernel_context);
+                rq = i915_request_create(engine->kernel_context);
                 if (!IS_ERR(rq))
                         i915_request_add(rq);
         }
@@ -1772,7 +1772,6 @@ static int switch_context(struct i915_request *rq)
         u32 hw_flags = 0;
         int ret, i;
 
-        lockdep_assert_held(&rq->i915->drm.struct_mutex);
         GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
 
         if (ppgtt) {
@@ -1902,8 +1901,6 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
         struct i915_request *target;
         long timeout;
 
-        lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
-
         if (intel_ring_update_space(ring) >= bytes)
                 return 0;
@@ -1356,7 +1356,7 @@ static int engine_wa_list_verify(struct intel_engine_cs *engine,
         if (IS_ERR(vma))
                 return PTR_ERR(vma);
 
-        rq = i915_request_alloc(engine, engine->kernel_context->gem_context);
+        rq = i915_request_create(engine->kernel_context);
         if (IS_ERR(rq)) {
                 err = PTR_ERR(rq);
                 goto err_vma;
@@ -942,7 +942,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
                 struct intel_ring *ring;
                 struct i915_request *rq;
 
-                rq = i915_request_alloc(engine, i915->kernel_context);
+                rq = i915_request_create(engine->kernel_context);
                 if (IS_ERR(rq))
                         return PTR_ERR(rq);
@@ -1188,7 +1188,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
         /* Submitting requests etc needs the hw awake. */
         wakeref = intel_runtime_pm_get(i915);
 
-        rq = i915_request_alloc(ce->engine, i915->kernel_context);
+        rq = i915_request_create(ce->engine->kernel_context);
         if (IS_ERR(rq)) {
                 ret = PTR_ERR(rq);
                 goto out_put;
@@ -1762,7 +1762,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
          * Apply the configuration by doing one context restore of the edited
          * context image.
          */
-        rq = i915_request_alloc(engine, dev_priv->kernel_context);
+        rq = i915_request_create(engine->kernel_context);
         if (IS_ERR(rq))
                 return PTR_ERR(rq);
This diff is collapsed.
@@ -239,6 +239,13 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
         return fence->ops == &i915_fence_ops;
 }
 
+struct i915_request * __must_check
+__i915_request_create(struct intel_context *ce, gfp_t gfp);
+struct i915_request * __must_check
+i915_request_create(struct intel_context *ce);
+
+struct i915_request *__i915_request_commit(struct i915_request *request);
+
 struct i915_request * __must_check
 i915_request_alloc(struct intel_engine_cs *engine,
                    struct i915_gem_context *ctx);
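
Read together with the commit message (the implementation itself sits
in the diff collapsed above), the split appears to be: i915_request_create(ce)
is the convenience entry point that manages the timeline mutex itself;
__i915_request_create(ce, gfp) is for callers that already hold it; and
__i915_request_commit() queues a request built under the caller's
locking. All three operate on an intel_context instead of an
engine/GEM-context pair, which is the point of this patch. (This
reading is an inference from the declarations, not a description of the
collapsed code.)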
@@ -235,10 +235,9 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 
 static struct i915_request *alloc_request(struct intel_overlay *overlay)
 {
-        struct drm_i915_private *dev_priv = overlay->i915;
-        struct intel_engine_cs *engine = dev_priv->engine[RCS0];
+        struct intel_engine_cs *engine = overlay->i915->engine[RCS0];
 
-        return i915_request_alloc(engine, dev_priv->kernel_context);
+        return i915_request_create(engine->kernel_context);
 }
 
 /* overlay needs to be disable in OCMD reg */
@@ -46,7 +46,7 @@ static int __live_active_setup(struct drm_i915_private *i915,
         for_each_engine(engine, i915, id) {
                 struct i915_request *rq;
 
-                rq = i915_request_alloc(engine, i915->kernel_context);
+                rq = i915_request_create(engine->kernel_context);
                 if (IS_ERR(rq)) {
                         err = PTR_ERR(rq);
                         break;
@@ -202,7 +202,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
         if (IS_ERR(vma))
                 return PTR_ERR(vma);
 
-        rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
+        rq = i915_request_create(i915->engine[RCS0]->kernel_context);
         if (IS_ERR(rq)) {
                 i915_vma_unpin(vma);
                 return PTR_ERR(rq);
@@ -468,7 +468,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
         if (err)
                 return err;
 
-        rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
+        rq = i915_request_create(i915->engine[RCS0]->kernel_context);
         if (IS_ERR(rq)) {
                 i915_vma_unpin(vma);
                 return PTR_ERR(rq);
@@ -551,8 +551,7 @@ static int live_nop_request(void *arg)
                 times[1] = ktime_get_raw();
 
                 for (n = 0; n < prime; n++) {
-                        request = i915_request_alloc(engine,
-                                                     i915->kernel_context);
+                        request = i915_request_create(engine->kernel_context);
                         if (IS_ERR(request)) {
                                 err = PTR_ERR(request);
                                 goto out_unlock;
@@ -649,7 +648,7 @@ empty_request(struct intel_engine_cs *engine,
         struct i915_request *request;
         int err;
 
-        request = i915_request_alloc(engine, engine->i915->kernel_context);
+        request = i915_request_create(engine->kernel_context);
         if (IS_ERR(request))
                 return request;
@@ -853,7 +852,7 @@ static int live_all_engines(void *arg)
         }
 
         for_each_engine(engine, i915, id) {
-                request[id] = i915_request_alloc(engine, i915->kernel_context);
+                request[id] = i915_request_create(engine->kernel_context);
                 if (IS_ERR(request[id])) {
                         err = PTR_ERR(request[id]);
                         pr_err("%s: Request allocation failed with err=%d\n",
@@ -962,7 +961,7 @@ static int live_sequential_engines(void *arg)
                         goto out_unlock;
                 }
 
-                request[id] = i915_request_alloc(engine, i915->kernel_context);
+                request[id] = i915_request_create(engine->kernel_context);
                 if (IS_ERR(request[id])) {
                         err = PTR_ERR(request[id]);
                         pr_err("%s: Request allocation failed for %s with err=%d\n",
@@ -454,7 +454,7 @@ tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
                 goto out;
         }
 
-        rq = i915_request_alloc(engine, engine->i915->kernel_context);
+        rq = i915_request_create(engine->kernel_context);
         if (IS_ERR(rq))
                 goto out_unpin;
@@ -678,7 +678,7 @@ static int live_hwsp_wrap(void *arg)
                 if (!intel_engine_can_store_dword(engine))
                         continue;
 
-                rq = i915_request_alloc(engine, i915->kernel_context);
+                rq = i915_request_create(engine->kernel_context);
                 if (IS_ERR(rq)) {
                         err = PTR_ERR(rq);
                         goto out;