Commit 24f1d3cc authored by Chris Wilson

drm/i915: Refactor execlists default context pinning

Refactor pinning and unpinning of contexts, such that the default
context for an engine is pinned during initialisation and unpinned
during teardown (pinning of the context handles the reference counting).
Thus we can eliminate the special case handling of the default context
that was required to mask that it was not being pinned normally.
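
[Reviewer note: a minimal, self-contained C sketch of the reference-counted
pin/unpin scheme this patch standardises on; all names here are invented for
illustration and are not the i915 API. The first pin acquires the backing
resources, nested pins only bump the count, the last unpin releases, and the
default context simply holds one pin from engine init to teardown.]

	#include <assert.h>
	#include <stdio.h>

	struct context {
		int pin_count;	/* users currently requiring the mapping */
		int mapped;	/* stand-in for the GGTT pin + kmap in i915 */
	};

	static int context_pin(struct context *ctx)
	{
		if (ctx->pin_count++)
			return 0;	/* already resident; just bump the count */

		ctx->mapped = 1;	/* first user: acquire the mapping */
		return 0;
	}

	static void context_unpin(struct context *ctx)
	{
		assert(ctx->pin_count > 0);	/* GEM_BUG_ON() in the real code */
		if (--ctx->pin_count)
			return;

		ctx->mapped = 0;	/* last user: release the mapping */
	}

	int main(void)
	{
		struct context dctx = { 0 };

		context_pin(&dctx);	/* engine init: pin the default context */
		context_pin(&dctx);	/* a request using it: a count bump only */
		context_unpin(&dctx);	/* request retired */
		context_unpin(&dctx);	/* engine teardown: final unpin releases */

		printf("mapped=%d pin_count=%d\n", dctx.mapped, dctx.pin_count);
		return 0;
	}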

v2: Rebalance context_queue after rebasing.
v3: Rebase to -nightly (not 40 patches in)
v4: Rebase onto request_alloc unwinding
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1461833819-3991-19-git-send-email-chris@chris-wilson.co.uk
parent 7069b144
drivers/gpu/drm/i915/i915_debugfs.c
@@ -2095,9 +2095,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 		return ret;
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link)
-		if (ctx != dev_priv->kernel_context)
-			for_each_engine(engine, dev_priv)
-				i915_dump_lrc_obj(m, ctx, engine);
+		for_each_engine(engine, dev_priv)
+			i915_dump_lrc_obj(m, ctx, engine);
 
 	mutex_unlock(&dev->struct_mutex);
drivers/gpu/drm/i915/i915_drv.h
@@ -868,6 +868,7 @@ struct intel_context {
 		struct i915_vma *lrc_vma;
 		u64 lrc_desc;
 		uint32_t *lrc_reg_state;
+		bool initialised;
 	} engine[I915_NUM_ENGINES];
 
 	struct list_head link;
drivers/gpu/drm/i915/i915_gem.c
@@ -2722,7 +2722,7 @@ void i915_gem_request_free(struct kref *req_ref)
 		i915_gem_request_remove_from_client(req);
 
 	if (ctx) {
-		if (i915.enable_execlists && ctx != req->i915->kernel_context)
+		if (i915.enable_execlists)
 			intel_lr_context_unpin(ctx, req->engine);
 
 		i915_gem_context_unreference(ctx);
drivers/gpu/drm/i915/intel_lrc.c
@@ -592,9 +592,7 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
-	if (request->ctx != request->i915->kernel_context)
-		intel_lr_context_pin(request->ctx, engine);
-
+	intel_lr_context_pin(request->ctx, request->engine);
 	i915_gem_request_reference(request);
 
 	spin_lock_bh(&engine->execlist_lock);
@@ -678,6 +676,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
+	struct intel_engine_cs *engine = request->engine;
 	int ret;
 
 	/* Flush enough space to reduce the likelihood of waiting after
@@ -686,7 +685,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 	 */
 	request->reserved_space += MIN_SPACE_FOR_ADD_REQUEST;
 
-	request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
+	request->ringbuf = request->ctx->engine[engine->id].ringbuf;
 
 	if (i915.enable_guc_submission) {
 		/*
@@ -701,22 +700,34 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 			return ret;
 	}
 
-	if (request->ctx != request->i915->kernel_context) {
-		ret = intel_lr_context_pin(request->ctx, request->engine);
-		if (ret)
-			return ret;
-	}
+	ret = intel_lr_context_pin(request->ctx, engine);
+	if (ret)
+		return ret;
 
 	ret = intel_ring_begin(request, 0);
 	if (ret)
 		goto err_unpin;
 
+	if (!request->ctx->engine[engine->id].initialised) {
+		ret = engine->init_context(request);
+		if (ret)
+			goto err_unpin;
+
+		request->ctx->engine[engine->id].initialised = true;
+	}
+
+	/* Note that after this point, we have committed to using
+	 * this request as it is being used to both track the
+	 * state of engine initialisation and liveness of the
+	 * golden renderstate above. Think twice before you try
+	 * to cancel/unwind this request now.
+	 */
+
 	request->reserved_space -= MIN_SPACE_FOR_ADD_REQUEST;
 	return 0;
 
 err_unpin:
-	if (request->ctx != request->i915->kernel_context)
-		intel_lr_context_unpin(request->ctx, request->engine);
+	intel_lr_context_unpin(request->ctx, engine);
 	return ret;
 }
@@ -755,12 +766,8 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	if (engine->last_context != request->ctx) {
 		if (engine->last_context)
 			intel_lr_context_unpin(engine->last_context, engine);
-		if (request->ctx != request->i915->kernel_context) {
-			intel_lr_context_pin(request->ctx, engine);
-			engine->last_context = request->ctx;
-		} else {
-			engine->last_context = NULL;
-		}
+		intel_lr_context_pin(request->ctx, engine);
+		engine->last_context = request->ctx;
 	}
 
 	if (dev_priv->guc.execbuf_client)
@@ -880,12 +887,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
 	spin_unlock_bh(&engine->execlist_lock);
 
 	list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
-		struct intel_context *ctx = req->ctx;
-		struct drm_i915_gem_object *ctx_obj =
-				ctx->engine[engine->id].state;
-
-		if (ctx_obj && (ctx != req->i915->kernel_context))
-			intel_lr_context_unpin(ctx, engine);
+		intel_lr_context_unpin(req->ctx, engine);
 
 		list_del(&req->execlist_link);
 		i915_gem_request_unreference(req);
@@ -930,23 +932,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static int intel_lr_context_do_pin(struct intel_context *ctx,
-				   struct intel_engine_cs *engine)
+static int intel_lr_context_pin(struct intel_context *ctx,
+				struct intel_engine_cs *engine)
 {
-	struct drm_device *dev = engine->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
-	struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
+	struct drm_i915_private *dev_priv = ctx->i915;
+	struct drm_i915_gem_object *ctx_obj;
+	struct intel_ringbuffer *ringbuf;
 	void *vaddr;
 	u32 *lrc_reg_state;
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
+	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+
+	if (ctx->engine[engine->id].pin_count++)
+		return 0;
 
+	ctx_obj = ctx->engine[engine->id].state;
 	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
 				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
 	if (ret)
-		return ret;
+		goto err;
 
 	vaddr = i915_gem_object_pin_map(ctx_obj);
 	if (IS_ERR(vaddr)) {
@@ -956,10 +961,12 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
 
 	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
+	ringbuf = ctx->engine[engine->id].ringbuf;
 	ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
 	if (ret)
 		goto unpin_map;
 
+	i915_gem_context_reference(ctx);
 	ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
 	intel_lr_context_descriptor_update(ctx, engine);
 	lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
@@ -970,51 +977,39 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
 	if (i915.enable_guc_submission)
 		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 
-	return ret;
+	return 0;
 
 unpin_map:
 	i915_gem_object_unpin_map(ctx_obj);
 unpin_ctx_obj:
 	i915_gem_object_ggtt_unpin(ctx_obj);
+err:
+	ctx->engine[engine->id].pin_count = 0;
 	return ret;
 }
 
-static int intel_lr_context_pin(struct intel_context *ctx,
-				struct intel_engine_cs *engine)
+void intel_lr_context_unpin(struct intel_context *ctx,
+			    struct intel_engine_cs *engine)
 {
-	int ret = 0;
+	struct drm_i915_gem_object *ctx_obj;
 
-	if (ctx->engine[engine->id].pin_count++ == 0) {
-		ret = intel_lr_context_do_pin(ctx, engine);
-		if (ret)
-			goto reset_pin_count;
+	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+	GEM_BUG_ON(ctx->engine[engine->id].pin_count == 0);
 
-		i915_gem_context_reference(ctx);
-	}
-	return ret;
+	if (--ctx->engine[engine->id].pin_count)
+		return;
 
-reset_pin_count:
-	ctx->engine[engine->id].pin_count = 0;
-	return ret;
-}
+	intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
 
-void intel_lr_context_unpin(struct intel_context *ctx,
-			    struct intel_engine_cs *engine)
-{
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+	ctx_obj = ctx->engine[engine->id].state;
+	i915_gem_object_unpin_map(ctx_obj);
+	i915_gem_object_ggtt_unpin(ctx_obj);
 
-	WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
-	if (--ctx->engine[engine->id].pin_count == 0) {
-		i915_gem_object_unpin_map(ctx_obj);
-		intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
-		i915_gem_object_ggtt_unpin(ctx_obj);
-		ctx->engine[engine->id].lrc_vma = NULL;
-		ctx->engine[engine->id].lrc_desc = 0;
-		ctx->engine[engine->id].lrc_reg_state = NULL;
+	ctx->engine[engine->id].lrc_vma = NULL;
+	ctx->engine[engine->id].lrc_desc = 0;
+	ctx->engine[engine->id].lrc_reg_state = NULL;
 
-		i915_gem_context_unreference(ctx);
-	}
+	i915_gem_context_unreference(ctx);
 }
 
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
@@ -1914,6 +1909,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 		i915_gem_object_unpin_map(engine->status_page.obj);
 		engine->status_page.obj = NULL;
 	}
+	intel_lr_context_unpin(dev_priv->kernel_context, engine);
 
 	engine->idle_lite_restore_wa = 0;
 	engine->disable_lite_restore_wa = false;
@@ -2017,11 +2013,10 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
 		goto error;
 
 	/* As this is the default context, always pin it */
-	ret = intel_lr_context_do_pin(dctx, engine);
+	ret = intel_lr_context_pin(dctx, engine);
 	if (ret) {
-		DRM_ERROR(
-			"Failed to pin and map ringbuffer %s: %d\n",
-			engine->name, ret);
+		DRM_ERROR("Failed to pin context for %s: %d\n",
+			  engine->name, ret);
 		goto error;
 	}
@@ -2442,12 +2437,6 @@ void intel_lr_context_free(struct intel_context *ctx)
 		if (!ctx_obj)
 			continue;
 
-		if (ctx == ctx->i915->kernel_context) {
-			intel_unpin_ringbuffer_obj(ringbuf);
-			i915_gem_object_ggtt_unpin(ctx_obj);
-			i915_gem_object_unpin_map(ctx_obj);
-		}
-
 		WARN_ON(ctx->engine[i].pin_count);
 		intel_ringbuffer_free(ringbuf);
 		drm_gem_object_unreference(&ctx_obj->base);
@@ -2543,25 +2532,8 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 
 	ctx->engine[engine->id].ringbuf = ringbuf;
 	ctx->engine[engine->id].state = ctx_obj;
+	ctx->engine[engine->id].initialised = engine->init_context == NULL;
 
-	if (ctx != ctx->i915->kernel_context && engine->init_context) {
-		struct drm_i915_gem_request *req;
-
-		req = i915_gem_request_alloc(engine, ctx);
-		if (IS_ERR(req)) {
-			ret = PTR_ERR(req);
-			DRM_ERROR("ring create req: %d\n", ret);
-			goto error_ringbuf;
-		}
-
-		ret = engine->init_context(req);
-		i915_add_request_no_flush(req);
-		if (ret) {
-			DRM_ERROR("ring init context: %d\n", ret);
-			goto error_ringbuf;
-		}
-	}
-
 	return 0;
 
 error_ringbuf: