Commit d6376374 authored by Chris Wilson

drm/i915: Only allocate preempt context when required

If we remove some hardcoded assumptions about the preempt context having
a fixed id, reserved from use by normal user contexts, we may only
allocate the i915_gem_context when required. Then the subsequent
decisions on using preemption reduce to having the preempt context
available.

v2: Include an assert that we don't allocate the preempt context twice.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Arkadiusz Hiler <arkadiusz.hiler@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Acked-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180207210544.26351-3-chris@chris-wilson.co.uk
Reviewed-by: Michel Thierry <michel.thierry@intel.com>
parent 3fed1808
...@@ -449,12 +449,18 @@ destroy_kernel_context(struct i915_gem_context **ctxp) ...@@ -449,12 +449,18 @@ destroy_kernel_context(struct i915_gem_context **ctxp)
i915_gem_context_free(ctx); i915_gem_context_free(ctx);
} }
/*
 * Decide whether this device needs a dedicated high-priority preempt
 * context. Currently this reduces to whether the hardware/firmware
 * supports logical-ring preemption (HAS_LOGICAL_RING_PREEMPTION is a
 * platform capability macro defined elsewhere in the driver).
 * Callers use the presence of i915->preempt_context — allocated only
 * when this returns true — as the runtime "preemption available" flag.
 */
static bool needs_preempt_context(struct drm_i915_private *i915)
{
return HAS_LOGICAL_RING_PREEMPTION(i915);
}
int i915_gem_contexts_init(struct drm_i915_private *dev_priv) int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{ {
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
int err;
/* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context); GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(dev_priv->preempt_context);
INIT_LIST_HEAD(&dev_priv->contexts.list); INIT_LIST_HEAD(&dev_priv->contexts.list);
INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker); INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
...@@ -468,8 +474,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) ...@@ -468,8 +474,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN); ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
if (IS_ERR(ctx)) { if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context\n"); DRM_ERROR("Failed to create default global context\n");
err = PTR_ERR(ctx); return PTR_ERR(ctx);
goto err;
} }
/* /*
* For easy recognisablity, we want the kernel context to be 0 and then * For easy recognisablity, we want the kernel context to be 0 and then
...@@ -479,23 +484,18 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) ...@@ -479,23 +484,18 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
dev_priv->kernel_context = ctx; dev_priv->kernel_context = ctx;
/* highest priority; preempting task */ /* highest priority; preempting task */
ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX); if (needs_preempt_context(dev_priv)) {
if (IS_ERR(ctx)) { ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
DRM_ERROR("Failed to create default preempt context\n"); if (!IS_ERR(ctx))
err = PTR_ERR(ctx); dev_priv->preempt_context = ctx;
goto err_kernel_context; else
DRM_ERROR("Failed to create preempt context; disabling preemption\n");
} }
dev_priv->preempt_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n", DRM_DEBUG_DRIVER("%s context support initialized\n",
dev_priv->engine[RCS]->context_size ? "logical" : dev_priv->engine[RCS]->context_size ? "logical" :
"fake"); "fake");
return 0; return 0;
err_kernel_context:
destroy_kernel_context(&dev_priv->kernel_context);
err:
return err;
} }
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
...@@ -521,7 +521,8 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915) ...@@ -521,7 +521,8 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
{ {
lockdep_assert_held(&i915->drm.struct_mutex); lockdep_assert_held(&i915->drm.struct_mutex);
destroy_kernel_context(&i915->preempt_context); if (i915->preempt_context)
destroy_kernel_context(&i915->preempt_context);
destroy_kernel_context(&i915->kernel_context); destroy_kernel_context(&i915->kernel_context);
/* Must free all deferred contexts (via flush_workqueue) first */ /* Must free all deferred contexts (via flush_workqueue) first */
......
...@@ -631,7 +631,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine) ...@@ -631,7 +631,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* Similarly the preempt context must always be available so that * Similarly the preempt context must always be available so that
* we can interrupt the engine at any time. * we can interrupt the engine at any time.
*/ */
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) { if (engine->i915->preempt_context) {
ring = engine->context_pin(engine, ring = engine->context_pin(engine,
engine->i915->preempt_context); engine->i915->preempt_context);
if (IS_ERR(ring)) { if (IS_ERR(ring)) {
...@@ -656,7 +656,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine) ...@@ -656,7 +656,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
err_breadcrumbs: err_breadcrumbs:
intel_engine_fini_breadcrumbs(engine); intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt: err_unpin_preempt:
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) if (engine->i915->preempt_context)
engine->context_unpin(engine, engine->i915->preempt_context); engine->context_unpin(engine, engine->i915->preempt_context);
err_unpin_kernel: err_unpin_kernel:
engine->context_unpin(engine, engine->i915->kernel_context); engine->context_unpin(engine, engine->i915->kernel_context);
...@@ -686,7 +686,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) ...@@ -686,7 +686,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state) if (engine->default_state)
i915_gem_object_put(engine->default_state); i915_gem_object_put(engine->default_state);
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) if (engine->i915->preempt_context)
engine->context_unpin(engine, engine->i915->preempt_context); engine->context_unpin(engine, engine->i915->preempt_context);
engine->context_unpin(engine, engine->i915->kernel_context); engine->context_unpin(engine, engine->i915->kernel_context);
} }
......
...@@ -688,7 +688,7 @@ static void guc_dequeue(struct intel_engine_cs *engine) ...@@ -688,7 +688,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
goto unlock; goto unlock;
if (port_isset(port)) { if (port_isset(port)) {
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) { if (engine->i915->preempt_context) {
struct guc_preempt_work *preempt_work = struct guc_preempt_work *preempt_work =
&engine->i915->guc.preempt_work[engine->id]; &engine->i915->guc.preempt_work[engine->id];
...@@ -984,17 +984,19 @@ static int guc_clients_create(struct intel_guc *guc) ...@@ -984,17 +984,19 @@ static int guc_clients_create(struct intel_guc *guc)
} }
guc->execbuf_client = client; guc->execbuf_client = client;
client = guc_client_alloc(dev_priv, if (dev_priv->preempt_context) {
INTEL_INFO(dev_priv)->ring_mask, client = guc_client_alloc(dev_priv,
GUC_CLIENT_PRIORITY_KMD_HIGH, INTEL_INFO(dev_priv)->ring_mask,
dev_priv->preempt_context); GUC_CLIENT_PRIORITY_KMD_HIGH,
if (IS_ERR(client)) { dev_priv->preempt_context);
DRM_ERROR("Failed to create GuC client for preemption!\n"); if (IS_ERR(client)) {
guc_client_free(guc->execbuf_client); DRM_ERROR("Failed to create GuC client for preemption!\n");
guc->execbuf_client = NULL; guc_client_free(guc->execbuf_client);
return PTR_ERR(client); guc->execbuf_client = NULL;
return PTR_ERR(client);
}
guc->preempt_client = client;
} }
guc->preempt_client = client;
return 0; return 0;
} }
......
...@@ -161,7 +161,6 @@ ...@@ -161,7 +161,6 @@
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */ #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
#define WA_TAIL_DWORDS 2 #define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS) #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
#define PREEMPT_ID 0x1
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine); struct intel_engine_cs *engine);
...@@ -448,7 +447,8 @@ static void inject_preempt_context(struct intel_engine_cs *engine) ...@@ -448,7 +447,8 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
&engine->i915->preempt_context->engine[engine->id]; &engine->i915->preempt_context->engine[engine->id];
unsigned int n; unsigned int n;
GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID); GEM_BUG_ON(engine->execlists.preempt_complete_status !=
upper_32_bits(ce->lrc_desc));
GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES)); GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES));
memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES); memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES);
...@@ -528,7 +528,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) ...@@ -528,7 +528,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK)) if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
goto unlock; goto unlock;
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) && if (engine->i915->preempt_context &&
rb_entry(rb, struct i915_priolist, node)->priority > rb_entry(rb, struct i915_priolist, node)->priority >
max(last->priotree.priority, 0)) { max(last->priotree.priority, 0)) {
/* /*
...@@ -844,7 +844,7 @@ static void execlists_submission_tasklet(unsigned long data) ...@@ -844,7 +844,7 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE); GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
if (status & GEN8_CTX_STATUS_COMPLETE && if (status & GEN8_CTX_STATUS_COMPLETE &&
buf[2*head + 1] == PREEMPT_ID) { buf[2*head + 1] == execlists->preempt_complete_status) {
GEM_TRACE("%s preempt-idle\n", engine->name); GEM_TRACE("%s preempt-idle\n", engine->name);
execlists_cancel_port_requests(execlists); execlists_cancel_port_requests(execlists);
...@@ -1967,7 +1967,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine) ...@@ -1967,7 +1967,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
engine->i915->caps.scheduler = engine->i915->caps.scheduler =
I915_SCHEDULER_CAP_ENABLED | I915_SCHEDULER_CAP_ENABLED |
I915_SCHEDULER_CAP_PRIORITY; I915_SCHEDULER_CAP_PRIORITY;
if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) if (engine->i915->preempt_context)
engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION; engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
} }
...@@ -2045,6 +2045,11 @@ static int logical_ring_init(struct intel_engine_cs *engine) ...@@ -2045,6 +2045,11 @@ static int logical_ring_init(struct intel_engine_cs *engine)
engine->execlists.elsp = engine->execlists.elsp =
engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine)); engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
engine->execlists.preempt_complete_status = ~0u;
if (engine->i915->preempt_context)
engine->execlists.preempt_complete_status =
upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc);
return 0; return 0;
error: error:
...@@ -2307,7 +2312,7 @@ populate_lr_context(struct i915_gem_context *ctx, ...@@ -2307,7 +2312,7 @@ populate_lr_context(struct i915_gem_context *ctx,
if (!engine->default_state) if (!engine->default_state)
regs[CTX_CONTEXT_CONTROL + 1] |= regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
if (ctx->hw_id == PREEMPT_ID) if (ctx == ctx->i915->preempt_context)
regs[CTX_CONTEXT_CONTROL + 1] |= regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT); CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
......
...@@ -279,6 +279,11 @@ struct intel_engine_execlists { ...@@ -279,6 +279,11 @@ struct intel_engine_execlists {
* @csb_use_mmio: access csb through mmio, instead of hwsp * @csb_use_mmio: access csb through mmio, instead of hwsp
*/ */
bool csb_use_mmio; bool csb_use_mmio;
/**
* @preempt_complete_status: expected CSB upon completing preemption
*/
u32 preempt_complete_status;
}; };
#define INTEL_ENGINE_CS_MAX_NAME 8 #define INTEL_ENGINE_CS_MAX_NAME 8
......
...@@ -243,16 +243,10 @@ struct drm_i915_private *mock_gem_device(void) ...@@ -243,16 +243,10 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->kernel_context) if (!i915->kernel_context)
goto err_engine; goto err_engine;
i915->preempt_context = mock_context(i915, NULL);
if (!i915->preempt_context)
goto err_kernel_context;
WARN_ON(i915_gemfs_init(i915)); WARN_ON(i915_gemfs_init(i915));
return i915; return i915;
err_kernel_context:
i915_gem_context_put(i915->kernel_context);
err_engine: err_engine:
for_each_engine(engine, i915, id) for_each_engine(engine, i915, id)
mock_engine_free(engine); mock_engine_free(engine);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment