Commit 71b0846c authored by Chris Wilson

drm/i915/guc: Remove preemption support for current fw

Preemption via GuC submission is not supported in its current legacy
incarnation. The current FW does support a similar preemption flow via
H2G, but it is class-based instead of instance-based, which doesn't fit
well with the i915 tracking. To fix this, the firmware is being updated
to better support our needs with a new flow, so we can safely remove the
old code.

v2 (Daniele): resurrect & rebase, reword commit message, remove
preempt_context as well
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190710005437.3496-2-daniele.ceraolospurio@intel.com
parent bf1315b8
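
The distinction the commit message draws can be made concrete with a small
sketch. The structs below are invented for illustration only and do not
reflect the actual GuC H2G preemption messages: an instance-scoped request
can name the single engine (and context) that i915's tracking wants to
preempt, while a class-scoped request can only address every engine of a
given class.

/*
 * Hypothetical sketch -- these types are made up for illustration and are
 * not the real GuC H2G interface. They only contrast the two scopes.
 */
#include <linux/types.h>

struct fake_h2g_preempt_instance {	/* what i915's per-engine tracking wants */
	u32 engine_class;		/* e.g. the video decode class */
	u32 engine_instance;		/* one specific engine of that class */
	u32 context_id;			/* the single context to preempt */
};

struct fake_h2g_preempt_class {		/* what the current firmware flow offers */
	u32 engine_class;		/* hits every instance of the class */
};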
@@ -644,18 +644,12 @@ static void init_contexts(struct drm_i915_private *i915)
init_llist_head(&i915->contexts.free_list);
}
static bool needs_preempt_context(struct drm_i915_private *i915)
{
return USES_GUC_SUBMISSION(i915);
}
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
/* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(dev_priv->preempt_context);
init_contexts(dev_priv);
@@ -676,15 +670,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
dev_priv->kernel_context = ctx;
/* highest priority; preempting task */
if (needs_preempt_context(dev_priv)) {
ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
if (!IS_ERR(ctx))
dev_priv->preempt_context = ctx;
else
DRM_ERROR("Failed to create preempt context; disabling preemption\n");
}
DRM_DEBUG_DRIVER("%s context support initialized\n",
DRIVER_CAPS(dev_priv)->has_logical_contexts ?
"logical" : "fake");
@@ -695,8 +680,6 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->drm.struct_mutex);
if (i915->preempt_context)
destroy_kernel_context(&i915->preempt_context);
destroy_kernel_context(&i915->kernel_context);
/* Must free all deferred contexts (via flush_workqueue) first */
@@ -841,15 +841,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
return ret;
/*
* Similarly the preempt context must always be available so that
* we can interrupt the engine at any time. However, as preemption
* is optional, we allow it to fail.
*/
if (i915->preempt_context)
pin_context(i915->preempt_context, engine,
&engine->preempt_context);
ret = measure_breadcrumb_dw(engine);
if (ret < 0)
goto err_unpin;
@@ -861,8 +852,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
return 0;
err_unpin:
if (engine->preempt_context)
intel_context_unpin(engine->preempt_context);
intel_context_unpin(engine->kernel_context);
return ret;
}
@@ -887,8 +876,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
if (engine->preempt_context)
intel_context_unpin(engine->preempt_context);
intel_context_unpin(engine->kernel_context);
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
@@ -288,7 +288,6 @@ struct intel_engine_cs {
struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
struct intel_context *preempt_context; /* pinned; optional */
intel_engine_mask_t saturated; /* submitting semaphores too late? */
@@ -145,10 +145,6 @@ int intel_gt_resume(struct intel_gt *gt)
if (ce)
ce->ops->reset(ce);
ce = engine->preempt_context;
if (ce)
ce->ops->reset(ce);
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
@@ -2010,11 +2010,6 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
i915_guc_client_info(m, dev_priv, guc->execbuf_client);
if (guc->preempt_client) {
seq_printf(m, "\nGuC preempt client @ %p:\n",
guc->preempt_client);
i915_guc_client_info(m, dev_priv, guc->preempt_client);
}
/* Add more as required ... */
@@ -1378,8 +1378,6 @@ struct drm_i915_private {
struct intel_engine_cs *engine[I915_NUM_ENGINES];
/* Context used internally to idle the GPU and setup initial state */
struct i915_gem_context *kernel_context;
/* Context only to be used for injecting preemption commands */
struct i915_gem_context *preempt_context;
struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
[MAX_ENGINE_INSTANCE + 1];
@@ -101,8 +101,6 @@ void intel_guc_init_early(struct intel_guc *guc)
static int guc_init_wq(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
/*
* GuC log buffer flush work item has to do register access to
* send the ack to GuC and this work item, if not synced before
@@ -122,31 +120,6 @@ static int guc_init_wq(struct intel_guc *guc)
return -ENOMEM;
}
/*
* Even though both sending GuC action, and adding a new workitem to
* GuC workqueue are serialized (each with its own locking), since
* we're using mutliple engines, it's possible that we're going to
* issue a preempt request with two (or more - each for different
* engine) workitems in GuC queue. In this situation, GuC may submit
* all of them, which will make us very confused.
* Our preemption contexts may even already be complete - before we
* even had the chance to sent the preempt action to GuC!. Rather
* than introducing yet another lock, we can just use ordered workqueue
* to make sure we're always sending a single preemption request with a
* single workitem.
*/
if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
USES_GUC_SUBMISSION(dev_priv)) {
guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
WQ_HIGHPRI);
if (!guc->preempt_wq) {
destroy_workqueue(guc->log.relay.flush_wq);
DRM_ERROR("Couldn't allocate workqueue for GuC "
"preemption\n");
return -ENOMEM;
}
}
return 0;
}
@@ -154,10 +127,6 @@ static void guc_fini_wq(struct intel_guc *guc)
{
struct workqueue_struct *wq;
wq = fetch_and_zero(&guc->preempt_wq);
if (wq)
destroy_workqueue(wq);
wq = fetch_and_zero(&guc->log.relay.flush_wq);
if (wq)
destroy_workqueue(wq);
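
For orientation, here is how guc_fini_wq() reads once this patch is
applied, reconstructed from the surviving context lines of the hunk above;
the closing brace is added for completeness, so treat this as a sketch
rather than a verified copy of the file.

static void guc_fini_wq(struct intel_guc *guc)
{
	struct workqueue_struct *wq;

	/* Only the GuC log relay flush workqueue is left to tear down now
	 * that the preemption workqueue handling above is removed. */
	wq = fetch_and_zero(&guc->log.relay.flush_wq);
	if (wq)
		destroy_workqueue(wq);
}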
@@ -37,11 +37,6 @@
struct __guc_ads_blob;
struct guc_preempt_work {
struct work_struct work;
struct intel_engine_cs *engine;
};
/*
* Top level structure of GuC. It handles firmware loading and manages client
* pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy
@@ -76,10 +71,6 @@ struct intel_guc {
void *shared_data_vaddr;
struct intel_guc_client *execbuf_client;
struct intel_guc_client *preempt_client;
struct guc_preempt_work preempt_work[I915_NUM_ENGINES];
struct workqueue_struct *preempt_wq;
DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
/* Cyclic counter mod pagesize */
[The diff for one large file in this commit is collapsed and not shown here.]
@@ -103,13 +103,10 @@ static int ring_doorbell_nop(struct intel_guc_client *client)
/*
* Basic client sanity check, handy to validate create_clients.
*/
static int validate_client(struct intel_guc_client *client,
int client_priority,
bool is_preempt_client)
static int validate_client(struct intel_guc_client *client, int client_priority)
{
struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
struct i915_gem_context *ctx_owner = is_preempt_client ?
dev_priv->preempt_context : dev_priv->kernel_context;
struct i915_gem_context *ctx_owner = dev_priv->kernel_context;
if (client->owner != ctx_owner ||
client->engines != INTEL_INFO(dev_priv)->engine_mask ||
@@ -163,7 +160,7 @@ static int igt_guc_clients(void *args)
*/
guc_clients_disable(guc);
guc_clients_destroy(guc);
if (guc->execbuf_client || guc->preempt_client) {
if (guc->execbuf_client) {
pr_err("guc_clients_destroy lied!\n");
err = -EINVAL;
goto unlock;
@@ -177,24 +174,14 @@ static int igt_guc_clients(void *args)
GEM_BUG_ON(!guc->execbuf_client);
err = validate_client(guc->execbuf_client,
GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
GUC_CLIENT_PRIORITY_KMD_NORMAL);
if (err) {
pr_err("execbug client validation failed\n");
goto out;
}
if (guc->preempt_client) {
err = validate_client(guc->preempt_client,
GUC_CLIENT_PRIORITY_KMD_HIGH, true);
if (err) {
pr_err("preempt client validation failed\n");
goto out;
}
}
/* each client should now have reserved a doorbell */
if (!has_doorbell(guc->execbuf_client) ||
(guc->preempt_client && !has_doorbell(guc->preempt_client))) {
/* the client should now have reserved a doorbell */
if (!has_doorbell(guc->execbuf_client)) {
pr_err("guc_clients_create didn't reserve doorbells\n");
err = -EINVAL;
goto out;
@@ -204,8 +191,7 @@ static int igt_guc_clients(void *args)
guc_clients_enable(guc);
/* each client should now have received a doorbell */
if (!client_doorbell_in_sync(guc->execbuf_client) ||
!client_doorbell_in_sync(guc->preempt_client)) {
if (!client_doorbell_in_sync(guc->execbuf_client)) {
pr_err("failed to initialize the doorbells\n");
err = -EINVAL;
goto out;
@@ -300,8 +286,7 @@ static int igt_guc_doorbells(void *arg)
goto out;
}
err = validate_client(clients[i],
i % GUC_CLIENT_PRIORITY_NUM, false);
err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM);
if (err) {
pr_err("[%d] client_alloc sanity check failed!\n", i);
err = -EINVAL;