Commit 22916bad authored by Matthew Brost, committed by Matt Roper

drm/i915: Move submission tasklet to i915_sched_engine

The submission tasklet operates on the i915_sched_engine, so that structure is
the correct place for it to live.

v3:
 (Jason Ekstrand)
  Change sched_engine->engine to a void* private data pointer
  Add kernel doc
v4:
 (Daniele)
  Update private_data comment
  Set queue_priority_hint in kick_execlists
v5:
 (CI)
  Rebase and fix build error
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210618010638.98941-9-matthew.brost@intel.com
parent d2a31d02
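
The shape of the change is easiest to see in the tasklet callbacks below: the
tasklet is now embedded in i915_sched_engine, so a callback first recovers the
scheduler with from_tasklet() and then reaches its backend object through the
new private_data pointer, which each backend fills in at setup time. A minimal
sketch of that pattern, condensed from the execlists paths in this diff
(example_submission_tasklet is a hypothetical name; the real callbacks are
execlists_submission_tasklet, guc_submission_tasklet, and so on):

    static void example_submission_tasklet(struct tasklet_struct *t)
    {
    	/* The tasklet is embedded in i915_sched_engine, not the engine. */
    	struct i915_sched_engine *sched_engine =
    		from_tasklet(sched_engine, t, tasklet);
    	/* Backends register themselves in private_data at setup time. */
    	struct intel_engine_cs * const engine = sched_engine->private_data;

    	/* ... dequeue and submit requests for this engine ... */
    }

    /* At engine setup (cf. engine_setup_common() in this diff): */
    tasklet_setup(&engine->sched_engine->tasklet, example_submission_tasklet);
    engine->sched_engine->private_data = engine;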
@@ -125,20 +125,6 @@ execlists_active(const struct intel_engine_execlists *execlists)
 	return active;
 }
 
-static inline void
-execlists_active_lock_bh(struct intel_engine_execlists *execlists)
-{
-	local_bh_disable(); /* prevent local softirq and lock recursion */
-	tasklet_lock(&execlists->tasklet);
-}
-
-static inline void
-execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
-{
-	tasklet_unlock(&execlists->tasklet);
-	local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
-}
-
 struct i915_request *
 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
...
@@ -713,6 +713,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 		err = -ENOMEM;
 		goto err_sched_engine;
 	}
+	engine->sched_engine->private_data = engine;
 
 	err = intel_engine_init_cmd_parser(engine);
 	if (err)
@@ -944,7 +945,6 @@ int intel_engines_init(struct intel_gt *gt)
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
 	GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));
-	tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
 
 	i915_sched_engine_put(engine->sched_engine);
 	intel_breadcrumbs_free(engine->breadcrumbs);
@@ -1193,7 +1193,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
 {
-	struct tasklet_struct *t = &engine->execlists.tasklet;
+	struct tasklet_struct *t = &engine->sched_engine->tasklet;
 
 	if (!t->callback)
 		return;
@@ -1454,8 +1454,8 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 	drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
 		   yesno(test_bit(TASKLET_STATE_SCHED,
-				  &engine->execlists.tasklet.state)),
-		   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
+				  &engine->sched_engine->tasklet.state)),
+		   enableddisabled(!atomic_read(&engine->sched_engine->tasklet.count)),
 		   repr_timer(&engine->execlists.preempt),
 		   repr_timer(&engine->execlists.timer));
@@ -1479,7 +1479,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 				   idx, hws[idx * 2], hws[idx * 2 + 1]);
 		}
 
-		execlists_active_lock_bh(execlists);
+		i915_sched_engine_active_lock_bh(engine->sched_engine);
 		rcu_read_lock();
 		for (port = execlists->active; (rq = *port); port++) {
 			char hdr[160];
@@ -1510,7 +1510,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 			i915_request_show(m, rq, hdr, 0);
 		}
 		rcu_read_unlock();
-		execlists_active_unlock_bh(execlists);
+		i915_sched_engine_active_unlock_bh(engine->sched_engine);
 	} else if (GRAPHICS_VER(dev_priv) > 6) {
 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
 			   ENGINE_READ(engine, RING_PP_DIR_BASE));
...
@@ -138,11 +138,6 @@ struct st_preempt_hang {
  * driver and the hardware state for execlist mode of submission.
  */
 struct intel_engine_execlists {
-	/**
-	 * @tasklet: softirq tasklet for bottom handler
-	 */
-	struct tasklet_struct tasklet;
-
 	/**
 	 * @timer: kick the current context if its timeslice expires
 	 */
...
@@ -570,7 +570,7 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
 		resubmit_virtual_request(rq, ve);
 
 	if (READ_ONCE(ve->request))
-		tasklet_hi_schedule(&ve->base.execlists.tasklet);
+		tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
 }
 
 static void __execlists_schedule_out(struct i915_request * const rq,
@@ -739,9 +739,9 @@ trace_ports(const struct intel_engine_execlists *execlists,
 }
 
 static bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
+reset_in_progress(const struct intel_engine_cs *engine)
 {
-	return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+	return unlikely(!__tasklet_is_enabled(&engine->sched_engine->tasklet));
 }
 
 static __maybe_unused noinline bool
@@ -757,7 +757,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
 	trace_ports(execlists, msg, execlists->pending);
 
 	/* We may be messing around with the lists during reset, lalala */
-	if (reset_in_progress(execlists))
+	if (reset_in_progress(engine))
 		return true;
 
 	if (!execlists->pending[0]) {
@@ -1190,7 +1190,7 @@ static void start_timeslice(struct intel_engine_cs *engine)
 		 * its timeslice, so recheck.
 		 */
 		if (!timer_pending(&el->timer))
-			tasklet_hi_schedule(&el->tasklet);
+			tasklet_hi_schedule(&engine->sched_engine->tasklet);
 		return;
 	}
@@ -1772,8 +1772,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 	 * access. Either we are inside the tasklet, or the tasklet is disabled
 	 * and we assume that is only inside the reset paths and so serialised.
 	 */
-	GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
-		   !reset_in_progress(execlists));
+	GEM_BUG_ON(!tasklet_is_locked(&engine->sched_engine->tasklet) &&
+		   !reset_in_progress(engine));
 
 	/*
 	 * Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -2131,7 +2131,7 @@ static void execlists_unhold(struct intel_engine_cs *engine,
 	if (rq_prio(rq) > engine->sched_engine->queue_priority_hint) {
 		engine->sched_engine->queue_priority_hint = rq_prio(rq);
-		tasklet_hi_schedule(&engine->execlists.tasklet);
+		tasklet_hi_schedule(&engine->sched_engine->tasklet);
 	}
 
 	spin_unlock_irq(&engine->sched_engine->lock);
@@ -2322,13 +2322,13 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
 	ENGINE_TRACE(engine, "reset for %s\n", msg);
 
 	/* Mark this tasklet as disabled to avoid waiting for it to complete */
-	tasklet_disable_nosync(&engine->execlists.tasklet);
+	tasklet_disable_nosync(&engine->sched_engine->tasklet);
 
 	ring_set_paused(engine, 1); /* Freeze the current request in place */
 	execlists_capture(engine);
 	intel_engine_reset(engine, msg);
 
-	tasklet_enable(&engine->execlists.tasklet);
+	tasklet_enable(&engine->sched_engine->tasklet);
 	clear_and_wake_up_bit(bit, lock);
 }
@@ -2351,8 +2351,9 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
  */
 static void execlists_submission_tasklet(struct tasklet_struct *t)
 {
-	struct intel_engine_cs * const engine =
-		from_tasklet(engine, t, execlists.tasklet);
+	struct i915_sched_engine *sched_engine =
+		from_tasklet(sched_engine, t, tasklet);
+	struct intel_engine_cs * const engine = sched_engine->private_data;
 	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
 	struct i915_request **inactive;
@@ -2427,13 +2428,16 @@ static void execlists_irq_handler(struct intel_engine_cs *engine, u16 iir)
 		intel_engine_signal_breadcrumbs(engine);
 
 	if (tasklet)
-		tasklet_hi_schedule(&engine->execlists.tasklet);
+		tasklet_hi_schedule(&engine->sched_engine->tasklet);
 }
 
 static void __execlists_kick(struct intel_engine_execlists *execlists)
 {
+	struct intel_engine_cs *engine =
+		container_of(execlists, typeof(*engine), execlists);
+
 	/* Kick the tasklet for some interrupt coalescing and reset handling */
-	tasklet_hi_schedule(&execlists->tasklet);
+	tasklet_hi_schedule(&engine->sched_engine->tasklet);
 }
 
 #define execlists_kick(t, member) \
@@ -2808,10 +2812,8 @@ static int execlists_resume(struct intel_engine_cs *engine)
 static void execlists_reset_prepare(struct intel_engine_cs *engine)
 {
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-
 	ENGINE_TRACE(engine, "depth<-%d\n",
-		     atomic_read(&execlists->tasklet.count));
+		     atomic_read(&engine->sched_engine->tasklet.count));
 
 	/*
 	 * Prevent request submission to the hardware until we have
@@ -2822,8 +2824,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
 	 * Turning off the execlists->tasklet until the reset is over
 	 * prevents the race.
 	 */
-	__tasklet_disable_sync_once(&execlists->tasklet);
-	GEM_BUG_ON(!reset_in_progress(execlists));
+	__tasklet_disable_sync_once(&engine->sched_engine->tasklet);
+	GEM_BUG_ON(!reset_in_progress(engine));
 
 	/*
 	 * We stop engines, otherwise we might get failed reset and a
@@ -2973,8 +2975,9 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 static void nop_submission_tasklet(struct tasklet_struct *t)
 {
-	struct intel_engine_cs * const engine =
-		from_tasklet(engine, t, execlists.tasklet);
+	struct i915_sched_engine *sched_engine =
+		from_tasklet(sched_engine, t, tasklet);
+	struct intel_engine_cs * const engine = sched_engine->private_data;
 
 	/* The driver is wedged; don't process any more events. */
 	WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
@@ -3061,8 +3064,8 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	sched_engine->queue_priority_hint = INT_MIN;
 	sched_engine->queue = RB_ROOT_CACHED;
 
-	GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
-	execlists->tasklet.callback = nop_submission_tasklet;
+	GEM_BUG_ON(__tasklet_is_enabled(&engine->sched_engine->tasklet));
+	engine->sched_engine->tasklet.callback = nop_submission_tasklet;
 
 	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 	rcu_read_unlock();
@@ -3082,14 +3085,14 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
 	 * reset as the next level of recovery, and as a final resort we
 	 * will declare the device wedged.
 	 */
-	GEM_BUG_ON(!reset_in_progress(execlists));
+	GEM_BUG_ON(!reset_in_progress(engine));
 
 	/* And kick in case we missed a new request submission. */
-	if (__tasklet_enable(&execlists->tasklet))
+	if (__tasklet_enable(&engine->sched_engine->tasklet))
 		__execlists_kick(execlists);
 
 	ENGINE_TRACE(engine, "depth->%d\n",
-		     atomic_read(&execlists->tasklet.count));
+		     atomic_read(&engine->sched_engine->tasklet.count));
 }
 
 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
@@ -3163,7 +3166,7 @@ static void kick_execlists(const struct i915_request *rq, int prio)
 	 * so kiss.
 	 */
 	if (prio >= max(I915_PRIORITY_NORMAL, rq_prio(inflight)))
-		tasklet_hi_schedule(&engine->execlists.tasklet);
+		tasklet_hi_schedule(&sched_engine->tasklet);
 
 unlock:
 	rcu_read_unlock();
@@ -3174,7 +3177,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
 	engine->submit_request = execlists_submit_request;
 	engine->sched_engine->schedule = i915_schedule;
 	engine->sched_engine->kick_backend = kick_execlists;
-	engine->execlists.tasklet.callback = execlists_submission_tasklet;
+	engine->sched_engine->tasklet.callback = execlists_submission_tasklet;
 }
 
 static void execlists_shutdown(struct intel_engine_cs *engine)
@@ -3182,7 +3185,7 @@ static void execlists_shutdown(struct intel_engine_cs *engine)
 	/* Synchronise with residual timers and any softirq they raise */
 	del_timer_sync(&engine->execlists.timer);
 	del_timer_sync(&engine->execlists.preempt);
-	tasklet_kill(&engine->execlists.tasklet);
+	tasklet_kill(&engine->sched_engine->tasklet);
 }
 
 static void execlists_release(struct intel_engine_cs *engine)
@@ -3298,7 +3301,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 	struct intel_uncore *uncore = engine->uncore;
 	u32 base = engine->mmio_base;
 
-	tasklet_setup(&engine->execlists.tasklet, execlists_submission_tasklet);
+	tasklet_setup(&engine->sched_engine->tasklet, execlists_submission_tasklet);
 	timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
 	timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
@@ -3380,7 +3383,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 	 * rbtrees as in the case it is running in parallel, it may reinsert
 	 * the rb_node into a sibling.
 	 */
-	tasklet_kill(&ve->base.execlists.tasklet);
+	tasklet_kill(&ve->base.sched_engine->tasklet);
 
 	/* Decouple ourselves from the siblings, no more access allowed. */
 	for (n = 0; n < ve->num_siblings; n++) {
@@ -3392,13 +3395,13 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 		spin_lock_irq(&sibling->sched_engine->lock);
 
-		/* Detachment is lazily performed in the execlists tasklet */
+		/* Detachment is lazily performed in the sched_engine->tasklet */
 		if (!RB_EMPTY_NODE(node))
 			rb_erase_cached(node, &sibling->execlists.virtual);
 
 		spin_unlock_irq(&sibling->sched_engine->lock);
 	}
-	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.sched_engine->tasklet));
 	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
 
 	lrc_fini(&ve->context);
@@ -3545,9 +3548,11 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
 static void virtual_submission_tasklet(struct tasklet_struct *t)
 {
+	struct i915_sched_engine *sched_engine =
+		from_tasklet(sched_engine, t, tasklet);
 	struct virtual_engine * const ve =
-		from_tasklet(ve, t, base.execlists.tasklet);
-	const int prio = READ_ONCE(ve->base.sched_engine->queue_priority_hint);
+		(struct virtual_engine *)sched_engine->private_data;
+	const int prio = READ_ONCE(sched_engine->queue_priority_hint);
 	intel_engine_mask_t mask;
 	unsigned int n;
@@ -3616,7 +3621,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
 			GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
 			node->prio = prio;
 			if (first && prio > sibling->sched_engine->queue_priority_hint)
-				tasklet_hi_schedule(&sibling->execlists.tasklet);
+				tasklet_hi_schedule(&sibling->sched_engine->tasklet);
 
 unlock_engine:
 		spin_unlock_irq(&sibling->sched_engine->lock);
@@ -3657,7 +3662,7 @@ static void virtual_submit_request(struct i915_request *rq)
 		GEM_BUG_ON(!list_empty(virtual_queue(ve)));
 		list_move_tail(&rq->sched.link, virtual_queue(ve));
 
-		tasklet_hi_schedule(&ve->base.execlists.tasklet);
+		tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
 
 unlock:
 	spin_unlock_irqrestore(&ve->base.sched_engine->lock, flags);
@@ -3751,6 +3756,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 		err = -ENOMEM;
 		goto err_put;
 	}
+	ve->base.sched_engine->private_data = &ve->base;
 
 	ve->base.cops = &virtual_context_ops;
 	ve->base.request_alloc = execlists_request_alloc;
@@ -3761,7 +3767,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 	ve->base.bond_execute = virtual_bond_execute;
 
 	INIT_LIST_HEAD(virtual_queue(ve));
-	tasklet_setup(&ve->base.execlists.tasklet, virtual_submission_tasklet);
+	tasklet_setup(&ve->base.sched_engine->tasklet, virtual_submission_tasklet);
 
 	intel_context_init(&ve->context, &ve->base);
@@ -3789,7 +3795,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 		 * layering if we handle cloning of the requests and
 		 * submitting a copy into each backend.
 		 */
-		if (sibling->execlists.tasklet.callback !=
+		if (sibling->sched_engine->tasklet.callback !=
 		    execlists_submission_tasklet) {
 			err = -ENODEV;
 			goto err_put;
...
@@ -349,6 +349,7 @@ int mock_engine_init(struct intel_engine_cs *engine)
 	engine->sched_engine = i915_sched_engine_create(ENGINE_MOCK);
 	if (!engine->sched_engine)
 		return -ENOMEM;
+	engine->sched_engine->private_data = engine;
 
 	intel_engine_init_execlists(engine);
 	intel_engine_init__pm(engine);
...
@@ -43,7 +43,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
 			   unsigned long timeout)
 {
 	/* Ignore our own attempts to suppress excess tasklets */
-	tasklet_hi_schedule(&engine->execlists.tasklet);
+	tasklet_hi_schedule(&engine->sched_engine->tasklet);
 
 	timeout += jiffies;
 	do {
@@ -553,13 +553,13 @@ static int live_pin_rewind(void *arg)
 static int engine_lock_reset_tasklet(struct intel_engine_cs *engine)
 {
-	tasklet_disable(&engine->execlists.tasklet);
+	tasklet_disable(&engine->sched_engine->tasklet);
 	local_bh_disable();
 
 	if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
 			     &engine->gt->reset.flags)) {
 		local_bh_enable();
-		tasklet_enable(&engine->execlists.tasklet);
+		tasklet_enable(&engine->sched_engine->tasklet);
 
 		intel_gt_set_wedged(engine->gt);
 		return -EBUSY;
@@ -574,7 +574,7 @@ static void engine_unlock_reset_tasklet(struct intel_engine_cs *engine)
 			      &engine->gt->reset.flags);
 
 	local_bh_enable();
-	tasklet_enable(&engine->execlists.tasklet);
+	tasklet_enable(&engine->sched_engine->tasklet);
 }
 
 static int live_hold_reset(void *arg)
@@ -628,7 +628,7 @@ static int live_hold_reset(void *arg)
 	if (err)
 		goto out;
 
-	engine->execlists.tasklet.callback(&engine->execlists.tasklet);
+	engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet);
 	GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
 	i915_request_get(rq);
@@ -1200,7 +1200,7 @@ static int live_timeslice_rewind(void *arg)
 	while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
 		/* Wait for the timeslice to kick in */
 		del_timer(&engine->execlists.timer);
-		tasklet_hi_schedule(&engine->execlists.tasklet);
+		tasklet_hi_schedule(&engine->sched_engine->tasklet);
 		intel_engine_flush_submission(engine);
 	}
 	/* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
@@ -4606,7 +4606,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
 	if (err)
 		goto out_heartbeat;
 
-	engine->execlists.tasklet.callback(&engine->execlists.tasklet);
+	engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet);
 	GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
 	/* Fake a preemption event; failed of course */
...
@@ -1702,7 +1702,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
 				     const struct igt_atomic_section *p,
 				     const char *mode)
 {
-	struct tasklet_struct * const t = &engine->execlists.tasklet;
+	struct tasklet_struct * const t = &engine->sched_engine->tasklet;
 	int err;
 
 	GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
...
@@ -49,7 +49,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
 			   unsigned long timeout)
 {
 	/* Ignore our own attempts to suppress excess tasklets */
-	tasklet_hi_schedule(&engine->execlists.tasklet);
+	tasklet_hi_schedule(&engine->sched_engine->tasklet);
 
 	timeout += jiffies;
 	do {
@@ -1613,12 +1613,12 @@ static void garbage_reset(struct intel_engine_cs *engine,
 	local_bh_disable();
 	if (!test_and_set_bit(bit, lock)) {
-		tasklet_disable(&engine->execlists.tasklet);
+		tasklet_disable(&engine->sched_engine->tasklet);
 
 		if (!rq->fence.error)
 			__intel_engine_reset_bh(engine, NULL);
 
-		tasklet_enable(&engine->execlists.tasklet);
+		tasklet_enable(&engine->sched_engine->tasklet);
 		clear_and_wake_up_bit(bit, lock);
 	}
 	local_bh_enable();
...
@@ -321,7 +321,7 @@ static int igt_atomic_engine_reset(void *arg)
 		goto out_unlock;
 
 	for_each_engine(engine, gt, id) {
-		struct tasklet_struct *t = &engine->execlists.tasklet;
+		struct tasklet_struct *t = &engine->sched_engine->tasklet;
 
 		if (t->func)
 			tasklet_disable(t);
...
@@ -241,8 +241,9 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 static void guc_submission_tasklet(struct tasklet_struct *t)
 {
-	struct intel_engine_cs * const engine =
-		from_tasklet(engine, t, execlists.tasklet);
+	struct i915_sched_engine *sched_engine =
+		from_tasklet(sched_engine, t, tasklet);
+	struct intel_engine_cs * const engine = sched_engine->private_data;
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port, *rq;
 	unsigned long flags;
@@ -272,14 +273,12 @@ static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
 {
 	if (iir & GT_RENDER_USER_INTERRUPT) {
 		intel_engine_signal_breadcrumbs(engine);
-		tasklet_hi_schedule(&engine->execlists.tasklet);
+		tasklet_hi_schedule(&engine->sched_engine->tasklet);
 	}
 }
 
 static void guc_reset_prepare(struct intel_engine_cs *engine)
 {
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-
 	ENGINE_TRACE(engine, "\n");
 
 	/*
@@ -291,7 +290,7 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
 	 * Turning off the execlists->tasklet until the reset is over
 	 * prevents the race.
 	 */
-	__tasklet_disable_sync_once(&execlists->tasklet);
+	__tasklet_disable_sync_once(&engine->sched_engine->tasklet);
 }
 
 static void guc_reset_state(struct intel_context *ce,
@@ -395,14 +394,12 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
 static void guc_reset_finish(struct intel_engine_cs *engine)
 {
-	struct intel_engine_execlists * const execlists = &engine->execlists;
-
-	if (__tasklet_enable(&execlists->tasklet))
+	if (__tasklet_enable(&engine->sched_engine->tasklet))
 		/* And kick in case we missed a new request submission. */
-		tasklet_hi_schedule(&execlists->tasklet);
+		tasklet_hi_schedule(&engine->sched_engine->tasklet);
 
 	ENGINE_TRACE(engine, "depth->%d\n",
-		     atomic_read(&execlists->tasklet.count));
+		     atomic_read(&engine->sched_engine->tasklet.count));
 }
 
 /*
@@ -520,7 +517,7 @@ static void guc_submit_request(struct i915_request *rq)
 	GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine));
 	GEM_BUG_ON(list_empty(&rq->sched.link));
 
-	tasklet_hi_schedule(&engine->execlists.tasklet);
+	tasklet_hi_schedule(&engine->sched_engine->tasklet);
 
 	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 }
@@ -600,7 +597,7 @@ static void guc_release(struct intel_engine_cs *engine)
 {
 	engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
 
-	tasklet_kill(&engine->execlists.tasklet);
+	tasklet_kill(&engine->sched_engine->tasklet);
 
 	intel_engine_cleanup_common(engine);
 	lrc_fini_wa_ctx(engine);
@@ -679,7 +676,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
 	 */
 	GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
 
-	tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet);
+	tasklet_setup(&engine->sched_engine->tasklet, guc_submission_tasklet);
 
 	guc_default_vfuncs(engine);
 	guc_default_irqs(engine);
...
@@ -436,6 +436,7 @@ void i915_sched_engine_free(struct kref *kref)
 	struct i915_sched_engine *sched_engine =
 		container_of(kref, typeof(*sched_engine), ref);
 
+	tasklet_kill(&sched_engine->tasklet); /* flush the callback */
 	kfree(sched_engine);
 }
...
@@ -79,6 +79,20 @@ i915_sched_engine_reset_on_empty(struct i915_sched_engine *sched_engine)
 	sched_engine->no_priolist = false;
 }
 
+static inline void
+i915_sched_engine_active_lock_bh(struct i915_sched_engine *sched_engine)
+{
+	local_bh_disable(); /* prevent local softirq and lock recursion */
+	tasklet_lock(&sched_engine->tasklet);
+}
+
+static inline void
+i915_sched_engine_active_unlock_bh(struct i915_sched_engine *sched_engine)
+{
+	tasklet_unlock(&sched_engine->tasklet);
+	local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
+}
+
 void i915_request_show_with_schedule(struct drm_printer *m,
 				     const struct i915_request *rq,
 				     const char *prefix,
...
@@ -124,6 +124,11 @@ struct i915_sched_engine {
 	 */
 	struct list_head hold;
 
+	/**
+	 * @tasklet: softirq tasklet for submission
+	 */
+	struct tasklet_struct tasklet;
+
 	/**
 	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
 	 */
@@ -153,6 +158,11 @@ struct i915_sched_engine {
 	 */
 	bool no_priolist;
 
+	/**
+	 * @private_data: private data of the submission backend
+	 */
+	void *private_data;
+
 	/**
 	 * @kick_backend: kick backend after a request's priority has changed
 	 */
...
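
A closing note on the lock helpers: execlists_active_lock_bh()/unlock_bh(),
removed from intel_engine.h above, survive as
i915_sched_engine_active_lock_bh()/i915_sched_engine_active_unlock_bh() in
i915_scheduler.h, with the same bottom-half discipline around the tasklet
lock. A usage sketch matching the intel_engine_print_registers() caller in
this commit (the "active " prefix string is illustrative only):

    /* Hold off the submission tasklet while walking execlists->active. */
    i915_sched_engine_active_lock_bh(engine->sched_engine);
    rcu_read_lock();
    for (port = execlists->active; (rq = *port); port++)
    	i915_request_show(m, rq, "active ", 0);
    rcu_read_unlock();
    i915_sched_engine_active_unlock_bh(engine->sched_engine);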