Commit 52c0fdb2 authored by Chris Wilson

drm/i915: Replace global breadcrumbs with per-context interrupt tracking

A few years ago, in commit 688e6c72 ("drm/i915: Slaughter the
thundering i915_wait_request herd"), the issue of handling multiple
clients waiting in parallel was brought to our attention. The
requirement was that every client should be woken immediately upon its
request being signaled, without incurring any CPU overhead.

The fragility of our hw meant that we could not do a simple check
inside the irq handler (some generations required almost unbounded
delays before we could be sure of seqno coherency), and so request
completion checking required delegation.

Before commit 688e6c72, the solution was simple. Every client
waiting on a request would be woken on every interrupt, and each would do
a heavyweight check to see if its request was complete. Commit
688e6c72 introduced an rbtree so that only the earliest waiter on
the global timeline would be woken, and it would in turn wake the next, and
so on. (Along with various complications to handle requests being reordered
along the global timeline, and also a requirement for a kthread to provide
a delegate for fence signaling that had no process context.)

The global rbtree depends on knowing the execution timeline (and global
seqno). Without knowing that order, we must instead check all contexts
queued to the HW to see which may have advanced. We trim that list by
only checking queued contexts that are being waited on, but still we
keep a list of all active contexts and their active signalers that we
inspect from inside the irq handler. By moving the waiters onto the fence
signal list, we can combine the client wakeup with the dma_fence
signaling (a dramatic reduction in complexity, but it does require the HW
to be coherent: the seqno must be visible from the CPU before the
interrupt is raised - we keep a timer backup just in case).
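
The rewritten breadcrumbs implementation itself is collapsed in the diff
view below, so as a rough conceptual sketch only (names, locking and
bookkeeping are simplified here and do not mirror the actual code), the
interrupt-side walk amounts to:

	/* Conceptual sketch, not the shipped implementation. */
	static void signal_breadcrumbs(struct intel_breadcrumbs *b)
	{
		struct intel_context *ce, *cn;
		struct i915_request *rq, *rn;
		LIST_HEAD(signal);

		spin_lock(&b->irq_lock);
		/* Only contexts with active signalers are tracked. */
		list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
			/* ce->signals is in submission order; stop at the
			 * first breadcrumb the HW has not yet passed.
			 */
			list_for_each_entry_safe(rq, rn, &ce->signals, signal_link) {
				if (!i915_request_completed(rq))
					break;
				/* Collect locally, signal outside the lock. */
				list_move_tail(&rq->signal_link, &signal);
			}
			if (list_empty(&ce->signals))
				list_del_init(&ce->signal_link);
		}
		spin_unlock(&b->irq_lock);

		list_for_each_entry_safe(rq, rn, &signal, signal_link)
			dma_fence_signal(&rq->fence); /* wakes every waiter */
	}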

Having previously fixed all the issues with irq-seqno serialisation (by
inserting delays onto the GPU after each request instead of random delays
on the CPU after each interrupt), we can rely on the seqno state to
perform direct wakeups from the interrupt handler. This allows us to
preserve the single-context-switch behaviour of the current routine,
with the only downside being that we lose the RT-priority sorting of wakeups.
In general, direct wakeup latency of multiple clients is about the same
(about 10% better in most cases), with a reduction in total CPU time spent
in the waiter (about 20-50% depending on gen). Average herd behaviour is
improved, but at the cost of not delegating wakeups on task_prio.
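
For reference, the client-side wait then reduces to a plain dma_fence
callback plus a schedule loop; the following is condensed from the
i915_request_wait() rewrite in this patch (signal and timeout handling
elided):

	struct request_wait {
		struct dma_fence_cb cb;
		struct task_struct *tsk;
	};

	static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
	{
		struct request_wait *wait = container_of(cb, typeof(*wait), cb);

		wake_up_process(wait->tsk);
	}

	/* ... in i915_request_wait(), after the optimistic busywait ... */
	wait.tsk = current;
	if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
		goto out; /* fence already signaled */

	for (;;) {
		set_current_state(state);
		if (i915_request_completed(rq))
			break;
		timeout = io_schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);
	dma_fence_remove_callback(&rq->fence, &wait.cb);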

v2: Capture fence signaling state for error state and add comments to
warm even the most cold of hearts.
v3: Check if the request is still active before busywaiting
v4: Reduce the amount of pointer misdirection with list_for_each_safe
and using a local i915_request variable inside the loops
v5: Add a missing pluralisation to a purely informative selftest message.

References: 688e6c72 ("drm/i915: Slaughter the thundering i915_wait_request herd")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190129205230.19056-2-chris@chris-wilson.co.uk
parent 3df0bd19
@@ -1315,29 +1315,16 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
 
 	for_each_engine(engine, dev_priv, id) {
-		struct intel_breadcrumbs *b = &engine->breadcrumbs;
-		struct rb_node *rb;
-
 		seq_printf(m, "%s:\n", engine->name);
 		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
 			   engine->hangcheck.seqno, seqno[id],
 			   intel_engine_last_submit(engine),
 			   jiffies_to_msecs(jiffies -
 					    engine->hangcheck.action_timestamp));
-		seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
-			   yesno(intel_engine_has_waiter(engine)),
+		seq_printf(m, "\tfake irq active? %s\n",
 			   yesno(test_bit(engine->id,
 					  &dev_priv->gpu_error.missed_irq_rings)));
 
-		spin_lock_irq(&b->rb_lock);
-		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
-			struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
-			seq_printf(m, "\t%s [%d] waiting for %x\n",
-				   w->tsk->comm, w->tsk->pid, w->seqno);
-		}
-		spin_unlock_irq(&b->rb_lock);
-
 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
 			   (long long)engine->hangcheck.acthd,
 			   (long long)acthd[id]);
@@ -2021,18 +2008,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static int count_irq_waiters(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int count = 0;
-
-	for_each_engine(engine, i915, id)
-		count += intel_engine_has_waiter(engine);
-
-	return count;
-}
-
 static const char *rps_power_to_str(unsigned int power)
 {
 	static const char * const strings[] = {
@@ -2072,7 +2047,6 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
 	seq_printf(m, "GPU busy? %s [%d requests]\n",
 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
-	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
 	seq_printf(m, "Boosts outstanding? %d\n",
 		   atomic_read(&rps->num_waiters));
 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
@@ -327,6 +327,9 @@ intel_context_init(struct intel_context *ce,
		   struct intel_engine_cs *engine)
 {
 	ce->gem_context = ctx;
+
+	INIT_LIST_HEAD(&ce->signal_link);
+	INIT_LIST_HEAD(&ce->signals);
 }
 
 static struct i915_gem_context *
@@ -164,6 +164,8 @@ struct i915_gem_context {
 	struct intel_context {
 		struct i915_gem_context *gem_context;
 		struct intel_engine_cs *active;
+		struct list_head signal_link;
+		struct list_head signals;
 		struct i915_vma *state;
 		struct intel_ring *ring;
 		u32 *lrc_reg_state;
@@ -447,9 +447,14 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
 	if (!erq->seqno)
 		return;
 
-	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
+	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
		   prefix, erq->pid, erq->ban_score,
-		   erq->context, erq->seqno, erq->sched_attr.priority,
+		   erq->context, erq->seqno,
+		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+			    &erq->flags) ? "!" : "",
+		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+			    &erq->flags) ? "+" : "",
+		   erq->sched_attr.priority,
		   jiffies_to_msecs(erq->jiffies - epoch),
		   erq->start, erq->head, erq->tail);
 }
@@ -530,7 +535,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 	}
 	err_printf(m, "  seqno: 0x%08x\n", ee->seqno);
 	err_printf(m, "  last_seqno: 0x%08x\n", ee->last_seqno);
-	err_printf(m, "  waiting: %s\n", yesno(ee->waiting));
 	err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
 	err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
 	err_printf(m, "  hangcheck timestamp: %dms (%lu%s)\n",
@@ -804,21 +808,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
				error->epoch);
 	}
 
-	if (IS_ERR(ee->waiters)) {
-		err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
-			   m->i915->engine[i]->name);
-	} else if (ee->num_waiters) {
-		err_printf(m, "%s --- %d waiters\n",
-			   m->i915->engine[i]->name,
-			   ee->num_waiters);
-		for (j = 0; j < ee->num_waiters; j++) {
-			err_printf(m, " seqno 0x%08x for %s [%d]\n",
-				   ee->waiters[j].seqno,
-				   ee->waiters[j].comm,
-				   ee->waiters[j].pid);
-		}
-	}
-
	print_error_obj(m, m->i915->engine[i],
			"ringbuffer", ee->ringbuffer);
@@ -1000,8 +989,6 @@ void __i915_gpu_state_free(struct kref *error_ref)
		i915_error_object_free(ee->wa_ctx);
 
		kfree(ee->requests);
-		if (!IS_ERR_OR_NULL(ee->waiters))
-			kfree(ee->waiters);
	}
 
	for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
@@ -1205,59 +1192,6 @@ static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
		I915_READ(RING_SYNC_2(engine->mmio_base));
 }
 
-static void error_record_engine_waiters(struct intel_engine_cs *engine,
-					struct drm_i915_error_engine *ee)
-{
-	struct intel_breadcrumbs *b = &engine->breadcrumbs;
-	struct drm_i915_error_waiter *waiter;
-	struct rb_node *rb;
-	int count;
-
-	ee->num_waiters = 0;
-	ee->waiters = NULL;
-
-	if (RB_EMPTY_ROOT(&b->waiters))
-		return;
-
-	if (!spin_trylock_irq(&b->rb_lock)) {
-		ee->waiters = ERR_PTR(-EDEADLK);
-		return;
-	}
-
-	count = 0;
-	for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
-		count++;
-	spin_unlock_irq(&b->rb_lock);
-
-	waiter = NULL;
-	if (count)
-		waiter = kmalloc_array(count,
-				       sizeof(struct drm_i915_error_waiter),
-				       GFP_ATOMIC);
-	if (!waiter)
-		return;
-
-	if (!spin_trylock_irq(&b->rb_lock)) {
-		kfree(waiter);
-		ee->waiters = ERR_PTR(-EDEADLK);
-		return;
-	}
-
-	ee->waiters = waiter;
-	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
-		struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
-		strcpy(waiter->comm, w->tsk->comm);
-		waiter->pid = w->tsk->pid;
-		waiter->seqno = w->seqno;
-		waiter++;
-
-		if (++ee->num_waiters == count)
-			break;
-	}
-	spin_unlock_irq(&b->rb_lock);
-}
-
 static void error_record_engine_registers(struct i915_gpu_state *error,
					   struct intel_engine_cs *engine,
					   struct drm_i915_error_engine *ee)
@@ -1293,7 +1227,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 
	intel_engine_get_instdone(engine, &ee->instdone);
 
-	ee->waiting = intel_engine_has_waiter(engine);
	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
	ee->acthd = intel_engine_get_active_head(engine);
	ee->seqno = intel_engine_get_seqno(engine);
@@ -1367,6 +1300,7 @@ static void record_request(struct i915_request *request,
 {
	struct i915_gem_context *ctx = request->gem_context;
 
+	erq->flags = request->fence.flags;
	erq->context = ctx->hw_id;
	erq->sched_attr = request->sched.attr;
	erq->ban_score = atomic_read(&ctx->ban_score);
@@ -1542,7 +1476,6 @@ static void gem_record_rings(struct i915_gpu_state *error)
		ee->engine_id = i;
 
		error_record_engine_registers(error, engine, ee);
-		error_record_engine_waiters(engine, ee);
		error_record_engine_execlists(engine, ee);
 
		request = i915_gem_find_active_request(engine);
@@ -82,8 +82,6 @@ struct i915_gpu_state {
		int engine_id;
		/* Software tracked state */
		bool idle;
-		bool waiting;
-		int num_waiters;
		unsigned long hangcheck_timestamp;
		struct i915_address_space *vm;
		int num_requests;
@@ -147,6 +145,7 @@ struct i915_gpu_state {
		struct drm_i915_error_object *default_state;
 
		struct drm_i915_error_request {
+			unsigned long flags;
			long jiffies;
			pid_t pid;
			u32 context;
@@ -159,12 +158,6 @@ struct i915_gpu_state {
		} *requests, execlist[EXECLIST_MAX_PORTS];
		unsigned int num_ports;
 
-		struct drm_i915_error_waiter {
-			char comm[TASK_COMM_LEN];
-			pid_t pid;
-			u32 seqno;
-		} *waiters;
-
		struct {
			u32 gfx_mode;
			union {
@@ -1169,66 +1169,6 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
	return;
 }
 
-static void notify_ring(struct intel_engine_cs *engine)
-{
-	const u32 seqno = intel_engine_get_seqno(engine);
-	struct i915_request *rq = NULL;
-	struct task_struct *tsk = NULL;
-	struct intel_wait *wait;
-
-	if (unlikely(!engine->breadcrumbs.irq_armed))
-		return;
-
-	rcu_read_lock();
-
-	spin_lock(&engine->breadcrumbs.irq_lock);
-	wait = engine->breadcrumbs.irq_wait;
-	if (wait) {
-		/*
-		 * We use a callback from the dma-fence to submit
-		 * requests after waiting on our own requests. To
-		 * ensure minimum delay in queuing the next request to
-		 * hardware, signal the fence now rather than wait for
-		 * the signaler to be woken up. We still wake up the
-		 * waiter in order to handle the irq-seqno coherency
-		 * issues (we may receive the interrupt before the
-		 * seqno is written, see __i915_request_irq_complete())
-		 * and to handle coalescing of multiple seqno updates
-		 * and many waiters.
-		 */
-		if (i915_seqno_passed(seqno, wait->seqno)) {
-			struct i915_request *waiter = wait->request;
-
-			if (waiter &&
-			    !i915_request_signaled(waiter) &&
-			    intel_wait_check_request(wait, waiter))
-				rq = i915_request_get(waiter);
-
-			tsk = wait->tsk;
-		}
-
-		engine->breadcrumbs.irq_count++;
-	} else {
-		if (engine->breadcrumbs.irq_armed)
-			__intel_engine_disarm_breadcrumbs(engine);
-	}
-	spin_unlock(&engine->breadcrumbs.irq_lock);
-
-	if (rq) {
-		spin_lock(&rq->lock);
-		dma_fence_signal_locked(&rq->fence);
-		GEM_BUG_ON(!i915_request_completed(rq));
-		spin_unlock(&rq->lock);
-
-		i915_request_put(rq);
-	}
-
-	if (tsk && tsk->state & TASK_NORMAL)
-		wake_up_process(tsk);
-
-	rcu_read_unlock();
-}
-
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
			 struct intel_rps_ei *ei)
 {
@@ -1473,20 +1413,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
 {
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
-		notify_ring(dev_priv->engine[RCS]);
+		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
-		notify_ring(dev_priv->engine[VCS]);
+		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
 }
 
 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
 {
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
-		notify_ring(dev_priv->engine[RCS]);
+		intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
-		notify_ring(dev_priv->engine[VCS]);
+		intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
-		notify_ring(dev_priv->engine[BCS]);
+		intel_engine_breadcrumbs_irq(dev_priv->engine[BCS]);
 
	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
@@ -1506,7 +1446,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
		tasklet = true;
 
	if (iir & GT_RENDER_USER_INTERRUPT) {
-		notify_ring(engine);
+		intel_engine_breadcrumbs_irq(engine);
		tasklet |= USES_GUC_SUBMISSION(engine->i915);
	}
@@ -1852,7 +1792,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-			notify_ring(dev_priv->engine[VECS]);
+			intel_engine_breadcrumbs_irq(dev_priv->engine[VECS]);
 
		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@ -4276,7 +4216,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
		I915_WRITE16(IIR, iir);
 
		if (iir & I915_USER_INTERRUPT)
-			notify_ring(dev_priv->engine[RCS]);
+			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
 
		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4384,7 +4324,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
		I915_WRITE(IIR, iir);
 
		if (iir & I915_USER_INTERRUPT)
-			notify_ring(dev_priv->engine[RCS]);
+			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
 
		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4529,10 +4469,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
		I915_WRITE(IIR, iir);
 
		if (iir & I915_USER_INTERRUPT)
-			notify_ring(dev_priv->engine[RCS]);
+			intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
 
		if (iir & I915_BSD_USER_INTERRUPT)
-			notify_ring(dev_priv->engine[VCS]);
+			intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
 
		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -60,7 +60,7 @@ static bool i915_fence_signaled(struct dma_fence *fence)
 
 static bool i915_fence_enable_signaling(struct dma_fence *fence)
 {
-	return intel_engine_enable_signaling(to_request(fence), true);
+	return i915_request_enable_breadcrumb(to_request(fence));
 }
 
 static signed long i915_fence_wait(struct dma_fence *fence,
@@ -203,7 +203,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
	if (!i915_request_signaled(rq))
		dma_fence_signal_locked(&rq->fence);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
-		intel_engine_cancel_signaling(rq);
+		i915_request_cancel_breadcrumb(rq);
	if (rq->waitboost) {
		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
@@ -377,9 +377,12 @@ void __i915_request_submit(struct i915_request *request)
 
	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
	request->global_seqno = seqno;
-	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
-		intel_engine_enable_signaling(request, false);
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
+	    !i915_request_enable_breadcrumb(request))
+		intel_engine_queue_breadcrumbs(engine);
	spin_unlock(&request->lock);
 
	engine->emit_fini_breadcrumb(request,
@@ -389,8 +392,6 @@ void __i915_request_submit(struct i915_request *request)
	move_to_timeline(request, &engine->timeline);
 
	trace_i915_request_execute(request);
-
-	wake_up_all(&request->execute);
 }
 
 void i915_request_submit(struct i915_request *request)
@@ -433,7 +434,9 @@ void __i915_request_unsubmit(struct i915_request *request)
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = 0;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
-		intel_engine_cancel_signaling(request);
+		i915_request_cancel_breadcrumb(request);
+	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+	clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
	spin_unlock(&request->lock);
 
	/* Transfer back from the global per-engine timeline to per-context */
@@ -633,13 +636,11 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 
	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
-	init_waitqueue_head(&rq->execute);
 
	i915_sched_node_init(&rq->sched);
 
	/* No zalloc, must clear what we need by hand */
	rq->global_seqno = 0;
-	rq->signaling.wait.seqno = 0;
	rq->file_priv = NULL;
	rq->batch = NULL;
	rq->capture_list = NULL;
@@ -1030,13 +1031,10 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
	return this_cpu != cpu;
 }
 
-static bool __i915_spin_request(const struct i915_request *rq,
-				u32 seqno, int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct i915_request * const rq,
+				int state, unsigned long timeout_us)
 {
-	struct intel_engine_cs *engine = rq->engine;
-	unsigned int irq, cpu;
-
-	GEM_BUG_ON(!seqno);
+	unsigned int cpu;
 
	/*
	 * Only wait for the request if we know it is likely to complete.
@@ -1044,12 +1042,12 @@ static bool __i915_spin_request(const struct i915_request *rq,
	 * We don't track the timestamps around requests, nor the average
	 * request length, so we do not have a good indicator that this
	 * request will complete within the timeout. What we do know is the
-	 * order in which requests are executed by the engine and so we can
-	 * tell if the request has started. If the request hasn't started yet,
-	 * it is a fair assumption that it will not complete within our
-	 * relatively short timeout.
+	 * order in which requests are executed by the context and so we can
+	 * tell if the request has been started. If the request is not even
+	 * running yet, it is a fair assumption that it will not complete
+	 * within our relatively short timeout.
	 */
-	if (!intel_engine_has_started(engine, seqno))
+	if (!i915_request_is_running(rq))
		return false;
 
	/*
@@ -1063,20 +1061,10 @@ static bool __i915_spin_request(const struct i915_request *rq,
	 * takes to sleep on a request, on the order of a microsecond.
	 */
 
-	irq = READ_ONCE(engine->breadcrumbs.irq_count);
	timeout_us += local_clock_us(&cpu);
	do {
-		if (intel_engine_has_completed(engine, seqno))
-			return seqno == i915_request_global_seqno(rq);
-
-		/*
-		 * Seqno are meant to be ordered *before* the interrupt. If
-		 * we see an interrupt without a corresponding seqno advance,
-		 * assume we won't see one in the near future but require
-		 * the engine->seqno_barrier() to fixup coherency.
-		 */
-		if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
-			break;
+		if (i915_request_completed(rq))
+			return true;
 
		if (signal_pending_state(state, current))
			break;
@@ -1090,6 +1078,18 @@ static bool __i915_spin_request(const struct i915_request *rq,
	return false;
 }
 
+struct request_wait {
+	struct dma_fence_cb cb;
+	struct task_struct *tsk;
+};
+
+static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct request_wait *wait = container_of(cb, typeof(*wait), cb);
+
+	wake_up_process(wait->tsk);
+}
+
 /**
  * i915_request_wait - wait until execution of request has finished
  * @rq: the request to wait upon
@@ -1115,8 +1115,7 @@ long i915_request_wait(struct i915_request *rq,
 {
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
			  TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-	DEFINE_WAIT_FUNC(exec, default_wake_function);
-	struct intel_wait wait;
+	struct request_wait wait;
 
	might_sleep();
	GEM_BUG_ON(timeout < 0);
@@ -1128,47 +1127,24 @@ long i915_request_wait(struct i915_request *rq,
		return -ETIME;
 
	trace_i915_request_wait_begin(rq, flags);
 
-	add_wait_queue(&rq->execute, &exec);
-	intel_wait_init(&wait);
-	if (flags & I915_WAIT_PRIORITY)
-		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
-
-restart:
-	do {
-		set_current_state(state);
-		if (intel_wait_update_request(&wait, rq))
-			break;
-
-		if (signal_pending_state(state, current)) {
-			timeout = -ERESTARTSYS;
-			goto complete;
-		}
-
-		if (!timeout) {
-			timeout = -ETIME;
-			goto complete;
-		}
-
-		timeout = io_schedule_timeout(timeout);
-	} while (1);
-
-	GEM_BUG_ON(!intel_wait_has_seqno(&wait));
-	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
-
	/* Optimistic short spin before touching IRQs */
-	if (__i915_spin_request(rq, wait.seqno, state, 5))
-		goto complete;
+	if (__i915_spin_request(rq, state, 5))
+		goto out;
 
-	set_current_state(state);
-	if (intel_engine_add_wait(rq->engine, &wait))
-		/*
-		 * In order to check that we haven't missed the interrupt
-		 * as we enabled it, we need to kick ourselves to do a
-		 * coherent check on the seqno before we sleep.
-		 */
-		goto wakeup;
+	if (flags & I915_WAIT_PRIORITY)
+		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
+
+	wait.tsk = current;
+	if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
+		goto out;
 
	for (;;) {
+		set_current_state(state);
+
+		if (i915_request_completed(rq))
+			break;
+
		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
@@ -1180,33 +1156,13 @@ long i915_request_wait(struct i915_request *rq,
		}
 
		timeout = io_schedule_timeout(timeout);
-
-		if (intel_wait_complete(&wait) &&
-		    intel_wait_check_request(&wait, rq))
-			break;
-
-		set_current_state(state);
-
-wakeup:
-		if (i915_request_completed(rq))
-			break;
-
-		/* Only spin if we know the GPU is processing this request */
-		if (__i915_spin_request(rq, wait.seqno, state, 2))
-			break;
-
-		if (!intel_wait_check_request(&wait, rq)) {
-			intel_engine_remove_wait(rq->engine, &wait);
-			goto restart;
-		}
	}
-
-	intel_engine_remove_wait(rq->engine, &wait);
-complete:
	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&rq->execute, &exec);
-	trace_i915_request_wait_end(rq);
 
+	dma_fence_remove_callback(&rq->fence, &wait.cb);
+
+out:
+	trace_i915_request_wait_end(rq);
	return timeout;
 }
@@ -38,23 +38,34 @@ struct drm_i915_gem_object;
 struct i915_request;
 struct i915_timeline;
 
-struct intel_wait {
-	struct rb_node node;
-	struct task_struct *tsk;
-	struct i915_request *request;
-	u32 seqno;
-};
-
-struct intel_signal_node {
-	struct intel_wait wait;
-	struct list_head link;
-};
-
 struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
 };
 
+enum {
+	/*
+	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
+	 *
+	 * Set by __i915_request_submit() on handing over to HW, and cleared
+	 * by __i915_request_unsubmit() if we preempt this request.
+	 *
+	 * Finally cleared for consistency on retiring the request, when
+	 * we know the HW is no longer running this request.
+	 *
+	 * See i915_request_is_active()
+	 */
+	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
+
+	/*
+	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
+	 *
+	 * Internal bookkeeping used by the breadcrumb code to track when
+	 * a request is on the various signal_list.
+	 */
+	I915_FENCE_FLAG_SIGNAL,
+};
+
 /**
  * Request queue structure.
  *
@@ -97,7 +108,7 @@ struct i915_request {
	struct intel_context *hw_context;
	struct intel_ring *ring;
	struct i915_timeline *timeline;
-	struct intel_signal_node signaling;
+	struct list_head signal_link;
 
	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
@@ -116,7 +127,6 @@ struct i915_request {
	 */
	struct i915_sw_fence submit;
	wait_queue_entry_t submitq;
-	wait_queue_head_t execute;
 
	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
@@ -255,7 +265,7 @@ i915_request_put(struct i915_request *rq)
 * that it has passed the global seqno and the global seqno is unchanged
 * after the read, it is indeed complete).
 */
-static u32
+static inline u32
 i915_request_global_seqno(const struct i915_request *request)
 {
	return READ_ONCE(request->global_seqno);
void __i915_request_unsubmit(struct i915_request *request); void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request); void i915_request_unsubmit(struct i915_request *request);
/* Note: part of the intel_breadcrumbs family */
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);
long i915_request_wait(struct i915_request *rq, long i915_request_wait(struct i915_request *rq,
unsigned int flags, unsigned int flags,
long timeout) long timeout)
@@ -293,6 +307,11 @@ static inline bool i915_request_signaled(const struct i915_request *rq)
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
 }
 
+static inline bool i915_request_is_active(const struct i915_request *rq)
+{
+	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+}
+
 /**
  * Returns true if seq1 is later than seq2.
  */
@@ -330,6 +349,11 @@ static inline u32 hwsp_seqno(const struct i915_request *rq)
	return seqno;
 }
 
+static inline bool __i915_request_has_started(const struct i915_request *rq)
+{
+	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
+}
+
 /**
  * i915_request_started - check if the request has begun being executed
  * @rq: the request
@@ -345,7 +369,23 @@ static inline bool i915_request_started(const struct i915_request *rq)
		return true;
 
	/* Remember: started but may have since been preempted! */
-	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
+	return __i915_request_has_started(rq);
+}
+
+/**
+ * i915_request_is_running - check if the request may actually be executing
+ * @rq: the request
+ *
+ * Returns true if the request is currently submitted to hardware, has passed
+ * its start point (i.e. the context is setup and not busywaiting). Note that
+ * it may no longer be running by the time the function returns!
+ */
+static inline bool i915_request_is_running(const struct i915_request *rq)
+{
+	if (!i915_request_is_active(rq))
+		return false;
+
+	return __i915_request_has_started(rq);
 }
 
 static inline bool i915_request_completed(const struct i915_request *rq)
@@ -29,7 +29,7 @@ static void engine_skip_context(struct i915_request *rq)
 
	spin_lock(&timeline->lock);
 
-	if (rq->global_seqno) {
+	if (i915_request_is_active(rq)) {
		list_for_each_entry_continue(rq,
					     &engine->timeline.requests, link)
			if (rq->gem_context == hung_ctx)
@@ -751,18 +751,20 @@ static void reset_restart(struct drm_i915_private *i915)
 
 static void nop_submit_request(struct i915_request *request)
 {
+	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;
 
	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
-		  request->engine->name,
-		  request->fence.context, request->fence.seqno);
+		  engine->name, request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);
 
-	spin_lock_irqsave(&request->engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->timeline.lock, flags);
	__i915_request_submit(request);
	i915_request_mark_complete(request);
-	intel_engine_write_global_seqno(request->engine, request->global_seqno);
-	spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
+	intel_engine_write_global_seqno(engine, request->global_seqno);
+	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+	intel_engine_queue_breadcrumbs(engine);
 }
 
 void i915_gem_set_wedged(struct drm_i915_private *i915)
@@ -817,7 +819,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 
	for_each_engine(engine, i915, id) {
		reset_finish_engine(engine);
-		intel_engine_wakeup(engine);
+		intel_engine_signal_breadcrumbs(engine);
	}
 
	smp_mb__before_atomic();
@@ -243,7 +243,7 @@ static bool inflight(const struct i915_request *rq,
 {
	const struct i915_request *active;
 
-	if (!rq->global_seqno)
+	if (!i915_request_is_active(rq))
		return false;
 
	active = port_request(engine->execlists.port);
(collapsed diff not shown)
@@ -458,12 +458,6 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
 void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 
-	/* After manually advancing the seqno, fake the interrupt in case
-	 * there are any waiters for that seqno.
-	 */
-	intel_engine_wakeup(engine);
-
	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
 }
@@ -607,6 +601,7 @@ int intel_engine_setup_common(struct intel_engine_cs *engine)
 
	i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
 
+	intel_engine_init_breadcrumbs(engine);
	intel_engine_init_execlist(engine);
	intel_engine_init_hangcheck(engine);
	intel_engine_init_batch_pool(engine);
@@ -717,20 +712,14 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
		}
	}
 
-	ret = intel_engine_init_breadcrumbs(engine);
-	if (ret)
-		goto err_unpin_preempt;
-
	ret = measure_breadcrumb_dw(engine);
	if (ret < 0)
-		goto err_breadcrumbs;
+		goto err_unpin_preempt;
 
	engine->emit_fini_breadcrumb_dw = ret;
 
	return 0;
 
-err_breadcrumbs:
-	intel_engine_fini_breadcrumbs(engine);
 err_unpin_preempt:
	if (i915->preempt_context)
		__intel_context_unpin(i915->preempt_context, engine);
@@ -1294,12 +1283,14 @@ static void print_request(struct drm_printer *m,
 
	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
 
-	drm_printf(m, "%s%x%s [%llx:%llx]%s @ %dms: %s\n",
+	drm_printf(m, "%s%x%s%s [%llx:%llx]%s @ %dms: %s\n",
		   prefix,
		   rq->global_seqno,
		   i915_request_completed(rq) ? "!" :
		   i915_request_started(rq) ? "*" :
		   "",
+		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+			    &rq->fence.flags) ? "+" : "",
		   rq->fence.context, rq->fence.seqno,
		   buf,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
@@ -1492,12 +1483,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
 {
-	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
-	unsigned long flags;
-	struct rb_node *rb;
 
	if (header) {
		va_list ap;
@@ -1565,21 +1553,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
	intel_execlists_show_requests(engine, m, print_request, 8);
 
-	spin_lock_irqsave(&b->rb_lock, flags);
-	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
-		struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
-		drm_printf(m, "\t%s [%d:%c] waiting for %x\n",
-			   w->tsk->comm, w->tsk->pid,
-			   task_state_to_char(w->tsk),
-			   w->seqno);
-	}
-	spin_unlock_irqrestore(&b->rb_lock, flags);
-
	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.addr, PAGE_SIZE);
 
	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
+
+	intel_engine_print_breadcrumbs(engine, m);
 }
 
 static u8 user_class_map[] = {
@@ -743,7 +743,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
	}
 
	/* Papering over lost _interrupts_ immediately following the restart */
-	intel_engine_wakeup(engine);
+	intel_engine_queue_breadcrumbs(engine);
 out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5,6 +5,7 @@
 #include <drm/drm_util.h>
 
 #include <linux/hashtable.h>
+#include <linux/irq_work.h>
 #include <linux/seqlock.h>
 
 #include "i915_gem_batch_pool.h"
@@ -381,22 +382,19 @@ struct intel_engine_cs {
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
-		spinlock_t irq_lock; /* protects irq_*; irqsafe */
-		struct intel_wait *irq_wait; /* oldest waiter by retirement */
+		spinlock_t irq_lock;
+		struct list_head signalers;
 
-		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
-		struct rb_root waiters; /* sorted by retirement, priority */
-		struct list_head signals; /* sorted by retirement */
-		struct task_struct *signaler; /* used for fence signalling */
+		struct irq_work irq_work; /* for use from inside irq_lock */
 
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */
 
		unsigned int hangcheck_interrupts;
		unsigned int irq_enabled;
-		unsigned int irq_count;
 
-		bool irq_armed : 1;
+		bool irq_armed;
+		bool irq_fired;
	} breadcrumbs;
 
	struct {
@@ -885,83 +883,29 @@ static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);
 
-/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
-int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
-
-static inline void intel_wait_init(struct intel_wait *wait)
-{
-	wait->tsk = current;
-	wait->request = NULL;
-}
-
-static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
-{
-	wait->tsk = current;
-	wait->seqno = seqno;
-}
-
-static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
-{
-	return wait->seqno;
-}
-
-static inline bool
-intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
-{
-	wait->seqno = seqno;
-	return intel_wait_has_seqno(wait);
-}
-
-static inline bool
-intel_wait_update_request(struct intel_wait *wait,
-			  const struct i915_request *rq)
-{
-	return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
-}
-
-static inline bool
-intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
-{
-	return wait->seqno == seqno;
-}
-
-static inline bool
-intel_wait_check_request(const struct intel_wait *wait,
-			 const struct i915_request *rq)
-{
-	return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
-}
-
-static inline bool intel_wait_complete(const struct intel_wait *wait)
-{
-	return RB_EMPTY_NODE(&wait->node);
-}
-
-bool intel_engine_add_wait(struct intel_engine_cs *engine,
-			   struct intel_wait *wait);
-void intel_engine_remove_wait(struct intel_engine_cs *engine,
-			      struct intel_wait *wait);
-bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
-void intel_engine_cancel_signaling(struct i915_request *request);
-
-static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
-{
-	return READ_ONCE(engine->breadcrumbs.irq_wait);
-}
-
-unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
-#define ENGINE_WAKEUP_WAITER BIT(0)
-#define ENGINE_WAKEUP_ASLEEP BIT(1)
-
-void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
-void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
-
-void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
-void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
+
+void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
+void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
+
+bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
+
+static inline void
+intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
+{
+	irq_work_queue(&engine->breadcrumbs.irq_work);
+}
+
+bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
 
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 
+void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
+				    struct drm_printer *p);
+
 static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
 {
	memset(batch, 0, 6 * sizeof(u32));
@@ -15,7 +15,6 @@ selftest(scatterlist, scatterlist_mock_selftests)
 selftest(syncmap, i915_syncmap_mock_selftests)
 selftest(uncore, intel_uncore_mock_selftests)
 selftest(engine, intel_engine_cs_mock_selftests)
-selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
 selftest(timelines, i915_timeline_mock_selftests)
 selftest(requests, i915_request_mock_selftests)
 selftest(objects, i915_gem_object_mock_selftests)
@@ -185,11 +185,6 @@ void igt_spinner_fini(struct igt_spinner *spin)
 
 bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
 {
-	if (!wait_event_timeout(rq->execute,
-				READ_ONCE(rq->global_seqno),
-				msecs_to_jiffies(10)))
-		return false;
-
	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
(collapsed diff not shown)
@@ -1127,7 +1127,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 
	wait_for_completion(&arg.completion);
 
-	if (wait_for(waitqueue_active(&rq->execute), 10)) {
+	if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
		struct drm_printer p = drm_info_printer(i915->drm.dev);
 
		pr_err("igt/evict_vma kthread did not wait\n");
@@ -76,3 +76,57 @@ void timed_fence_fini(struct timed_fence *tf)
	destroy_timer_on_stack(&tf->timer);
	i915_sw_fence_fini(&tf->fence);
 }
+
+struct heap_fence {
+	struct i915_sw_fence fence;
+	union {
+		struct kref ref;
+		struct rcu_head rcu;
+	};
+};
+
+static int __i915_sw_fence_call
+heap_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+	struct heap_fence *h = container_of(fence, typeof(*h), fence);
+
+	switch (state) {
+	case FENCE_COMPLETE:
+		break;
+
+	case FENCE_FREE:
+		heap_fence_put(&h->fence);
+	}
+
+	return NOTIFY_DONE;
+}
+
+struct i915_sw_fence *heap_fence_create(gfp_t gfp)
+{
+	struct heap_fence *h;
+
+	h = kmalloc(sizeof(*h), gfp);
+	if (!h)
+		return NULL;
+
+	i915_sw_fence_init(&h->fence, heap_fence_notify);
+	refcount_set(&h->ref.refcount, 2);
+
+	return &h->fence;
+}
+
+static void heap_fence_release(struct kref *ref)
+{
+	struct heap_fence *h = container_of(ref, typeof(*h), ref);
+
+	i915_sw_fence_fini(&h->fence);
+
+	kfree_rcu(h, rcu);
+}
+
+void heap_fence_put(struct i915_sw_fence *fence)
+{
+	struct heap_fence *h = container_of(fence, typeof(*h), fence);
+
+	kref_put(&h->ref, heap_fence_release);
+}
@@ -39,4 +39,7 @@ struct timed_fence {
 void timed_fence_init(struct timed_fence *tf, unsigned long expires);
 void timed_fence_fini(struct timed_fence *tf);
 
+struct i915_sw_fence *heap_fence_create(gfp_t gfp);
+void heap_fence_put(struct i915_sw_fence *fence);
+
 #endif /* _LIB_SW_FENCE_H_ */
@@ -86,17 +86,21 @@ static struct mock_request *first_request(struct mock_engine *engine)
 static void advance(struct mock_request *request)
 {
	list_del_init(&request->link);
-	mock_seqno_advance(request->base.engine, request->base.global_seqno);
+	intel_engine_write_global_seqno(request->base.engine,
+					request->base.global_seqno);
	i915_request_mark_complete(&request->base);
	GEM_BUG_ON(!i915_request_completed(&request->base));
+
+	intel_engine_queue_breadcrumbs(request->base.engine);
 }
 
 static void hw_delay_complete(struct timer_list *t)
 {
	struct mock_engine *engine = from_timer(engine, t, hw_delay);
	struct mock_request *request;
+	unsigned long flags;
 
-	spin_lock(&engine->hw_lock);
+	spin_lock_irqsave(&engine->hw_lock, flags);
 
	/* Timer fired, first request is complete */
	request = first_request(engine);
@@ -116,7 +120,7 @@ static void hw_delay_complete(struct timer_list *t)
		advance(request);
	}
 
-	spin_unlock(&engine->hw_lock);
+	spin_unlock_irqrestore(&engine->hw_lock, flags);
 }
 
 static void mock_context_unpin(struct intel_context *ce)
@@ -191,11 +195,12 @@ static void mock_submit_request(struct i915_request *request)
	struct mock_request *mock = container_of(request, typeof(*mock), base);
	struct mock_engine *engine =
		container_of(request->engine, typeof(*engine), base);
+	unsigned long flags;
 
	i915_request_submit(request);
	GEM_BUG_ON(!request->global_seqno);
 
-	spin_lock_irq(&engine->hw_lock);
+	spin_lock_irqsave(&engine->hw_lock, flags);
	list_add_tail(&mock->link, &engine->hw_queue);
	if (mock->link.prev == &engine->hw_queue) {
		if (mock->delay)
@@ -203,7 +208,7 @@ static void mock_submit_request(struct i915_request *request)
		else
			advance(mock);
	}
-	spin_unlock_irq(&engine->hw_lock);
+	spin_unlock_irqrestore(&engine->hw_lock, flags);
 }
 
 struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
@@ -273,7 +278,7 @@ void mock_engine_flush(struct intel_engine_cs *engine)
 
 void mock_engine_reset(struct intel_engine_cs *engine)
 {
-	intel_write_status_page(engine, I915_GEM_HWS_INDEX, 0);
+	intel_engine_write_global_seqno(engine, 0);
 }
 
 void mock_engine_free(struct intel_engine_cs *engine)
@@ -46,10 +46,4 @@ void mock_engine_flush(struct intel_engine_cs *engine);
 void mock_engine_reset(struct intel_engine_cs *engine);
 void mock_engine_free(struct intel_engine_cs *engine);
 
-static inline void mock_seqno_advance(struct intel_engine_cs *engine, u32 seqno)
-{
-	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-	intel_engine_wakeup(engine);
-}
-
 #endif /* !__MOCK_ENGINE_H__ */