Commit b51c2c67 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2020-11-25' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Work around Perf/OA register corruption (Lionel)
- Correct a comment in GVT (Yan)
- Fix GT enable/disable interrupts, including a race condition that prevented the GPU from going idle (Chris)
- Free stale request on destroying the virtual engine (Chris)
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201126010623.GA827684@intel.com
parents 5ead67bd 280ffdb6
@@ -30,18 +30,21 @@
 #include "i915_trace.h"
 #include "intel_breadcrumbs.h"
 #include "intel_context.h"
+#include "intel_engine_pm.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
 
-static void irq_enable(struct intel_engine_cs *engine)
+static bool irq_enable(struct intel_engine_cs *engine)
 {
         if (!engine->irq_enable)
-                return;
+                return false;
 
         /* Caller disables interrupts */
         spin_lock(&engine->gt->irq_lock);
         engine->irq_enable(engine);
         spin_unlock(&engine->gt->irq_lock);
+
+        return true;
 }
 
 static void irq_disable(struct intel_engine_cs *engine)
@@ -57,12 +60,11 @@ static void irq_disable(struct intel_engine_cs *engine)
 
 static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
 {
-        lockdep_assert_held(&b->irq_lock);
-
-        if (!b->irq_engine || b->irq_armed)
-                return;
-
-        if (!intel_gt_pm_get_if_awake(b->irq_engine->gt))
+        /*
+         * Since we are waiting on a request, the GPU should be busy
+         * and should have its own rpm reference.
+         */
+        if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
                 return;
 
         /*
@@ -73,25 +75,24 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
          */
         WRITE_ONCE(b->irq_armed, true);
 
-        /*
-         * Since we are waiting on a request, the GPU should be busy
-         * and should have its own rpm reference. This is tracked
-         * by i915->gt.awake, we can forgo holding our own wakref
-         * for the interrupt as before i915->gt.awake is released (when
-         * the driver is idle) we disarm the breadcrumbs.
-         */
-
-        if (!b->irq_enabled++)
-                irq_enable(b->irq_engine);
+        /* Requests may have completed before we could enable the interrupt. */
+        if (!b->irq_enabled++ && irq_enable(b->irq_engine))
+                irq_work_queue(&b->irq_work);
+}
+
+static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
+{
+        if (!b->irq_engine)
+                return;
+
+        spin_lock(&b->irq_lock);
+        if (!b->irq_armed)
+                __intel_breadcrumbs_arm_irq(b);
+        spin_unlock(&b->irq_lock);
 }
 
 static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
 {
-        lockdep_assert_held(&b->irq_lock);
-
-        if (!b->irq_engine || !b->irq_armed)
-                return;
-
         GEM_BUG_ON(!b->irq_enabled);
         if (!--b->irq_enabled)
                 irq_disable(b->irq_engine);
@@ -105,8 +106,6 @@ static void add_signaling_context(struct intel_breadcrumbs *b,
 {
         intel_context_get(ce);
         list_add_tail(&ce->signal_link, &b->signalers);
-        if (list_is_first(&ce->signal_link, &b->signalers))
-                __intel_breadcrumbs_arm_irq(b);
 }
 
 static void remove_signaling_context(struct intel_breadcrumbs *b,
@@ -174,34 +173,65 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
 
                 intel_engine_add_retire(b->irq_engine, tl);
 }
 
-static bool __signal_request(struct i915_request *rq, struct list_head *signals)
+static bool __signal_request(struct i915_request *rq)
 {
-        clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
         if (!__dma_fence_signal(&rq->fence)) {
                 i915_request_put(rq);
                 return false;
         }
 
-        list_add_tail(&rq->signal_link, signals);
         return true;
 }
 
+static struct llist_node *
+slist_add(struct llist_node *node, struct llist_node *head)
+{
+        node->next = head;
+        return node;
+}
+
 static void signal_irq_work(struct irq_work *work)
 {
         struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
         const ktime_t timestamp = ktime_get();
+        struct llist_node *signal, *sn;
         struct intel_context *ce, *cn;
         struct list_head *pos, *next;
-        LIST_HEAD(signal);
+
+        signal = NULL;
+        if (unlikely(!llist_empty(&b->signaled_requests)))
+                signal = llist_del_all(&b->signaled_requests);
 
         spin_lock(&b->irq_lock);
 
-        if (list_empty(&b->signalers))
+        /*
+         * Keep the irq armed until the interrupt after all listeners are gone.
+         *
+         * Enabling/disabling the interrupt is rather costly, roughly a couple
+         * of hundred microseconds. If we are proactive and enable/disable
+         * the interrupt around every request that wants a breadcrumb, we
+         * quickly drown in the extra orders of magnitude of latency imposed
+         * on request submission.
+         *
+         * So we try to be lazy, and keep the interrupts enabled until no
+         * more listeners appear within a breadcrumb interrupt interval (that
+         * is until a request completes that no one cares about). The
+         * observation is that listeners come in batches, and will often
+         * listen to a bunch of requests in succession. Though note on icl+,
+         * interrupts are always enabled due to concerns with rc6 being
+         * dysfunctional with per-engine interrupt masking.
+         *
+         * We also try to avoid raising too many interrupts, as they may
+         * be generated by userspace batches and it is unfortunately rather
+         * too easy to drown the CPU under a flood of GPU interrupts. Thus
+         * whenever no one appears to be listening, we turn off the interrupts.
+         * Fewer interrupts should conserve power -- at the very least, fewer
+         * interrupt draw less ire from other users of the system and tools
+         * like powertop.
+         */
+        if (!signal && b->irq_armed && list_empty(&b->signalers))
                 __intel_breadcrumbs_disarm_irq(b);
 
-        list_splice_init(&b->signaled_requests, &signal);
-
         list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
                 GEM_BUG_ON(list_empty(&ce->signals));
@@ -218,7 +248,10 @@ static void signal_irq_work(struct irq_work *work)
                          * spinlock as the callback chain may end up adding
                          * more signalers to the same context or engine.
                          */
-                        __signal_request(rq, &signal);
+                        clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+                        if (__signal_request(rq))
+                                /* We own signal_node now, xfer to local list */
+                                signal = slist_add(&rq->signal_node, signal);
                 }
 
                 /*
@@ -238,9 +271,9 @@ static void signal_irq_work(struct irq_work *work)
 
         spin_unlock(&b->irq_lock);
 
-        list_for_each_safe(pos, next, &signal) {
+        llist_for_each_safe(signal, sn, signal) {
                 struct i915_request *rq =
-                        list_entry(pos, typeof(*rq), signal_link);
+                        llist_entry(signal, typeof(*rq), signal_node);
                 struct list_head cb_list;
 
                 spin_lock(&rq->lock);
@@ -251,6 +284,9 @@ static void signal_irq_work(struct irq_work *work)
 
                 i915_request_put(rq);
         }
+
+        if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
+                intel_breadcrumbs_arm_irq(b);
 }
 
 struct intel_breadcrumbs *
@@ -264,7 +300,7 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
 
         spin_lock_init(&b->irq_lock);
         INIT_LIST_HEAD(&b->signalers);
-        INIT_LIST_HEAD(&b->signaled_requests);
+        init_llist_head(&b->signaled_requests);
 
         init_irq_work(&b->irq_work, signal_irq_work);
 
@@ -292,21 +328,22 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
 
 void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
 {
-        unsigned long flags;
-
-        if (!READ_ONCE(b->irq_armed))
-                return;
-
-        spin_lock_irqsave(&b->irq_lock, flags);
-        __intel_breadcrumbs_disarm_irq(b);
-        spin_unlock_irqrestore(&b->irq_lock, flags);
-
-        if (!list_empty(&b->signalers))
-                irq_work_queue(&b->irq_work);
+        /* Kick the work once more to drain the signalers */
+        irq_work_sync(&b->irq_work);
+        while (unlikely(READ_ONCE(b->irq_armed))) {
+                local_irq_disable();
+                signal_irq_work(&b->irq_work);
+                local_irq_enable();
+                cond_resched();
+        }
+        GEM_BUG_ON(!list_empty(&b->signalers));
 }
 
 void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
 {
+        irq_work_sync(&b->irq_work);
+        GEM_BUG_ON(!list_empty(&b->signalers));
+        GEM_BUG_ON(b->irq_armed);
         kfree(b);
 }
 
@@ -327,7 +364,8 @@ static void insert_breadcrumb(struct i915_request *rq,
          * its signal completion.
          */
         if (__request_completed(rq)) {
-                if (__signal_request(rq, &b->signaled_requests))
+                if (__signal_request(rq) &&
+                    llist_add(&rq->signal_node, &b->signaled_requests))
                         irq_work_queue(&b->irq_work);
                 return;
         }
@@ -362,8 +400,11 @@ static void insert_breadcrumb(struct i915_request *rq,
         GEM_BUG_ON(!check_signal_order(ce, rq));
         set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
 
-        /* Check after attaching to irq, interrupt may have already fired. */
-        if (__request_completed(rq))
-                irq_work_queue(&b->irq_work);
+        /*
+         * Defer enabling the interrupt to after HW submission and recheck
+         * the request as it may have completed and raised the interrupt as
+         * we were attaching it into the lists.
+         */
+        irq_work_queue(&b->irq_work);
 }
 
......
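A note on the mechanism the breadcrumbs rework above leans on: signal_irq_work() is an irq_work callback, queued from interrupt context with irq_work_queue() and drained with irq_work_sync() in intel_breadcrumbs_park()/intel_breadcrumbs_free(). The following is only an illustrative sketch of that generic <linux/irq_work.h> pattern, with hypothetical names (pending_events, drain_events), not the driver code itself:

#include <linux/irq_work.h>
#include <linux/atomic.h>

static atomic_t pending_events = ATOMIC_INIT(0);        /* hypothetical producer state */
static struct irq_work drain_work;                       /* hypothetical work item */

/* Runs in IRQ context shortly after irq_work_queue(). */
static void drain_events(struct irq_work *work)
{
        atomic_set(&pending_events, 0);
}

static void producer_init(void)
{
        init_irq_work(&drain_work, drain_events);
}

/* Safe to call from interrupt context; queueing is a no-op if already pending. */
static void producer_notify(void)
{
        atomic_inc(&pending_events);
        irq_work_queue(&drain_work);
}

/* Process context: wait for any callback still running before teardown. */
static void producer_teardown(void)
{
        irq_work_sync(&drain_work);
}

intel_breadcrumbs_park() above goes one step further and, after the sync, invokes the handler directly under local_irq_disable()/local_irq_enable() to drain any remaining signalers while parking.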
@@ -35,7 +35,7 @@ struct intel_breadcrumbs {
         struct intel_engine_cs *irq_engine;
 
         struct list_head signalers;
-        struct list_head signaled_requests;
+        struct llist_head signaled_requests;
 
         struct irq_work irq_work; /* for use from inside irq_lock */
......
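The type change above, list_head to llist_head for signaled_requests, is what lets insert_breadcrumb() hand already-completed requests to the signaler without taking b->irq_lock. A rough sketch of the <linux/llist.h> usage under hypothetical names (done_item, done_list), not the real request structures:

#include <linux/llist.h>
#include <linux/slab.h>

struct done_item {
        int id;
        struct llist_node node;
};

static LLIST_HEAD(done_list);

/*
 * Producer side: may run concurrently, even from IRQ context.
 * llist_add() returns true only when the list was previously empty.
 */
static bool report_done(struct done_item *item)
{
        return llist_add(&item->node, &done_list);
}

/*
 * Consumer side: detach the whole chain atomically, then walk it
 * without holding any lock.
 */
static void consume_done(void)
{
        struct llist_node *pos, *n;

        llist_for_each_safe(pos, n, llist_del_all(&done_list)) {
                struct done_item *item = llist_entry(pos, struct done_item, node);

                kfree(item);
        }
}

That "first addition returns true" property is what the "if (__signal_request(rq) && llist_add(...)) irq_work_queue(...)" test in insert_breadcrumb() above relies on to kick the worker only once per batch of completed requests.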
@@ -182,6 +182,7 @@
 struct virtual_engine {
         struct intel_engine_cs base;
         struct intel_context context;
+        struct rcu_work rcu;
 
         /*
          * We allow only a single request through the virtual engine at a time
@@ -5425,33 +5426,57 @@ static struct list_head *virtual_queue(struct virtual_engine *ve)
         return &ve->base.execlists.default_priolist.requests[0];
 }
 
-static void virtual_context_destroy(struct kref *kref)
+static void rcu_virtual_context_destroy(struct work_struct *wrk)
 {
         struct virtual_engine *ve =
-                container_of(kref, typeof(*ve), context.ref);
+                container_of(wrk, typeof(*ve), rcu.work);
         unsigned int n;
 
-        GEM_BUG_ON(!list_empty(virtual_queue(ve)));
-        GEM_BUG_ON(ve->request);
         GEM_BUG_ON(ve->context.inflight);
 
+        /* Preempt-to-busy may leave a stale request behind. */
+        if (unlikely(ve->request)) {
+                struct i915_request *old;
+
+                spin_lock_irq(&ve->base.active.lock);
+
+                old = fetch_and_zero(&ve->request);
+                if (old) {
+                        GEM_BUG_ON(!i915_request_completed(old));
+                        __i915_request_submit(old);
+                        i915_request_put(old);
+                }
+
+                spin_unlock_irq(&ve->base.active.lock);
+        }
+
+        /*
+         * Flush the tasklet in case it is still running on another core.
+         *
+         * This needs to be done before we remove ourselves from the siblings'
+         * rbtrees as in the case it is running in parallel, it may reinsert
+         * the rb_node into a sibling.
+         */
+        tasklet_kill(&ve->base.execlists.tasklet);
+
+        /* Decouple ourselves from the siblings, no more access allowed. */
         for (n = 0; n < ve->num_siblings; n++) {
                 struct intel_engine_cs *sibling = ve->siblings[n];
                 struct rb_node *node = &ve->nodes[sibling->id].rb;
-                unsigned long flags;
 
                 if (RB_EMPTY_NODE(node))
                         continue;
 
-                spin_lock_irqsave(&sibling->active.lock, flags);
+                spin_lock_irq(&sibling->active.lock);
 
                 /* Detachment is lazily performed in the execlists tasklet */
                 if (!RB_EMPTY_NODE(node))
                         rb_erase_cached(node, &sibling->execlists.virtual);
 
-                spin_unlock_irqrestore(&sibling->active.lock, flags);
+                spin_unlock_irq(&sibling->active.lock);
         }
         GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+        GEM_BUG_ON(!list_empty(virtual_queue(ve)));
 
         if (ve->context.state)
                 __execlists_context_fini(&ve->context);
@@ -5464,6 +5489,27 @@ static void virtual_context_destroy(struct kref *kref)
         kfree(ve);
 }
 
+static void virtual_context_destroy(struct kref *kref)
+{
+        struct virtual_engine *ve =
+                container_of(kref, typeof(*ve), context.ref);
+
+        GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+        /*
+         * When destroying the virtual engine, we have to be aware that
+         * it may still be in use from an hardirq/softirq context causing
+         * the resubmission of a completed request (background completion
+         * due to preempt-to-busy). Before we can free the engine, we need
+         * to flush the submission code and tasklets that are still potentially
+         * accessing the engine. Flushing the tasklets requires process context,
+         * and since we can guard the resubmit onto the engine with an RCU read
+         * lock, we can delegate the free of the engine to an RCU worker.
+         */
+        INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+        queue_rcu_work(system_wq, &ve->rcu);
+}
+
 static void virtual_engine_initial_hint(struct virtual_engine *ve)
 {
         int swp;
......
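The virtual-engine fix above defers the final teardown to rcu_virtual_context_destroy() through an rcu_work: queue_rcu_work() waits for an RCU grace period and only then runs the callback in process context, where tasklet_kill() and the stale-request cleanup are allowed. A hedged sketch of that generic pattern, with hypothetical names (widget, widget_release), not the driver code:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct widget {
        struct rcu_work rcu;
        /* ... payload that RCU readers may still be dereferencing ... */
};

static void widget_free_worker(struct work_struct *wrk)
{
        struct widget *w = container_of(wrk, struct widget, rcu.work);

        /* Process context, after a grace period: safe to flush/sleep/free. */
        kfree(w);
}

/* Called when the last reference is dropped. */
static void widget_release(struct widget *w)
{
        INIT_RCU_WORK(&w->rcu, widget_free_worker);
        queue_rcu_work(system_wq, &w->rcu);
}

This gives exactly the ordering the comment in virtual_context_destroy() asks for: any resubmission guarded by an RCU read lock finishes before the worker frees the engine.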
@@ -255,7 +255,7 @@ struct intel_gvt_mmio {
 #define F_CMD_ACCESS    (1 << 3)
 /* This reg has been accessed by a VM */
 #define F_ACCESSED      (1 << 4)
-/* This reg has been accessed through GPU commands */
+/* This reg could be accessed by unaligned address */
 #define F_UNALIGN       (1 << 6)
 /* This reg is in GVT's mmio save-restor list and in hardware
  * logical context image
......
@@ -909,8 +909,13 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
                                              DRM_I915_PERF_RECORD_OA_REPORT_LOST);
                 if (ret)
                         return ret;
-                intel_uncore_write(uncore, oastatus_reg,
-                                   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
+
+                intel_uncore_rmw(uncore, oastatus_reg,
+                                 GEN8_OASTATUS_COUNTER_OVERFLOW |
+                                 GEN8_OASTATUS_REPORT_LOST,
+                                 IS_GEN_RANGE(uncore->i915, 8, 10) ?
+                                 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
+                                  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
         }
 
         return gen8_append_oa_reports(stream, buf, count, offset);
......
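The perf fix above replaces a plain write of OASTATUS with intel_uncore_rmw(), clearing only the handled status bits and, on Gen8 through Gen10, also setting the head/tail pointer wrap bits as part of the workaround. The clear/set arguments boil down to a read-modify-write; the helper below is only an illustration of that shape (readl/writel on an already-mapped register), not the i915 accessor itself:

#include <linux/io.h>
#include <linux/types.h>

/* new = (old & ~clear) | set -- same shape as intel_uncore_rmw(uncore, reg, clear, set). */
static void reg_rmw(void __iomem *reg, u32 clear, u32 set)
{
        u32 val = readl(reg);

        val &= ~clear;  /* e.g. COUNTER_OVERFLOW | REPORT_LOST above */
        val |= set;     /* e.g. HEAD/TAIL_POINTER_WRAP on Gen8-10 */
        writel(val, reg);
}

Compared with the old unconditional write, the read-modify-write leaves any status bits the driver did not intend to touch exactly as the hardware reported them.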
@@ -676,6 +676,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN7_OASTATUS2_MEM_SELECT_GGTT     (1 << 0) /* 0: PPGTT, 1: GGTT */
 #define GEN8_OASTATUS _MMIO(0x2b08)
+#define  GEN8_OASTATUS_TAIL_POINTER_WRAP    (1 << 17)
+#define  GEN8_OASTATUS_HEAD_POINTER_WRAP    (1 << 16)
 #define  GEN8_OASTATUS_OVERRUN_STATUS       (1 << 3)
 #define  GEN8_OASTATUS_COUNTER_OVERFLOW     (1 << 2)
 #define  GEN8_OASTATUS_OABUFFER_OVERFLOW    (1 << 1)
......
@@ -176,7 +176,11 @@ struct i915_request {
         struct intel_context *context;
         struct intel_ring *ring;
         struct intel_timeline __rcu *timeline;
-        struct list_head signal_link;
+
+        union {
+                struct list_head signal_link;
+                struct llist_node signal_node;
+        };
 
         /*
          * The rcu epoch of when this request was allocated. Used to judiciously
......
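The i915_request change above overlays the two link types in an anonymous union: a request sits either on its context's signal list (under the breadcrumbs lock) or, once signaled, on the lock-free signaled_requests chain, never both at once, so sharing the storage is safe and adds no size. A tiny illustration, where example_links is only a made-up stand-in for struct i915_request:

#include <linux/list.h>
#include <linux/llist.h>
#include <linux/build_bug.h>

struct example_links {
        union {
                struct list_head signal_link;   /* while waiting, under the lock */
                struct llist_node signal_node;  /* once signaled, lock-free chain */
        };
};

/* The union costs no more than its larger member (two pointers). */
static_assert(sizeof(struct example_links) == sizeof(struct list_head));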