Commit 2854d866 authored by Chris Wilson, committed by Joonas Lahtinen

drm/i915/gt: Replace intel_engine_transfer_stale_breadcrumbs

After staring at the breadcrumb enabling/cancellation and coming to the
conclusion that the cause of the mysterious stale breadcrumbs must be
the act of submitting an already-completed request, we can redirect
those completed requests onto a dedicated signaled_list at the time of
construction and so eliminate intel_engine_transfer_stale_breadcrumbs().
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200731154834.8378-2-chris@chris-wilson.co.uk
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent c18636f7
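
In outline, the change below makes the enabling path itself responsible for
already-completed requests: instead of attaching them to the per-context
signal list and later transferring them between engines, insert_breadcrumb()
puts them straight onto b->signaled_requests and kicks the irq worker. The
following is a rough, self-contained sketch of that decision, not the driver
code: struct request, struct breadcrumbs, push() and enable_breadcrumb() are
simplified stand-ins invented for illustration; the real identifiers
(__request_completed(), __signal_request(), signaled_requests,
irq_work_queue()) appear in the diff that follows.

/*
 * Illustrative sketch only: models the early-completion decision that the
 * patch adds to insert_breadcrumb(). All types and helpers are simplified
 * stand-ins, not the i915 structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct request {
	const char *name;
	bool completed;        /* stand-in for __request_completed(rq) */
	struct request *next;  /* single-linked stand-in for a list_head */
};

struct breadcrumbs {
	struct request *signalers;          /* waiting for the breadcrumb interrupt */
	struct request *signaled_requests;  /* already complete, pending the worker */
	bool irq_work_queued;               /* stand-in for irq_work_queue() */
};

static void push(struct request **list, struct request *rq)
{
	rq->next = *list;
	*list = rq;
}

/* Mirrors the new early-completion path added to insert_breadcrumb(). */
static void enable_breadcrumb(struct breadcrumbs *b, struct request *rq)
{
	if (rq->completed) {
		/* Already done: bypass the interrupt machinery entirely. */
		push(&b->signaled_requests, rq);
		b->irq_work_queued = true;  /* kick the deferred signal worker */
		return;
	}

	/* Otherwise wait for the breadcrumb interrupt as before. */
	push(&b->signalers, rq);
}

int main(void)
{
	struct breadcrumbs b = {0};
	struct request done = { .name = "done", .completed = true };
	struct request busy = { .name = "busy", .completed = false };

	enable_breadcrumb(&b, &done);
	enable_breadcrumb(&b, &busy);

	printf("signaled list head: %s, worker queued: %d\n",
	       b.signaled_requests->name, b.irq_work_queued);
	printf("signaler list head: %s\n", b.signalers->name);
	return 0;
}

The second addition to insert_breadcrumb() in the diff (the "check after
attaching to irq" hunk) covers the complementary race, where the request
completes after this early check but before the interrupt has been armed.
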
@@ -142,16 +142,16 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
                 intel_engine_add_retire(engine, tl);
 }
 
-static void __signal_request(struct i915_request *rq, struct list_head *signals)
+static bool __signal_request(struct i915_request *rq, struct list_head *signals)
 {
-        GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
         clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
 
         if (!__dma_fence_signal(&rq->fence))
-                return;
+                return false;
 
         i915_request_get(rq);
         list_add_tail(&rq->signal_link, signals);
+        return true;
 }
 
 static void signal_irq_work(struct irq_work *work)
@@ -278,32 +278,6 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
         spin_unlock_irqrestore(&b->irq_lock, flags);
 }
 
-void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
-                                             struct intel_context *ce)
-{
-        struct intel_breadcrumbs *b = &engine->breadcrumbs;
-        unsigned long flags;
-
-        spin_lock_irqsave(&b->irq_lock, flags);
-
-        if (!list_empty(&ce->signals)) {
-                struct i915_request *rq, *next;
-
-                /* Queue for executing the signal callbacks in the irq_work */
-                list_for_each_entry_safe(rq, next, &ce->signals, signal_link) {
-                        GEM_BUG_ON(rq->engine != engine);
-                        GEM_BUG_ON(!__request_completed(rq));
-                        __signal_request(rq, &b->signaled_requests);
-                }
-
-                INIT_LIST_HEAD(&ce->signals);
-                list_del_init(&ce->signal_link);
-
-                irq_work_queue(&b->irq_work);
-        }
-
-        spin_unlock_irqrestore(&b->irq_lock, flags);
-}
-
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 {
 }
@@ -317,6 +291,17 @@ static void insert_breadcrumb(struct i915_request *rq,
         if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
                 return;
 
+        /*
+         * If the request is already completed, we can transfer it
+         * straight onto a signaled list, and queue the irq worker for
+         * its signal completion.
+         */
+        if (__request_completed(rq)) {
+                if (__signal_request(rq, &b->signaled_requests))
+                        irq_work_queue(&b->irq_work);
+                return;
+        }
+
         __intel_breadcrumbs_arm_irq(b);
 
         /*
@@ -344,8 +329,11 @@ static void insert_breadcrumb(struct i915_request *rq,
         if (pos == &ce->signals) /* catch transitions from empty list */
                 list_move_tail(&ce->signal_link, &b->signalers);
         GEM_BUG_ON(!check_signal_order(ce, rq));
-
         set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+        /* Check after attaching to irq, interrupt may have already fired. */
+        if (__request_completed(rq))
+                irq_work_queue(&b->irq_work);
 }
 
 bool i915_request_enable_breadcrumb(struct i915_request *rq)
@@ -401,7 +389,7 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
 
         spin_unlock(&b->irq_lock);
 
-        return !__request_completed(rq);
+        return true;
 }
 
 void i915_request_cancel_breadcrumb(struct i915_request *rq)
......
@@ -237,9 +237,6 @@ intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 
-void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
-                                             struct intel_context *ce);
-
 void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
                                     struct drm_printer *p);
......
@@ -1805,18 +1805,6 @@ static bool virtual_matches(const struct virtual_engine *ve,
         return true;
 }
 
-static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
-{
-        /*
-         * All the outstanding signals on ve->siblings[0] must have
-         * been completed, just pending the interrupt handler. As those
-         * signals still refer to the old sibling (via rq->engine), we must
-         * transfer those to the old irq_worker to keep our locking
-         * consistent.
-         */
-        intel_engine_transfer_stale_breadcrumbs(ve->siblings[0], &ve->context);
-}
-
 #define for_each_waiter(p__, rq__) \
         list_for_each_entry_lockless(p__, \
                                      &(rq__)->sched.waiters_list, \
@@ -2267,9 +2255,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                         virtual_update_register_offsets(regs,
                                                                         engine);
 
-                                if (!list_empty(&ve->context.signals))
-                                        virtual_xfer_breadcrumbs(ve);
-
                                 /*
                                  * Move the bound engine to the top of the list
                                  * for future execution. We then kick this
......
@@ -569,9 +569,8 @@ bool __i915_request_submit(struct i915_request *request)
          */
         __notify_execute_cb(request);
 
-        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
-            !i915_request_enable_breadcrumb(request))
-                intel_engine_signal_breadcrumbs(engine);
+        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+                i915_request_enable_breadcrumb(request);
 
         return result;
 }
......