Commit 9eb143bb authored by Chris Wilson

drm/i915: Allow a request to be cancelled

If we preempt a request and remove it from the execution queue, we need
to undo its global seqno and restart any waiters.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170223074422.4125-11-chris@chris-wilson.co.uk
parent cced5e2f
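The new intel_engine_cancel_signaling() added below is meant to be called with request->lock held by whatever code unwinds a preempted request: it drops the request from the signal tree and retracts the intel_wait the signaler had planted, after which the caller can revoke the global seqno. A minimal caller sketch, assuming the usual i915 request types; the wrapper name and the surrounding unwind step are hypothetical and not part of this patch:

/*
 * Hypothetical unwind helper, for illustration only: of the functions
 * used here, only intel_engine_cancel_signaling() comes from this patch.
 */
static void cancel_request_signaling(struct drm_i915_gem_request *request)
{
	unsigned long flags;

	/* Serialise against the signaler completing/retiring the request. */
	spin_lock_irqsave(&request->lock, flags);

	/* Only cancel if signaling was actually enabled for this request. */
	if (request->signaling.wait.seqno)
		intel_engine_cancel_signaling(request);

	spin_unlock_irqrestore(&request->lock, flags);

	/* The caller would then undo the request's global seqno and wake any
	 * remaining waiters so they re-evaluate their wait (not shown here).
	 */
}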
@@ -356,22 +356,15 @@ static inline int wakeup_priority(struct intel_breadcrumbs *b,
 	return tsk->prio;
 }
 
-void intel_engine_remove_wait(struct intel_engine_cs *engine,
-			      struct intel_wait *wait)
+static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
+				       struct intel_wait *wait)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 
-	/* Quick check to see if this waiter was already decoupled from
-	 * the tree by the bottom-half to avoid contention on the spinlock
-	 * by the herd.
-	 */
-	if (RB_EMPTY_NODE(&wait->node))
-		return;
-
-	spin_lock_irq(&b->lock);
+	assert_spin_locked(&b->lock);
 
 	if (RB_EMPTY_NODE(&wait->node))
-		goto out_unlock;
+		goto out;
 
 	if (b->first_wait == wait) {
 		const int priority = wakeup_priority(b, wait->tsk);
@@ -436,11 +429,27 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
 	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
 	rb_erase(&wait->node, &b->waiters);
 
-out_unlock:
+out:
 	GEM_BUG_ON(b->first_wait == wait);
 	GEM_BUG_ON(rb_first(&b->waiters) !=
 		   (b->first_wait ? &b->first_wait->node : NULL));
 	GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
+}
+
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+			      struct intel_wait *wait)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+	/* Quick check to see if this waiter was already decoupled from
+	 * the tree by the bottom-half to avoid contention on the spinlock
+	 * by the herd.
+	 */
+	if (RB_EMPTY_NODE(&wait->node))
+		return;
+
+	spin_lock_irq(&b->lock);
+	__intel_engine_remove_wait(engine, wait);
 	spin_unlock_irq(&b->lock);
 }
 
@@ -506,11 +515,13 @@ static int intel_breadcrumbs_signaler(void *arg)
 			dma_fence_signal(&request->fence);
 			local_bh_enable(); /* kick start the tasklets */
 
+			spin_lock_irq(&b->lock);
+
 			/* Wake up all other completed waiters and select the
 			 * next bottom-half for the next user interrupt.
 			 */
-			intel_engine_remove_wait(engine,
-						 &request->signaling.wait);
+			__intel_engine_remove_wait(engine,
+						   &request->signaling.wait);
 
 			/* Find the next oldest signal. Note that as we have
 			 * not been holding the lock, another client may
@@ -518,7 +529,6 @@ static int intel_breadcrumbs_signaler(void *arg)
 			 * we just completed - so double check we are still
 			 * the oldest before picking the next one.
 			 */
-			spin_lock_irq(&b->lock);
 			if (request == rcu_access_pointer(b->first_signal)) {
 				struct rb_node *rb =
 					rb_next(&request->signaling.node);
@@ -526,6 +536,8 @@ static int intel_breadcrumbs_signaler(void *arg)
 						   rb ? to_signaler(rb) : NULL);
 			}
 			rb_erase(&request->signaling.node, &b->signals);
+			RB_CLEAR_NODE(&request->signaling.node);
+
 			spin_unlock_irq(&b->lock);
 
 			i915_gem_request_put(request);
@@ -613,6 +625,35 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
 		wake_up_process(b->signaler);
 }
 
+void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
+{
+	struct intel_engine_cs *engine = request->engine;
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+	assert_spin_locked(&request->lock);
+	GEM_BUG_ON(!request->signaling.wait.seqno);
+
+	spin_lock(&b->lock);
+
+	if (!RB_EMPTY_NODE(&request->signaling.node)) {
+		if (request == rcu_access_pointer(b->first_signal)) {
+			struct rb_node *rb =
+				rb_next(&request->signaling.node);
+			rcu_assign_pointer(b->first_signal,
+					   rb ? to_signaler(rb) : NULL);
+		}
+		rb_erase(&request->signaling.node, &b->signals);
+		RB_CLEAR_NODE(&request->signaling.node);
+		i915_gem_request_put(request);
+	}
+
+	__intel_engine_remove_wait(engine, &request->signaling.wait);
+
+	spin_unlock(&b->lock);
+
+	request->signaling.wait.seqno = 0;
+}
+
 int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
@@ -635,6 +635,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
 void intel_engine_remove_wait(struct intel_engine_cs *engine,
 			      struct intel_wait *wait);
 void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
+void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);
 static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
 {