Commit 6e7eb7a8 authored by Chris Wilson

drm/i915: Bump signaler priority on adding a waiter

The handling of the no-preemption priority level imposes the restriction
that we need to maintain the implied ordering even though preemption is
disabled. Otherwise we may end up with an AB-BA deadlock across multiple
engines due to a real preemption event reordering the no-preemption
WAITs. To resolve this issue we currently promote all requests to WAIT
on unsubmission; however, this interferes with the timeslicing
requirement that we do not apply any implicit promotion that would defeat
the round-robin timeslice list. (If we automatically promote the active
request, it will go back to the head of the queue and not the tail!)

So we need implicit promotion to prevent reordering around semaphores
where we are not allowed to preempt, and we must avoid implicit
promotion on unsubmission. If we instead apply that implicit promotion
when the dependency is added, rather than at unsubmit, we avoid the
semaphore deadlock and we also reduce the gains made by the promotion
for userspace waits. Furthermore, by keeping the earlier dependencies
at a higher level, we reduce the search space for timeslicing without
altering runtime scheduling too badly (no dependencies at all will be
assigned a higher priority for rrul).

v2: Limit the bump to external edges (as originally intended), i.e.
between contexts and out to the user.

Testcase: igt/gem_concurrent_blit
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190515130052.4475-3-chris@chris-wilson.co.uk
parent af461ff3
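
For context on the "implicit promotion" discussed above: the bumped bit
lives below the shift used for user priority levels, so OR-ing it in
promotes a request within its user level without ever crossing levels.
A minimal sketch, assuming the packed-priority layout of the i915
scheduler at this time (the exact shift and bit values here are
assumptions, not part of this patch):

/* Assumed layout: user priority in the high bits, internal flags low. */
#define I915_USER_PRIORITY_SHIFT	3
#define I915_USER_PRIORITY(x)		((x) << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK		(I915_USER_PRIORITY(1) - 1)
#define I915_PRIORITY_WAIT		BIT(0)
#define __NO_PREEMPTION			(I915_PRIORITY_WAIT)

/*
 * The bump is a plain OR: it is monotonic (never lowers a priority),
 * and since __NO_PREEMPTION sits below the user shift it can never lift
 * a request past a higher user priority level, which is what makes it
 * safe to apply implicitly. This is also what the BUILD_BUG_ON() in the
 * diff below asserts.
 */
static inline int bump_nopreempt(int prio)
{
	return prio | __NO_PREEMPTION;
}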
@@ -98,12 +98,14 @@ static int live_busywait_preempt(void *arg)
 	ctx_hi = kernel_context(i915);
 	if (!ctx_hi)
 		goto err_unlock;
-	ctx_hi->sched.priority = INT_MAX;
+	ctx_hi->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

 	ctx_lo = kernel_context(i915);
 	if (!ctx_lo)
 		goto err_ctx_hi;
-	ctx_lo->sched.priority = INT_MIN;
+	ctx_lo->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

 	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
 	if (IS_ERR(obj)) {
@@ -958,12 +960,14 @@ static int live_preempt_hang(void *arg)
 	ctx_hi = kernel_context(i915);
 	if (!ctx_hi)
 		goto err_spin_lo;
-	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+	ctx_hi->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

 	ctx_lo = kernel_context(i915);
 	if (!ctx_lo)
 		goto err_ctx_hi;
-	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+	ctx_lo->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
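
A side note on why these selftests stop using raw INT_MAX/INT_MIN: with
the packed layout sketched earlier, a raw INT_MAX also sets all of the
low internal bits, including the no-preemption flag, which would muddy
exactly what live_busywait_preempt probes. A small standalone
demonstration, reusing the assumed layout (I915_CONTEXT_MAX_USER_PRIORITY
is 1023 in the uapi):

#include <limits.h>
#include <stdio.h>

#define I915_USER_PRIORITY_SHIFT	3	/* assumed, as above */
#define I915_USER_PRIORITY(x)		((x) << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK		(I915_USER_PRIORITY(1) - 1)
#define I915_CONTEXT_MAX_USER_PRIORITY	1023	/* uapi/drm/i915_drm.h */

int main(void)
{
	int packed = I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	/* Packing leaves the internal (no-preemption) bits clear ... */
	printf("packed: %d, internal bits: %d\n",
	       packed, packed & I915_PRIORITY_MASK);

	/* ... whereas a raw INT_MAX would set every internal bit too. */
	printf("INT_MAX internal bits: %d\n", INT_MAX & I915_PRIORITY_MASK);
	return 0;
}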
@@ -489,15 +489,6 @@ void __i915_request_unsubmit(struct i915_request *request)
 	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);

-	/*
-	 * As we do not allow WAIT to preempt inflight requests,
-	 * once we have executed a request, along with triggering
-	 * any execution callbacks, we must preserve its ordering
-	 * within the non-preemptible FIFO.
-	 */
-	BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
-	request->sched.attr.priority |= __NO_PREEMPTION;
-
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
 		i915_request_cancel_breadcrumb(request);
@@ -387,6 +387,16 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
 		    !node_started(signal))
 			node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;

+		/*
+		 * As we do not allow WAIT to preempt inflight requests,
+		 * once we have executed a request, along with triggering
+		 * any execution callbacks, we must preserve its ordering
+		 * within the non-preemptible FIFO.
+		 */
+		BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
+		if (flags & I915_DEPENDENCY_EXTERNAL)
+			__bump_priority(signal, __NO_PREEMPTION);
+
 		ret = true;
 	}
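
__bump_priority() is called by the hunk above but is not part of this
diff. For context, a sketch of roughly how such a helper is shaped in
i915_scheduler.c of this era (a reconstruction, not verbatim): it ORs
the bump into a copy of the signaler's scheduling attributes and
reschedules, so the promotion propagates back along the dependency
chain.

static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
	struct i915_sched_attr attr = node->attr;

	/* OR in the internal bits and let the scheduler propagate them. */
	attr.priority |= bump;
	__i915_schedule(node, &attr);
}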
@@ -405,6 +415,7 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
 		return -ENOMEM;

 	if (!__i915_sched_node_add_dependency(node, signal, dep,
+					      I915_DEPENDENCY_EXTERNAL |
 					      I915_DEPENDENCY_ALLOC))
 		i915_dependency_free(dep);
@@ -67,6 +67,7 @@ struct i915_dependency {
 	struct list_head dfs_link;
 	unsigned long flags;
 #define I915_DEPENDENCY_ALLOC	BIT(0)
+#define I915_DEPENDENCY_EXTERNAL	BIT(1)
 };

 #endif /* _I915_SCHEDULER_TYPES_H_ */
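
Finally, a hypothetical pair of callers illustrating the v2 note ("limit
the bump to external edges"): only the exported entry point now tags
I915_DEPENDENCY_EXTERNAL, so only inter-context edges (and edges out to
userspace) bump the signaler, while a driver-internal edge using the
double-underscore variant with no flags would record ordering without
touching priority. The function names below are illustrative, not from
the patch.

static int add_external_edge(struct i915_request *waiter,
			     struct i915_request *signal)
{
	/*
	 * Exported helper: after this patch it always passes
	 * I915_DEPENDENCY_EXTERNAL, so the signaler gets the
	 * __NO_PREEMPTION bump.
	 */
	return i915_sched_node_add_dependency(&waiter->sched, &signal->sched);
}

static bool add_internal_edge(struct i915_request *waiter,
			      struct i915_request *signal,
			      struct i915_dependency *dep)
{
	/*
	 * Pre-allocated dep and no EXTERNAL flag: the edge is recorded
	 * for ordering, but the signaler's priority is left untouched.
	 */
	return __i915_sched_node_add_dependency(&waiter->sched,
						&signal->sched, dep, 0);
}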