Commit f33a8a51 authored by Chris Wilson

drm/i915: Merge wait_for_timelines with retire_request

wait_for_timelines is essentially the same loop as retiring requests
(with an extra timeout), so merge the two into one routine.

v2: Name the merged routine i915_retire_requests_timeout and keep the
VT'd w/a as !interruptible.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-10-chris@chris-wilson.co.uk
parent 33d85644
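
The new calling convention folds the old flags argument into the sign of
the timeout: a positive timeout retires with an interruptible wait, a
negative timeout (as in the VT'd workaround below) waits uninterruptibly
for -timeout, and i915_retire_requests() becomes a zero-timeout inline
wrapper that only reaps already-completed requests. The standalone sketch
below models that convention and the resulting wait-for-idle drain loop in
userspace C; the names mirror the driver, but every symbol here is a
hypothetical stand-in rather than the actual i915 code.

/*
 * Userspace model of the sign-encoded timeout convention introduced
 * here. Every symbol below is a hypothetical stand-in for
 * illustration; none of it is the real driver code.
 */
#include <stdbool.h>
#include <stdio.h>

static long pending_work = 3; /* models requests still in flight */

/*
 * Models i915_retire_requests_timeout(): decode interruptibility from
 * the sign of the timeout, wait only while a budget remains, and
 * return the unconsumed timeout while work is still active, 0 once
 * idle.
 */
static long retire_requests_timeout(long timeout)
{
        bool interruptible = true;

        if (timeout < 0)
                timeout = -timeout, interruptible = false;

        if (timeout > 0 && pending_work > 0) {
                timeout--;      /* models dma_fence_wait_timeout() */
                pending_work--;
                printf("waited %s, budget now %ld\n",
                       interruptible ? "interruptibly" : "uninterruptibly",
                       timeout);
        }

        return pending_work ? timeout : 0;
}

/*
 * Models the new i915_gem_wait_for_idle() loop: retire until idle or
 * until the budget is gone; the kernel version also cond_resched()s
 * and checks signal_pending() in the loop body, and a negative value
 * (e.g. -ETIME from a fence wait) falls straight out of the loop.
 */
static long wait_for_idle(long timeout)
{
        while ((timeout = retire_requests_timeout(timeout)) > 0)
                ;
        return timeout;
}

int main(void)
{
        return wait_for_idle(10) == 0 ? 0 : 1;
}

Compiled on its own, wait_for_idle(10) drains the modelled backlog and
returns 0; in the real driver a negative return instead propagates an
error such as -ETIME out of the loop.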
@@ -432,9 +432,7 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
 	/* Attempt to reap some mmap space from dead objects */
 	do {
-		err = i915_gem_wait_for_idle(i915,
-					     I915_WAIT_INTERRUPTIBLE,
-					     MAX_SCHEDULE_TIMEOUT);
+		err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 		if (err)
 			break;

@@ -59,9 +59,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
 {
 	bool result = !intel_gt_is_wedged(gt);
 
-	if (i915_gem_wait_for_idle(gt->i915,
-				   I915_WAIT_FOR_IDLE_BOOST,
-				   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+	if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
 		/* XXX hide warning from gem_eio */
 		if (i915_modparams.reset) {
 			dev_err(gt->i915->drm.dev,

@@ -1137,7 +1137,7 @@ __sseu_finish(const char *name,
 	if ((flags & TEST_IDLE) && ret == 0) {
 		ret = i915_gem_wait_for_idle(ce->engine->i915,
-					     0, MAX_SCHEDULE_TIMEOUT);
+					     MAX_SCHEDULE_TIMEOUT);
 		if (ret)
 			return ret;

@@ -196,8 +196,7 @@ int intel_gt_resume(struct intel_gt *gt)
 static void wait_for_idle(struct intel_gt *gt)
 {
-	if (i915_gem_wait_for_idle(gt->i915, 0,
-				   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+	if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
 		/*
 		 * Forcibly cancel outstanding work and leave
 		 * the gpu quiet.

@@ -3635,9 +3635,7 @@ i915_drop_caches_set(void *data, u64 val)
 		i915_retire_requests(i915);
 
 	if (val & (DROP_IDLE | DROP_ACTIVE)) {
-		ret = i915_gem_wait_for_idle(i915,
-					     I915_WAIT_INTERRUPTIBLE,
-					     MAX_SCHEDULE_TIMEOUT);
+		ret = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 		if (ret)
 			return ret;
 	}

@@ -2321,8 +2321,7 @@ void i915_gem_driver_register(struct drm_i915_private *i915);
 void i915_gem_driver_unregister(struct drm_i915_private *i915);
 void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
 void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-			   unsigned int flags, long timeout);
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, long timeout);
 void i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);

@@ -883,61 +883,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
 	}
 }
 
-static long
-wait_for_timelines(struct intel_gt *gt, unsigned int wait, long timeout)
-{
-	struct intel_gt_timelines *timelines = &gt->timelines;
-	struct intel_timeline *tl;
-	unsigned long flags;
-
-	spin_lock_irqsave(&timelines->lock, flags);
-	list_for_each_entry(tl, &timelines->active_list, link) {
-		struct dma_fence *fence;
-
-		fence = i915_active_fence_get(&tl->last_request);
-		if (!fence)
-			continue;
-
-		spin_unlock_irqrestore(&timelines->lock, flags);
-
-		if (!dma_fence_is_i915(fence)) {
-			timeout = dma_fence_wait_timeout(fence,
-							 flags & I915_WAIT_INTERRUPTIBLE,
-							 timeout);
-		} else {
-			struct i915_request *rq = to_request(fence);
-
-			/*
-			 * "Race-to-idle".
-			 *
-			 * Switching to the kernel context is often used as
-			 * a synchronous step prior to idling, e.g. in suspend
-			 * for flushing all current operations to memory before
-			 * sleeping. These we want to complete as quickly as
-			 * possible to avoid prolonged stalls, so allow the gpu
-			 * to boost to maximum clocks.
-			 */
-			if (flags & I915_WAIT_FOR_IDLE_BOOST)
-				gen6_rps_boost(rq);
-
-			timeout = i915_request_wait(rq, flags, timeout);
-		}
-
-		dma_fence_put(fence);
-		if (timeout < 0)
-			return timeout;
-
-		/* restart after reacquiring the lock */
-		spin_lock_irqsave(&timelines->lock, flags);
-		tl = list_entry(&timelines->active_list, typeof(*tl), link);
-	}
-	spin_unlock_irqrestore(&timelines->lock, flags);
-
-	return timeout;
-}
-
-int i915_gem_wait_for_idle(struct drm_i915_private *i915,
-			   unsigned int flags, long timeout)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915, long timeout)
 {
 	struct intel_gt *gt = &i915->gt;
@@ -945,18 +891,13 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
 	if (!intel_gt_pm_is_awake(gt))
 		return 0;
 
-	do {
-		timeout = wait_for_timelines(gt, flags, timeout);
-		if (timeout < 0)
-			return timeout;
-
-		cond_resched();
-		if (signal_pending(current))
-			return -EINTR;
-
-	} while (i915_retire_requests(i915));
-
-	return 0;
+	while ((timeout = i915_retire_requests_timeout(i915, timeout)) > 0) {
+		cond_resched();
+		if (signal_pending(current))
+			return -EINTR;
+	}
+
+	return timeout;
 }
 
 struct i915_vma *

@@ -46,9 +46,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
 	 * the hopes that we can then remove contexts and the like only
 	 * bound by their active reference.
 	 */
-	return i915_gem_wait_for_idle(i915,
-				      I915_WAIT_INTERRUPTIBLE,
-				      MAX_SCHEDULE_TIMEOUT);
+	return i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 }
 
 static bool
@@ -126,6 +124,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
 				    min_size, alignment, color,
 				    start, end, mode);
 
+	i915_retire_requests(vm->i915);
+
 search_again:
 	active = NULL;
 	INIT_LIST_HEAD(&eviction_list);
@@ -264,12 +264,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 	trace_i915_gem_evict_node(vm, target, flags);
 
-	/* Retire before we search the active list. Although we have
+	/*
+	 * Retire before we search the active list. Although we have
 	 * reasonable accuracy in our retirement lists, we may have
 	 * a stray pin (preventing eviction) that can only be resolved by
 	 * retiring.
 	 */
 	if (!(flags & PIN_NONBLOCK))
 		i915_retire_requests(vm->i915);
 
 	if (i915_vm_has_cache_coloring(vm)) {

@@ -2528,7 +2528,9 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
 	if (unlikely(ggtt->do_idle_maps)) {
-		if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
+		/* XXX This does not prevent more requests being submitted! */
+		if (i915_retire_requests_timeout(dev_priv,
+						 -MAX_SCHEDULE_TIMEOUT)) {
 			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);

@@ -1508,13 +1508,19 @@ long i915_request_wait(struct i915_request *rq,
 	return timeout;
 }
 
-bool i915_retire_requests(struct drm_i915_private *i915)
+long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout)
 {
 	struct intel_gt_timelines *timelines = &i915->gt.timelines;
 	struct intel_timeline *tl, *tn;
+	unsigned long active_count = 0;
 	unsigned long flags;
+	bool interruptible;
 	LIST_HEAD(free);
 
+	interruptible = true;
+	if (timeout < 0)
+		timeout = -timeout, interruptible = false;
+
 	spin_lock_irqsave(&timelines->lock, flags);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
 		if (!mutex_trylock(&tl->mutex))
@@ -1525,13 +1531,27 @@ bool i915_retire_requests(struct drm_i915_private *i915)
 		tl->active_count++; /* pin the list element */
 		spin_unlock_irqrestore(&timelines->lock, flags);
 
+		if (timeout > 0) {
+			struct dma_fence *fence;
+
+			fence = i915_active_fence_get(&tl->last_request);
+			if (fence) {
+				timeout = dma_fence_wait_timeout(fence,
+								 interruptible,
+								 timeout);
+				dma_fence_put(fence);
+			}
+		}
+
 		retire_requests(tl);
 
 		spin_lock_irqsave(&timelines->lock, flags);
 
 		/* Resume iteration after dropping lock */
 		list_safe_reset_next(tl, tn, link);
-		if (!--tl->active_count)
+		if (--tl->active_count)
+			active_count += !!rcu_access_pointer(tl->last_request.fence);
+		else
 			list_del(&tl->link);
 
 		mutex_unlock(&tl->mutex);
@@ -1547,7 +1567,7 @@ bool i915_retire_requests(struct drm_i915_private *i915)
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
 
-	return !list_empty(&timelines->active_list);
+	return active_count ? timeout : 0;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)

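The merged routine's return value doubles as a progress report: it hands
back the unconsumed timeout while any timeline still carries an unretired
request, and 0 once everything has been reaped, which is what lets
mock_device_flush() further below spin on
i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT) until idle. A
minimal userspace model of that drain idiom, again with hypothetical
stand-ins rather than real i915 symbols:

/*
 * Minimal model of the drain idiom enabled by the return contract
 * above; all names are hypothetical stand-ins, not i915 symbols.
 */
#include <stdio.h>

#define MODEL_MAX_TIMEOUT 1000L /* stands in for MAX_SCHEDULE_TIMEOUT */

static int active_timelines = 2;

/*
 * Mirrors "return active_count ? timeout : 0;": the remaining budget
 * while timelines stay active, 0 once idle.
 */
static long retire_timeout(long timeout)
{
        if (active_timelines > 0)
                active_timelines--; /* one pass of retirement */

        return active_timelines ? timeout : 0;
}

int main(void)
{
        /* mock_device_flush()-style loop: retire until it reports 0 */
        do {
                /* a real caller would flush its engines here */
        } while (retire_timeout(MODEL_MAX_TIMEOUT));

        printf("idle\n");
        return 0;
}
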
@@ -310,7 +310,6 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_INTERRUPTIBLE	BIT(0)
 #define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
 #define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST	BIT(3)
 
 static inline bool i915_request_signaled(const struct i915_request *rq)
 {
@@ -460,6 +459,10 @@ i915_request_active_timeline(struct i915_request *rq)
 					 lockdep_is_held(&rq->engine->active.lock));
 }
 
-bool i915_retire_requests(struct drm_i915_private *i915);
+long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout);
+static inline void i915_retire_requests(struct drm_i915_private *i915)
+{
+	i915_retire_requests_timeout(i915, 0);
+}
 
 #endif /* I915_REQUEST_H */
@@ -18,8 +18,7 @@ int igt_flush_test(struct drm_i915_private *i915)
 	cond_resched();
 
-	i915_retire_requests(i915);
-	if (i915_gem_wait_for_idle(i915, 0, HZ / 5) == -ETIME) {
+	if (i915_gem_wait_for_idle(i915, HZ / 5) == -ETIME) {
 		pr_err("%pS timed out, cancelling all further testing.\n",
 		       __builtin_return_address(0));
@@ -30,7 +29,6 @@ int igt_flush_test(struct drm_i915_private *i915)
 		intel_gt_set_wedged(&i915->gt);
 		ret = -EIO;
 	}
-	i915_retire_requests(i915);
 
 	return ret;
 }

@@ -23,9 +23,7 @@ int igt_live_test_begin(struct igt_live_test *t,
 	t->func = func;
 	t->name = name;
 
-	err = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_INTERRUPTIBLE,
-				     MAX_SCHEDULE_TIMEOUT);
+	err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 	if (err) {
 		pr_err("%s(%s): failed to idle before, with err=%d!",
 		       func, name, err);

@@ -44,7 +44,7 @@ void mock_device_flush(struct drm_i915_private *i915)
 	do {
 		for_each_engine(engine, i915, id)
 			mock_engine_flush(engine);
-	} while (i915_retire_requests(i915));
+	} while (i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT));
 }
 
 static void mock_device_release(struct drm_device *dev)