Commit 30084b14 authored by Chris Wilson

drm/i915/gt: Flush other retirees inside intel_gt_retire_requests()

Our goal in wait_for_idle (intel_gt_retire_requests) is to retire the
current workload *and* their idle barriers. This requires us to notice
the late arrival of those, which is done by inspecting the list of
active timelines. However, if a concurrent retirer is running, that new
timeline may not be added until after we drop the lock -- so flush
concurrent retirers before we take the lock and inspect the list.

Closes: https://gitlab.freedesktop.org/drm/intel/issues/878
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191223211008.2371613-1-chris@chris-wilson.co.uk
parent b42d3b15
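To make the race concrete, here is a minimal userspace sketch of the same
ordering, assuming hypothetical names (flush_retirers, active_timelines,
wait_for_idle) and pthreads in place of the i915 primitives; it illustrates
the pattern, not the driver code itself:

/*
 * Hypothetical analogue: a concurrent "retirer" may add a late timeline;
 * the waiter must flush it before taking the lock and inspecting the
 * list, or it can miss the late arrival and report idle too early.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int active_timelines;	/* stands in for timelines->active_list */
static pthread_t retirer;
static bool retirer_started;

static void *retirer_fn(void *arg)
{
	/* Late arrival: an idle barrier adds a new timeline. */
	pthread_mutex_lock(&lock);
	active_timelines++;
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Analogue of flush_submission(): sync with the concurrent retirer. */
static void flush_retirers(void)
{
	if (retirer_started) {
		pthread_join(retirer, NULL);
		retirer_started = false;
	}
}

static bool wait_for_idle(void)
{
	int remaining;

	flush_retirers();	/* flush before taking the lock */

	pthread_mutex_lock(&lock);
	remaining = active_timelines;	/* now sees any late arrival */
	pthread_mutex_unlock(&lock);

	return remaining == 0;
}

int main(void)
{
	retirer_started = pthread_create(&retirer, NULL, retirer_fn, NULL) == 0;

	/*
	 * Without flush_retirers(), this could observe an empty list and
	 * report idle while the retirer was still about to add a timeline.
	 */
	printf("idle: %s\n", wait_for_idle() ? "yes" : "no");
	return 0;
}

The same reasoning is why the patch below re-runs flush_submission(gt)
before re-taking timelines->lock inside the retirement loop.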
@@ -282,7 +282,7 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
 bool intel_engines_are_idle(struct intel_gt *gt);
 bool intel_engine_is_idle(struct intel_engine_cs *engine);
-bool intel_engine_flush_submission(struct intel_engine_cs *engine);
+void intel_engine_flush_submission(struct intel_engine_cs *engine);
 void intel_engines_reset_default_submission(struct intel_gt *gt);
@@ -1047,10 +1047,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 	return idle;
 }
 
-bool intel_engine_flush_submission(struct intel_engine_cs *engine)
+void intel_engine_flush_submission(struct intel_engine_cs *engine)
 {
 	struct tasklet_struct *t = &engine->execlists.tasklet;
-	bool active = tasklet_is_locked(t);
 
 	if (__tasklet_is_scheduled(t)) {
 		local_bh_disable();
@@ -1061,13 +1060,10 @@ bool intel_engine_flush_submission(struct intel_engine_cs *engine)
 			tasklet_unlock(t);
 		}
 		local_bh_enable();
-		active = true;
 	}
 
 	/* Otherwise flush the tasklet if it was running on another cpu */
 	tasklet_unlock_wait(t);
-
-	return active;
 }
 
 /**
@@ -26,21 +26,18 @@ static bool retire_requests(struct intel_timeline *tl)
 	return !i915_active_fence_isset(&tl->last_request);
 }
 
-static bool flush_submission(struct intel_gt *gt)
+static void flush_submission(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	bool active = false;
 
 	if (!intel_gt_pm_is_awake(gt))
-		return false;
+		return;
 
 	for_each_engine(engine, gt, id) {
-		active |= intel_engine_flush_submission(engine);
-		active |= flush_work(&engine->retire_work);
+		intel_engine_flush_submission(engine);
+		flush_work(&engine->retire_work);
 	}
-
-	return active;
 }
 
 static void engine_retire(struct work_struct *work)
@@ -126,7 +123,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 		timeout = -timeout, interruptible = false;
 
 	flush_submission(gt); /* kick the ksoftirqd tasklets */
-
 	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
 		if (!mutex_trylock(&tl->mutex)) {
@@ -153,6 +149,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 		active_count += !retire_requests(tl);
 
+		flush_submission(gt); /* sync with concurrent retirees */
 		spin_lock(&timelines->lock);
 
 		/* Resume iteration after dropping lock */
@@ -173,9 +170,6 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
 
-	if (flush_submission(gt))
-		active_count++;
-
 	return active_count ? timeout : 0;
 }