Commit d13a3177 authored by Chris Wilson

drm/i915: Flush idle barriers when waiting

If we do find ourselves with an idle barrier inside our active while
waiting, attempt to flush it by emitting a pulse using the kernel
context.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Steve Carbonari <steven.carbonari@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200225192206.1107336-1-chris@chris-wilson.co.uk
parent 53e3ca67
@@ -7,6 +7,7 @@
#include <linux/debugobjects.h>
#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"
@@ -460,26 +461,49 @@ static void enable_signaling(struct i915_active_fence *active)
dma_fence_put(fence);
}
int i915_active_wait(struct i915_active *ref)
static int flush_barrier(struct active_node *it)
{
struct active_node *it, *n;
int err = 0;
struct intel_engine_cs *engine;
might_sleep();
if (likely(!is_barrier(&it->base)))
return 0;
if (!i915_active_acquire_if_busy(ref))
engine = __barrier_to_engine(it);
smp_rmb(); /* serialise with add_active_barriers */
if (!is_barrier(&it->base))
return 0;
/* Flush lazy signals */
return intel_engine_flush_barriers(engine);
}
/*
 * flush_lazy_signals() - enable signaling on every fence tracked by @ref.
 * @ref: the i915_active whose fences should be flushed
 *
 * Walks the exclusive slot and the whole rbtree of active nodes, flushing
 * any unconnected idle barriers (so they can retire) and enabling lazy
 * signaling on each tracked fence. Fences added after this walk begins
 * will not be auto-signaled.
 *
 * Return: 0 on success, or the first error from flush_barrier().
 */
static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* unconnected idle barrier? */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}
/*
 * i915_active_wait() - wait for the i915_active to become idle.
 * @ref: the i915_active to wait upon
 *
 * Flushes lazy signals (including any idle barriers, via
 * flush_lazy_signals()) before waiting for all tracked fences to retire.
 *
 * NOTE(review): this excerpt is truncated by the diff; the remainder of
 * the function (the actual wait and final return) is outside this view.
 */
int i915_active_wait(struct i915_active *ref)
{
	int err;

	might_sleep();

	/* If not busy, @ref is already idle: nothing to wait for. */
	if (!i915_active_acquire_if_busy(ref))
		return 0;

	/* Any fence added after the wait begins will not be auto-signaled */
	err = flush_lazy_signals(ref);
	i915_active_release(ref);
	if (err)
		return err;
......
@@ -201,11 +201,57 @@ static int live_active_retire(void *arg)
return err;
}
/*
 * live_active_barrier() - selftest that idle barriers are flushed on wait.
 * @arg: the struct drm_i915_private under test
 *
 * Attach a preallocated barrier on every uabi engine to a live i915_active,
 * then wait on it and verify the retirement callback fired.
 *
 * Return: 0 on success, a negative errno on failure.
 */
static int live_active_barrier(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_active *live;
	int ret = 0;

	/* Check that we get a callback when requests retire upon waiting */

	live = __live_alloc(i915);
	if (!live)
		return -ENOMEM;

	ret = i915_active_acquire(&live->base);
	if (ret)
		goto out;

	/* Park an idle barrier on each engine. */
	for_each_uabi_engine(engine, i915) {
		ret = i915_active_acquire_preallocate_barrier(&live->base,
							      engine);
		if (ret)
			break;

		i915_active_acquire_barrier(&live->base);
	}

	i915_active_release(&live->base);
	if (!ret)
		ret = i915_active_wait(&live->base);

	/* Waiting must have flushed the barriers and retired the active. */
	if (!ret && !READ_ONCE(live->retired)) {
		pr_err("i915_active not retired after flushing barriers!\n");
		ret = -EINVAL;
	}

out:
	__live_put(live);

	if (igt_flush_test(i915))
		ret = -EIO;

	return ret;
}
/*
 * i915_active_live_selftests() - entry point for the i915_active live tests.
 * @i915: the device under test
 *
 * Registers the wait, retire and (new in this commit) barrier subtests.
 *
 * NOTE(review): this excerpt is truncated by the diff; the function body
 * continues outside this view.
 */
int i915_active_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_wait),
		SUBTEST(live_active_retire),
		SUBTEST(live_active_barrier), /* added by this commit */
	};

	/* Skip the subtests on a wedged (unrecoverable) GPU. */
	if (intel_gt_is_wedged(&i915->gt))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.