Commit 5366b96b authored by Dave Airlie

Merge tag 'drm-intel-fixes-2020-03-19' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

drm/i915 fixes for v5.6-rc7:
- Track active elements during dequeue
- Fix failure to handle all MCR ranges
- Revert unnecessary workaround
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/877dzgepvu.fsf@intel.com
parents 362b86a3 fe8b7085
@@ -1600,17 +1600,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 	spin_unlock(&old->breadcrumbs.irq_lock);
 }
 
-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
-	struct i915_request * const *last = READ_ONCE(execlists->active);
-
-	while (*last && i915_request_completed(*last))
-		last++;
-
-	return *last;
-}
-
 #define for_each_waiter(p__, rq__) \
 	list_for_each_entry_lockless(p__, \
 				     &(rq__)->sched.waiters_list, \
@@ -1740,11 +1729,9 @@ static void record_preemption(struct intel_engine_execlists *execlists)
 	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
 }
 
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+					    const struct i915_request *rq)
 {
-	struct i915_request *rq;
-
-	rq = last_active(&engine->execlists);
 	if (!rq)
 		return 0;
 
@@ -1755,13 +1742,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
 	return READ_ONCE(engine->props.preempt_timeout_ms);
 }
 
-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+				const struct i915_request *rq)
 {
 	if (!intel_engine_has_preempt_reset(engine))
 		return;
 
 	set_timer_ms(&engine->execlists.preempt,
-		     active_preempt_timeout(engine));
+		     active_preempt_timeout(engine, rq));
 }
 
 static inline void clear_ports(struct i915_request **ports, int count)
@@ -1774,6 +1762,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request **port = execlists->pending;
 	struct i915_request ** const last_port = port + execlists->port_mask;
+	struct i915_request * const *active;
 	struct i915_request *last;
 	struct rb_node *rb;
 	bool submit = false;
@@ -1828,7 +1817,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * i.e. we will retrigger preemption following the ack in case
 	 * of trouble.
 	 */
-	last = last_active(execlists);
+	active = READ_ONCE(execlists->active);
+	while ((last = *active) && i915_request_completed(last))
+		active++;
+
 	if (last) {
 		if (need_preempt(engine, last, rb)) {
 			ENGINE_TRACE(engine,
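The hunk above inlines the old last_active() helper so that execlists_dequeue() keeps the advanced `active` cursor instead of rescanning later: the same snapshot then feeds both the duplicate-submission memcmp and set_preempt_timeout() further down. A minimal standalone sketch of the scan pattern, with simplified stand-in types rather than the driver's own:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for struct i915_request. */
struct request {
	bool completed;		/* what i915_request_completed() would report */
};

/*
 * Skip past completed entries in a NULL-terminated array and return
 * the first still-active request, advancing the caller's cursor so
 * subsequent checks see the same snapshot instead of rescanning.
 */
static struct request *first_active(struct request *const **cursor)
{
	struct request *const *p = *cursor;

	while (*p && (*p)->completed)
		p++;

	*cursor = p;
	return *p;		/* NULL once everything has completed */
}

The point of the fix is that the cursor, not just the request, is retained: the hunks below reuse it for the comparison against execlists->pending and hand *active to set_preempt_timeout().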
@@ -2110,7 +2102,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		 * Skip if we ended up with exactly the same set of requests,
 		 * e.g. trying to timeslice a pair of ordered contexts
 		 */
-		if (!memcmp(execlists->active, execlists->pending,
+		if (!memcmp(active, execlists->pending,
 			    (port - execlists->pending + 1) * sizeof(*port))) {
 			do
 				execlists_schedule_out(fetch_and_zero(port));
@@ -2121,7 +2113,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		clear_ports(port + 1, last_port - port);
 
 		execlists_submit_ports(engine);
-		set_preempt_timeout(engine);
+		set_preempt_timeout(engine, *active);
 	} else {
 skip_submit:
 		ring_set_paused(engine, 0);
@@ -4008,26 +4000,6 @@ static int gen12_emit_flush_render(struct i915_request *request,
 		*cs++ = preparser_disable(false);
 
 		intel_ring_advance(request, cs);
-
-		/*
-		 * Wa_1604544889:tgl
-		 */
-		if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
-			flags = 0;
-			flags |= PIPE_CONTROL_CS_STALL;
-			flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
-
-			flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-			flags |= PIPE_CONTROL_QW_WRITE;
-
-			cs = intel_ring_begin(request, 6);
-			if (IS_ERR(cs))
-				return PTR_ERR(cs);
-
-			cs = gen8_emit_pipe_control(cs, flags,
-						    LRC_PPHWSP_SCRATCH_ADDR);
-			intel_ring_advance(request, cs);
-		}
 	}
 
 	return 0;
...
@@ -1529,15 +1529,34 @@ create_scratch(struct i915_address_space *vm, int count)
 	return ERR_PTR(err);
 }
 
+static const struct {
+	u32 start;
+	u32 end;
+} mcr_ranges_gen8[] = {
+	{ .start = 0x5500, .end = 0x55ff },
+	{ .start = 0x7000, .end = 0x7fff },
+	{ .start = 0x9400, .end = 0x97ff },
+	{ .start = 0xb000, .end = 0xb3ff },
+	{ .start = 0xe000, .end = 0xe7ff },
+	{},
+};
+
 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
 {
+	int i;
+
+	if (INTEL_GEN(i915) < 8)
+		return false;
+
 	/*
-	 * Registers in this range are affected by the MCR selector
+	 * Registers in these ranges are affected by the MCR selector
 	 * which only controls CPU initiated MMIO. Routing does not
 	 * work for CS access so we cannot verify them on this path.
 	 */
-	if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
-		return true;
+	for (i = 0; mcr_ranges_gen8[i].start; i++)
+		if (offset >= mcr_ranges_gen8[i].start &&
+		    offset <= mcr_ranges_gen8[i].end)
+			return true;
 
 	return false;
 }
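For readers outside the driver, the replaced single hard-coded check becomes a walk over a sentinel-terminated table, so covering an extra MCR range is a one-line addition. A standalone sketch of the same lookup pattern (hypothetical names, not the kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical copy of the table shape: a zeroed entry ends the walk. */
static const struct {
	uint32_t start, end;
} ranges[] = {
	{ 0x5500, 0x55ff },
	{ 0x7000, 0x7fff },
	{ 0x9400, 0x97ff },
	{ 0xb000, 0xb3ff },
	{ 0xe000, 0xe7ff },
	{},	/* sentinel: .start == 0 terminates the loop */
};

static bool in_range(uint32_t offset)
{
	for (int i = 0; ranges[i].start; i++)
		if (offset >= ranges[i].start && offset <= ranges[i].end)
			return true;
	return false;
}

int main(void)
{
	/* 0x9500 is now caught; 0xb480 sat in the old 0xb000-0xb4ff check but is outside the new table. */
	printf("%d %d\n", in_range(0x9500), in_range(0xb480));
	return 0;
}

Terminating on .start == 0 avoids carrying a separate array-size bound through the selftest and keeps the table trivially extensible.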
...