Commit b16a8497 authored by Chris Wilson, committed by Greg Kroah-Hartman

drm/i915: Insert a command barrier on BLT/BSD cache flushes

commit f0a1fb10 upstream.

This looked like an odd regression from

commit ec5cc0f9
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Thu Jun 12 10:28:55 2014 +0100

    drm/i915: Restrict GPU boost to the RCS engine

but in reality it uncovered a much older coherency bug. The issue that
boosting the GPU frequency on the BCS ring was masking was that we could
wake the CPU up after completion of a BCS batch and inspect memory prior
to the write cache being fully evicted. In order to serialise the
breadcrumb interrupt (and so ensure that the CPU's view of memory is
coherent) we need to perform a post-sync operation in the MI_FLUSH_DW.
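
To make the ordering argument concrete, here is a minimal, stand-alone sketch of how the flush command word is assembled after this change: the post-sync store-dword bits are set unconditionally, while TLB/BSD invalidation stays conditional on the invalidate domains. The bit positions, the I915_GEM_GPU_DOMAINS value and the build_flush_cmd()/is_vcs names are placeholders invented for illustration, not the driver's real MI_FLUSH_DW encoding.

    /*
     * Minimal user-space sketch (not kernel code).  All values below are
     * placeholders chosen for illustration; they are NOT the hardware's
     * MI_FLUSH_DW encoding.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define MI_FLUSH_DW             (1u << 29) /* placeholder opcode bit */
    #define MI_FLUSH_DW_STORE_INDEX (1u << 21) /* placeholder */
    #define MI_FLUSH_DW_OP_STOREDW  (1u << 14) /* placeholder */
    #define MI_INVALIDATE_TLB       (1u << 18) /* placeholder */
    #define MI_INVALIDATE_BSD       (1u << 7)  /* placeholder */
    #define I915_GEM_GPU_DOMAINS    0x3eu      /* placeholder domain mask */

    /* Hypothetical helper mirroring the shape of the fix. */
    static uint32_t build_flush_cmd(uint32_t invalidate_domains, int is_vcs)
    {
            uint32_t cmd = MI_FLUSH_DW;

            /* Always request the post-sync write: it acts as the command
             * barrier that orders the breadcrumb interrupt after the write
             * cache has actually been flushed to memory. */
            cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

            /* TLB (and, on the BSD ring, BSD) invalidation stays conditional. */
            if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
                    cmd |= MI_INVALIDATE_TLB;
                    if (is_vcs)
                            cmd |= MI_INVALIDATE_BSD;
            }
            return cmd;
    }

    int main(void)
    {
            printf("flush cmd: 0x%08x\n",
                   (unsigned int)build_flush_cmd(I915_GEM_GPU_DOMAINS, 1));
            return 0;
    }
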

v2: Fix all the MI_FLUSH_DW flushes (BSD plus the duplication in execlists).

Also fix the invalidate_domains mask in gen8_emit_flush() for ring !=
VCS.

Testcase: gpuX-rcs-gpu-read-after-write
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 12bc2f3d
@@ -1237,15 +1237,17 @@ static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
 
 	cmd = MI_FLUSH_DW + 1;
 
-	if (ring == &dev_priv->ring[VCS]) {
-		if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-			cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
-				MI_FLUSH_DW_STORE_INDEX |
-				MI_FLUSH_DW_OP_STOREDW;
-	} else {
-		if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
-			cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
-				MI_FLUSH_DW_OP_STOREDW;
+	/* We always require a command barrier so that subsequent
+	 * commands, such as breadcrumb interrupts, are strictly ordered
+	 * wrt the contents of the write cache being flushed to memory
+	 * (and thus being coherent from the CPU).
+	 */
+	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
+	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
+		cmd |= MI_INVALIDATE_TLB;
+		if (ring == &dev_priv->ring[VCS])
+			cmd |= MI_INVALIDATE_BSD;
 	}
 
 	intel_logical_ring_emit(ringbuf, cmd);
...
@@ -2178,6 +2178,14 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
 	cmd = MI_FLUSH_DW;
 	if (INTEL_INFO(ring->dev)->gen >= 8)
 		cmd += 1;
+
+	/* We always require a command barrier so that subsequent
+	 * commands, such as breadcrumb interrupts, are strictly ordered
+	 * wrt the contents of the write cache being flushed to memory
+	 * (and thus being coherent from the CPU).
+	 */
+	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
 	/*
 	 * Bspec vol 1c.5 - video engine command streamer:
 	 *   "If ENABLED, all TLBs will be invalidated once the flush
@@ -2185,8 +2193,8 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
 	 *   Post-Sync Operation field is a value of 1h or 3h."
 	 */
 	if (invalidate & I915_GEM_GPU_DOMAINS)
-		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
-			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	if (INTEL_INFO(ring->dev)->gen >= 8) {
...
@@ -2282,6 +2290,14 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
 	cmd = MI_FLUSH_DW;
 	if (INTEL_INFO(ring->dev)->gen >= 8)
 		cmd += 1;
+
+	/* We always require a command barrier so that subsequent
+	 * commands, such as breadcrumb interrupts, are strictly ordered
+	 * wrt the contents of the write cache being flushed to memory
+	 * (and thus being coherent from the CPU).
+	 */
+	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
+
 	/*
 	 * Bspec vol 1c.3 - blitter engine command streamer:
 	 *   "If ENABLED, all TLBs will be invalidated once the flush
@@ -2289,8 +2305,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
 	 *   Post-Sync Operation field is a value of 1h or 3h."
 	 */
 	if (invalidate & I915_GEM_DOMAIN_RENDER)
-		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
-			MI_FLUSH_DW_OP_STOREDW;
+		cmd |= MI_INVALIDATE_TLB;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	if (INTEL_INFO(ring->dev)->gen >= 8) {
...