Commit d86b18a0 authored by Jon Bloomfield, committed by Jani Nikula

drm/i915: Serialize GTT/Aperture accesses on BXT

BXT has a H/W issue with IOMMU which can lead to system hangs when
Aperture accesses are queued within the GAM behind GTT Accesses.

This patch avoids the condition by wrapping all GTT updates in stop_machine
and using a flushing read prior to restarting the machine.

The stop_machine guarantees no new Aperture accesses can begin while
the PTE writes are being emitted. The flushing read ensures that
any following Aperture accesses cannot begin until the PTE writes
have been cleared out of the GAM's fifo.

Only FOLLOWING Aperture accesses need to be separated from in flight
PTE updates. PTE Writes may follow tightly behind already in flight
Aperture accesses, so no flushing read is required at the start of
a PTE update sequence.
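
[For illustration: a minimal sketch of the pattern, using hypothetical
names (pte_update__cb, write_ptes, flush_gam_fifo, struct pte_update);
the real helpers appear in the diff below.]

	static int pte_update__cb(void *_arg)
	{
		struct pte_update *arg = _arg;	/* hypothetical argument block */

		write_ptes(arg);	/* stand-in for the actual GGTT PTE updates */
		flush_gam_fifo(arg);	/* stand-in for a posting read of any GAM register */
		return 0;
	}

	struct pte_update args = { /* per-operation arguments */ };

	/* No other CPU runs until the callback (including the flushing read)
	 * returns, so no aperture access can queue up behind the PTE writes.
	 */
	stop_machine(pte_update__cb, &args, NULL);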

This issue was reproduced by running
	igt/gem_readwrite and
	igt/gem_render_copy
simultaneously from different processes, each in a tight loop,
with INTEL_IOMMU enabled.

This patch was originally published as:
	drm/i915: Serialize GTT Updates on BXT

[Note: This will cause a performance penalty for some use cases, but
avoiding hangs trumps performance hits. This may need to be worked
around in Mesa to recover the lost performance.]

v2: Move bxt/iommu detection into static function
    Remove #ifdef CONFIG_INTEL_IOMMU protection
    Make function names more reflective of purpose
    Move flushing read into static function

v3: Tidy up for checkpatch.pl

Testcase: igt/gem_concurrent_blit
Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: John Harrison <john.C.Harrison@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: stable@vger.kernel.org
Link: http://patchwork.freedesktop.org/patch/msgid/1495641251-30022-1-git-send-email-jon.bloomfield@intel.com
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
(cherry picked from commit 0ef34ad6)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent 3c2993b8
@@ -2991,6 +2991,16 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
	return false;
}
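
/* True when GGTT PTE updates must be serialized against aperture access (BXT with VT-d enabled). */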
static inline bool
intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_INTEL_IOMMU
	if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt);
@@ -2191,6 +2191,101 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;

	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
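
/*
 * Each GGTT update path below gets a __cb/__BKL pair: the __BKL wrapper
 * packs its arguments into a small struct and runs the real update,
 * followed by the flushing read above, as a stop_machine() callback.
 */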
struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

struct insert_entries {
	struct i915_address_space *vm;
	struct sg_table *st;
	u64 start;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct sg_table *st,
					     u64 start,
					     enum i915_cache_level level,
					     u32 unused)
{
	struct insert_entries arg = { vm, st, start, level };

	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}

struct clear_range {
	struct i915_address_space *vm;
	u64 start;
	u64 length;
};

static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
{
	struct clear_range *arg = _arg;

	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
					  u64 start,
					  u64 length)
{
	struct clear_range arg = { vm, start, length };

	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
@@ -2785,6 +2880,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
	ggtt->base.insert_entries = gen8_ggtt_insert_entries;

	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
	if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
		ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
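		/* nop_clear_range performs no PTE writes, so it needs no serialization. */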
		if (ggtt->base.clear_range != nop_clear_range)
			ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
	}

	ggtt->invalidate = gen6_ggtt_invalidate;

	return ggtt_probe_common(ggtt, size);