Commit b1ed35d9 authored by Chris Wilson's avatar Chris Wilson

drm/i915: Revoke fenced GTT mmapings across GPU reset

The fence registers are clobbered by a GPU reset. If there is concurrent
user access to a fenced region via a GTT mmapping, the access will not be
fenced during the reset (until we restore the fences afterwards). In order
to prevent invalid access during the reset, before we clobber the fences
first we must invalidate the GTT mmappings. Access to the mmap will then
be forced to fault in the page, and in handling the fault, i915_gem_fault()
will take the struct_mutex and wait upon the reset to complete.

v2: Fix up commentary.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=99274
Testcase: igt/gem_mmap_gtt/hang
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/20170104145110.1486-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
parent fd7d6c5c
...@@ -1776,6 +1776,7 @@ void i915_reset(struct drm_i915_private *dev_priv) ...@@ -1776,6 +1776,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
error->reset_count++; error->reset_count++;
pr_notice("drm/i915: Resetting chip after gpu hang\n"); pr_notice("drm/i915: Resetting chip after gpu hang\n");
i915_gem_reset_prepare(dev_priv);
disable_engines_irq(dev_priv); disable_engines_irq(dev_priv);
ret = intel_gpu_reset(dev_priv, ALL_ENGINES); ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
...@@ -1789,7 +1790,7 @@ void i915_reset(struct drm_i915_private *dev_priv) ...@@ -1789,7 +1790,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
goto error; goto error;
} }
i915_gem_reset(dev_priv); i915_gem_reset_finish(dev_priv);
intel_overlay_reset(dev_priv); intel_overlay_reset(dev_priv);
/* Ok, now get things going again... */ /* Ok, now get things going again... */
......
...@@ -3334,7 +3334,8 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) ...@@ -3334,7 +3334,8 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
return READ_ONCE(error->reset_count); return READ_ONCE(error->reset_count);
} }
void i915_gem_reset(struct drm_i915_private *dev_priv); void i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv); void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
...@@ -3418,6 +3419,7 @@ i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o, ...@@ -3418,6 +3419,7 @@ i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
int __must_check i915_vma_get_fence(struct i915_vma *vma); int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma); int __must_check i915_vma_put_fence(struct i915_vma *vma);
void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
void i915_gem_restore_fences(struct drm_i915_private *dev_priv); void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv); void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
......
...@@ -2726,6 +2726,11 @@ static void reset_request(struct drm_i915_gem_request *request) ...@@ -2726,6 +2726,11 @@ static void reset_request(struct drm_i915_gem_request *request)
memset(vaddr + head, 0, request->postfix - head); memset(vaddr + head, 0, request->postfix - head);
} }
/*
 * Prepare the device for a GPU reset. A reset clobbers the fence registers,
 * so revoke all fenced GTT mmappings first: concurrent userspace access is
 * forced to refault, and i915_gem_fault() will block on struct_mutex until
 * the reset completes and the fences have been restored.
 */
void i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
{
	i915_gem_revoke_fences(dev_priv);
}
static void i915_gem_reset_engine(struct intel_engine_cs *engine) static void i915_gem_reset_engine(struct intel_engine_cs *engine)
{ {
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
...@@ -2791,7 +2796,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) ...@@ -2791,7 +2796,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
spin_unlock_irqrestore(&engine->timeline->lock, flags); spin_unlock_irqrestore(&engine->timeline->lock, flags);
} }
void i915_gem_reset(struct drm_i915_private *dev_priv) void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
{ {
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
enum intel_engine_id id; enum intel_engine_id id;
......
...@@ -366,6 +366,30 @@ i915_vma_get_fence(struct i915_vma *vma) ...@@ -366,6 +366,30 @@ i915_vma_get_fence(struct i915_vma *vma)
return fence_update(fence, set); return fence_update(fence, set);
} }
/**
 * i915_gem_revoke_fences - revoke fence state
 * @dev_priv: i915 device private
 *
 * Removes all GTT mmappings via the fence registers. This forces any user
 * of the fence to reacquire that fence before continuing with their access.
 * One use is during GPU reset where the fence register is lost and we need to
 * revoke concurrent userspace access via GTT mmaps until the hardware has been
 * reset and the fence registers have been restored.
 *
 * Must be called with dev_priv->drm.struct_mutex held.
 */
void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
{
	struct drm_i915_fence_reg *reg;
	int n;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Walk every fence register; any object currently bound to a fence
	 * has its GTT mmapping zapped so userspace must refault.
	 */
	for (n = 0; n < dev_priv->num_fence_regs; n++) {
		reg = &dev_priv->fence_regs[n];
		if (reg->vma)
			i915_gem_release_mmap(reg->vma->obj);
	}
}
/** /**
* i915_gem_restore_fences - restore fence state * i915_gem_restore_fences - restore fence state
* @dev_priv: i915 device private * @dev_priv: i915 device private
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment