Commit 21dd3734 authored by Chris Wilson

drm/i915: Defer reporting EIO until we try to use the GPU

Instead of reporting EIO upfront in the entrance of an ioctl that may or
may not attempt to use the GPU, defer the actual detection of an invalid
ioctl to when we issue a GPU instruction. This allows us to continue to
use bo in video memory (via pread/pwrite and mmap) after the GPU has hung.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent e110e8d6
...@@ -1052,7 +1052,6 @@ extern void i915_mem_takedown(struct mem_block **heap); ...@@ -1052,7 +1052,6 @@ extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(struct drm_device * dev, extern void i915_mem_release(struct drm_device * dev,
struct drm_file *file_priv, struct mem_block *heap); struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */ /* i915_gem.c */
int i915_gem_check_is_wedged(struct drm_device *dev);
int i915_gem_init_ioctl(struct drm_device *dev, void *data, int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv); struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data, int i915_gem_create_ioctl(struct drm_device *dev, void *data,
......
...@@ -75,8 +75,8 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, ...@@ -75,8 +75,8 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
dev_priv->mm.object_memory -= size; dev_priv->mm.object_memory -= size;
} }
int static int
i915_gem_check_is_wedged(struct drm_device *dev) i915_gem_wait_for_error(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct completion *x = &dev_priv->error_completion; struct completion *x = &dev_priv->error_completion;
...@@ -90,27 +90,24 @@ i915_gem_check_is_wedged(struct drm_device *dev) ...@@ -90,27 +90,24 @@ i915_gem_check_is_wedged(struct drm_device *dev)
if (ret) if (ret)
return ret; return ret;
/* Success, we reset the GPU! */ if (atomic_read(&dev_priv->mm.wedged)) {
if (!atomic_read(&dev_priv->mm.wedged)) /* GPU is hung, bump the completion count to account for
return 0; * the token we just consumed so that we never hit zero and
* end up waiting upon a subsequent completion event that
/* GPU is hung, bump the completion count to account for * will never happen.
* the token we just consumed so that we never hit zero and */
* end up waiting upon a subsequent completion event that spin_lock_irqsave(&x->wait.lock, flags);
* will never happen. x->done++;
*/ spin_unlock_irqrestore(&x->wait.lock, flags);
spin_lock_irqsave(&x->wait.lock, flags); }
x->done++; return 0;
spin_unlock_irqrestore(&x->wait.lock, flags);
return -EIO;
} }
int i915_mutex_lock_interruptible(struct drm_device *dev) int i915_mutex_lock_interruptible(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
int ret; int ret;
ret = i915_gem_check_is_wedged(dev); ret = i915_gem_wait_for_error(dev);
if (ret) if (ret)
return ret; return ret;
...@@ -118,11 +115,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) ...@@ -118,11 +115,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
if (ret) if (ret)
return ret; return ret;
if (atomic_read(&dev_priv->mm.wedged)) {
mutex_unlock(&dev->struct_mutex);
return -EAGAIN;
}
WARN_ON(i915_verify_lists(dev)); WARN_ON(i915_verify_lists(dev));
return 0; return 0;
} }
......
...@@ -284,11 +284,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -284,11 +284,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_set_tiling *args = data; struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
int ret;
ret = i915_gem_check_is_wedged(dev);
if (ret)
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) if (obj == NULL)
......
...@@ -980,9 +980,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) ...@@ -980,9 +980,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
int intel_ring_begin(struct intel_ring_buffer *ring, int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords) int num_dwords)
{ {
struct drm_i915_private *dev_priv = ring->dev->dev_private;
int n = 4*num_dwords; int n = 4*num_dwords;
int ret; int ret;
if (unlikely(atomic_read(&dev_priv->mm.wedged)))
return -EIO;
if (unlikely(ring->tail + n > ring->effective_size)) { if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring); ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret)) if (unlikely(ret))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment