Commit 88241785 authored by Chris Wilson

drm/i915: Propagate error from flushing the ring

... in order to avoid a BUG() and potential unbounded waits.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 776ad806
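The fix applies one mechanical pattern everywhere: a flush helper that used to return void (and could only BUG() on failure) now returns int, is annotated __must_check so callers cannot silently drop the result, and each caller captures the value and bails out early. A minimal standalone sketch of that pattern, using hypothetical names rather than the driver's actual functions:

	#include <stdio.h>

	/* Kernel-style __must_check: the compiler warns if a caller drops
	 * the return value, which is what makes void -> int conversions
	 * safe to rely on. */
	#define __must_check __attribute__((warn_unused_result))

	/* Hypothetical stand-in for ring->flush(); returns 0 or -errno. */
	static int hw_flush(int fail)
	{
		return fail ? -5 /* -EIO */ : 0;
	}

	/* After the conversion: report failure instead of BUG()ing on it,
	 * and only do the follow-up bookkeeping once the flush succeeded. */
	static __must_check int flush_ring(int fail)
	{
		int ret;

		ret = hw_flush(fail);
		if (ret)
			return ret;

		/* ...process the flushing list only on success... */
		return 0;
	}

	int main(void)
	{
		/* Callers follow the capture-check-return shape used
		 * throughout the diff below. */
		int ret = flush_ring(1);
		if (ret)
			fprintf(stderr, "flush failed: %d\n", ret);
		return 0;
	}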
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1086,10 +1086,10 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_flush_ring(struct drm_device *dev,
-			 struct intel_ring_buffer *ring,
-			 uint32_t invalidate_domains,
-			 uint32_t flush_domains);
+int __must_check i915_gem_flush_ring(struct drm_device *dev,
+				     struct intel_ring_buffer *ring,
+				     uint32_t invalidate_domains,
+				     uint32_t flush_domains);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,18 +35,18 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
-static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
-					     bool write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
-						      uint64_t offset,
-						      uint64_t size);
+static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+							  bool write);
+static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+								   uint64_t offset,
+								   uint64_t size);
 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
-static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
-				       unsigned alignment,
-				       bool map_and_fenceable);
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+						    unsigned alignment,
+						    bool map_and_fenceable);
 static void i915_gem_clear_fence_reg(struct drm_device *dev,
 				     struct drm_i915_fence_reg *reg);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
@@ -2142,25 +2142,37 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return ret;
 }
 
-void
+int
 i915_gem_flush_ring(struct drm_device *dev,
 		    struct intel_ring_buffer *ring,
 		    uint32_t invalidate_domains,
 		    uint32_t flush_domains)
 {
-	if (ring->flush(ring, invalidate_domains, flush_domains) == 0)
-		i915_gem_process_flushing_list(dev, flush_domains, ring);
+	int ret;
+
+	ret = ring->flush(ring, invalidate_domains, flush_domains);
+	if (ret)
+		return ret;
+
+	i915_gem_process_flushing_list(dev, flush_domains, ring);
+	return 0;
 }
 
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
+	int ret;
+
 	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
-	if (!list_empty(&ring->gpu_write_list))
-		i915_gem_flush_ring(dev, ring,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (!list_empty(&ring->gpu_write_list)) {
+		ret = i915_gem_flush_ring(dev, ring,
+					  I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		if (ret)
+			return ret;
+	}
 
 	return i915_wait_request(dev,
 				 i915_gem_next_request_seqno(dev, ring),
 				 ring);
@@ -2370,10 +2382,13 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 	int ret;
 
 	if (obj->fenced_gpu_access) {
-		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-			i915_gem_flush_ring(obj->base.dev,
-					    obj->last_fenced_ring,
-					    0, obj->base.write_domain);
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+			ret = i915_gem_flush_ring(obj->base.dev,
+						  obj->last_fenced_ring,
+						  0, obj->base.write_domain);
+			if (ret)
+				return ret;
+		}
 
 		obj->fenced_gpu_access = false;
 	}
@@ -2529,9 +2544,12 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 			return ret;
 	} else if (obj->tiling_changed) {
 		if (obj->fenced_gpu_access) {
-			if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-				i915_gem_flush_ring(obj->base.dev, obj->ring,
-						    0, obj->base.write_domain);
+			if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+				ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+							  0, obj->base.write_domain);
+				if (ret)
+					return ret;
+			}
 
 			obj->fenced_gpu_access = false;
 		}
@@ -2817,17 +2835,16 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
 i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 
 	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
-		return;
+		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
-	i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
-	BUG_ON(obj->base.write_domain);
+	return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2894,7 +2911,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	if (obj->pending_gpu_write || write) {
 		ret = i915_gem_object_wait_rendering(obj, true);
 		if (ret)
@@ -2939,7 +2959,10 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	/* Currently, we are always called from an non-interruptible context. */
 	if (pipelined != obj->ring) {
@@ -2964,12 +2987,17 @@ int
 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 			  bool interruptible)
 {
+	int ret;
+
 	if (!obj->active)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-		i915_gem_flush_ring(obj->base.dev, obj->ring,
-				    0, obj->base.write_domain);
+	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+		ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+					  0, obj->base.write_domain);
+		if (ret)
+			return ret;
+	}
 
 	return i915_gem_object_wait_rendering(obj, interruptible);
 }
@@ -2986,7 +3014,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3081,7 +3112,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 	if (offset == 0 && size == obj->base.size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3374,8 +3408,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * flush earlier is beneficial.
 	 */
 	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-		i915_gem_flush_ring(dev, obj->ring,
-				    0, obj->base.write_domain);
+		ret = i915_gem_flush_ring(dev, obj->ring,
+					  0, obj->base.write_domain);
 	} else if (obj->ring->outstanding_lazy_request ==
 		   obj->last_rendering_seqno) {
 		struct drm_i915_gem_request *request;
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -713,14 +713,14 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	return ret;
 }
 
-static void
+static int
 i915_gem_execbuffer_flush(struct drm_device *dev,
 			  uint32_t invalidate_domains,
 			  uint32_t flush_domains,
 			  uint32_t flush_rings)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
 
 	if (flush_domains & I915_GEM_DOMAIN_CPU)
 		intel_gtt_chipset_flush();
@@ -730,11 +730,17 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
 	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
 		for (i = 0; i < I915_NUM_RINGS; i++)
-			if (flush_rings & (1 << i))
-				i915_gem_flush_ring(dev, &dev_priv->ring[i],
-						    invalidate_domains,
-						    flush_domains);
+			if (flush_rings & (1 << i)) {
+				ret = i915_gem_flush_ring(dev,
+							  &dev_priv->ring[i],
+							  invalidate_domains,
+							  flush_domains);
+				if (ret)
+					return ret;
+			}
 	}
+
+	return 0;
 }
 
 static int
@@ -798,10 +804,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 				   cd.invalidate_domains,
 				   cd.flush_domains);
 #endif
-		i915_gem_execbuffer_flush(ring->dev,
-					  cd.invalidate_domains,
-					  cd.flush_domains,
-					  cd.flush_rings);
+		ret = i915_gem_execbuffer_flush(ring->dev,
+						cd.invalidate_domains,
+						cd.flush_domains,
+						cd.flush_rings);
+		if (ret)
+			return ret;
 	}
 
 	list_for_each_entry(obj, objects, exec_list) {