Commit 1cf0ba14 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Flush request queue when waiting for ring space

During the review of

commit 1f70999f
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Mon Jan 27 22:43:07 2014 +0000

    drm/i915: Prevent recursion by retiring requests when the ring is full

Ville raised the point that our interaction with request->tail was
likely to foul up other uses elsewhere (such as hang check comparing
ACTHD against requests).

However, we also need to restore the implicit retire requests that certain
test cases depend upon (e.g. igt/gem_exec_lut_handle), this raises the
spectre that the ppgtt will randomly call i915_gpu_idle() and recurse
back into intel_ring_begin().
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=78023
Reviewed-by: Brad Volkin <bradley.d.volkin@intel.com>
[danvet: Remove now unused 'tail' variable as spotted by Brad.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent dcfe0506
...@@ -2215,6 +2215,7 @@ struct drm_i915_gem_request * ...@@ -2215,6 +2215,7 @@ struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_ring_buffer *ring); i915_gem_find_active_request(struct intel_ring_buffer *ring);
bool i915_gem_retire_requests(struct drm_device *dev); bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible); bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error) static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
......
...@@ -64,7 +64,6 @@ static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker, ...@@ -64,7 +64,6 @@ static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target); static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
static bool cpu_cache_is_coherent(struct drm_device *dev, static bool cpu_cache_is_coherent(struct drm_device *dev,
enum i915_cache_level level) enum i915_cache_level level)
...@@ -2448,7 +2447,7 @@ void i915_gem_reset(struct drm_device *dev) ...@@ -2448,7 +2447,7 @@ void i915_gem_reset(struct drm_device *dev)
/** /**
* This function clears the request list as sequence numbers are passed. * This function clears the request list as sequence numbers are passed.
*/ */
static void void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{ {
uint32_t seqno; uint32_t seqno;
......
...@@ -40,14 +40,19 @@ ...@@ -40,14 +40,19 @@
*/ */
#define CACHELINE_BYTES 64 #define CACHELINE_BYTES 64
static inline int ring_space(struct intel_ring_buffer *ring) static inline int __ring_space(int head, int tail, int size)
{ {
int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); int space = head - (tail + I915_RING_FREE_SPACE);
if (space < 0) if (space < 0)
space += ring->size; space += size;
return space; return space;
} }
/*
 * ring_space - free space currently available in @ring.
 *
 * Convenience wrapper around __ring_space() using the ring's cached
 * head (masked to the offset bits with HEAD_ADDR), tail and size.
 */
static inline int ring_space(struct intel_ring_buffer *ring)
{
	return __ring_space(ring->head & HEAD_ADDR, ring->tail, ring->size);
}
static bool intel_ring_stopped(struct intel_ring_buffer *ring) static bool intel_ring_stopped(struct intel_ring_buffer *ring)
{ {
struct drm_i915_private *dev_priv = ring->dev->dev_private; struct drm_i915_private *dev_priv = ring->dev->dev_private;
...@@ -1482,7 +1487,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) ...@@ -1482,7 +1487,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{ {
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
u32 seqno = 0, tail; u32 seqno = 0;
int ret; int ret;
if (ring->last_retired_head != -1) { if (ring->last_retired_head != -1) {
...@@ -1495,26 +1500,10 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) ...@@ -1495,26 +1500,10 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
} }
list_for_each_entry(request, &ring->request_list, list) { list_for_each_entry(request, &ring->request_list, list) {
int space; if (__ring_space(request->tail, ring->tail, ring->size) >= n) {
if (request->tail == -1)
continue;
space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
if (space < 0)
space += ring->size;
if (space >= n) {
seqno = request->seqno; seqno = request->seqno;
tail = request->tail;
break; break;
} }
/* Consume this request in case we need more space than
* is available and so need to prevent a race between
* updating last_retired_head and direct reads of
* I915_RING_HEAD. It also provides a nice sanity check.
*/
request->tail = -1;
} }
if (seqno == 0) if (seqno == 0)
...@@ -1524,11 +1513,11 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) ...@@ -1524,11 +1513,11 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
if (ret) if (ret)
return ret; return ret;
ring->head = tail; i915_gem_retire_requests_ring(ring);
ring->space = ring_space(ring); ring->head = ring->last_retired_head;
if (WARN_ON(ring->space < n)) ring->last_retired_head = -1;
return -ENOSPC;
ring->space = ring_space(ring);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment