Commit 7da844c5 authored by Chris Wilson
parent 0d9bdd88
@@ -731,28 +731,3 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	return ret;
 }
-
-/**
- * Waits for a request to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int i915_wait_request(struct drm_i915_gem_request *req)
-{
-	int ret;
-
-	lockdep_assert_held(&req->i915->drm.struct_mutex);
-	GEM_BUG_ON(list_empty(&req->link));
-
-	ret = __i915_wait_request(req,
-				  req->i915->mm.interruptible,
-				  NULL,
-				  NULL);
-	if (ret)
-		return ret;
-
-	/* If the GPU hung, we want to keep the requests to find the guilty. */
-	if (!i915_reset_in_progress(&req->i915->gpu_error))
-		i915_gem_request_retire_upto(req);
-
-	return 0;
-}
@@ -220,10 +220,6 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			struct intel_rps_client *rps)
 	__attribute__((nonnull(1)));
-
-int __must_check
-i915_wait_request(struct drm_i915_gem_request *req)
-	__attribute__((nonnull));
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
 
 /**
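For reference, a hedged sketch of the wait-then-retire behaviour that the removed i915_wait_request() wrapper provided, restated with explanatory comments; the helper name wait_and_retire() is hypothetical, and the locking and list preconditions are assumed to be exactly those the removed code asserted:

/* Hypothetical stand-in for the removed i915_wait_request() wrapper. */
static int wait_and_retire(struct drm_i915_gem_request *req)
{
	int ret;

	/* Same preconditions the removed wrapper asserted. */
	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(list_empty(&req->link));

	/* Sleep (interruptibly if permitted) until the request is signaled. */
	ret = __i915_wait_request(req, req->i915->mm.interruptible,
				  NULL, NULL);
	if (ret)
		return ret;

	/* After a GPU hang, keep the requests so the guilty one can be found. */
	if (!i915_reset_in_progress(&req->i915->gpu_error))
		i915_gem_request_retire_upto(req);

	return 0;
}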
@@ -2269,6 +2269,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
 	struct intel_ring *ring = req->ring;
 	struct drm_i915_gem_request *target;
+	int ret;
 
 	intel_ring_update_space(ring);
 	if (ring->space >= bytes)
@@ -2298,7 +2299,18 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	if (WARN_ON(&target->ring_link == &ring->request_list))
 		return -ENOSPC;
 
-	return i915_wait_request(target);
+	ret = __i915_wait_request(target, true, NULL, NULL);
+	if (ret)
+		return ret;
+
+	if (i915_reset_in_progress(&target->i915->gpu_error))
+		return -EAGAIN;
+
+	i915_gem_request_retire_upto(target);
+
+	intel_ring_update_space(ring);
+	GEM_BUG_ON(ring->space < bytes);
+	return 0;
 }
 
 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
@@ -2336,10 +2348,6 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 		int ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
 			return ret;
-
-		intel_ring_update_space(ring);
-		if (unlikely(ring->space < wait_bytes))
-			return -EAGAIN;
 	}
 
 	if (unlikely(need_wrap)) {
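Taken together, the two ring-buffer hunks mean wait_for_space() now performs the wait, the reset check, and the retire itself, and only returns 0 once ring->space has been refreshed, which is why the re-check that intel_ring_begin() used to do after waiting is dropped. Previously a pending reset made the wrapper return 0 without retiring, and intel_ring_begin() then noticed the lack of space and returned -EAGAIN; now wait_for_space() reports -EAGAIN directly. A hedged sketch of the resulting tail of wait_for_space(), restated from the hunks above with comments (the earlier part of the function is assumed unchanged, and the second argument is taken to be the interruptible flag, matching what the removed wrapper passed in that position):

	/* Wait for the selected request (true: interruptible, as before). */
	ret = __i915_wait_request(target, true, NULL, NULL);
	if (ret)
		return ret;

	/*
	 * While a GPU reset is pending the stalled requests are kept around
	 * (to find the guilty one), so no ring space can be reclaimed yet.
	 */
	if (i915_reset_in_progress(&target->i915->gpu_error))
		return -EAGAIN;

	/* Retiring up to the target releases its ring space back to us. */
	i915_gem_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);	/* intel_ring_begin() relies on this */
	return 0;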