Commit 0340d9fd authored by Chris Wilson

drm/i915: Remove request retirement before each batch

This reimplements the denial-of-service protection against igt from
commit 227f782e ("drm/i915: Retire requests before creating a new
one") and transfers the stall from before each batch into get_pages().
The issue is that the stall is increasing latency between batches which
is detrimental in some cases (especially coupled with execlists) to
keeping the GPU well fed. Also we have made the observation that retiring
requests can of itself free objects (and requests) and therefore makes
a good first step when shrinking.

v2: Recycle objects prior to i915_gem_object_get_pages()
v3: Remove the reference to the ring from i915_gem_retire_requests_ring() as it
operates on an intel_engine_cs.
v4: Since commit 9b5f4e5e ("drm/i915: Retire oldest completed request
before allocating next") we no longer need the safeguard to retire
requests before get_pages(). We no longer see the huge latencies when
hitting the shrinker between allocations.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-4-git-send-email-chris@chris-wilson.co.uk
parent 115003e9
...@@ -3169,7 +3169,6 @@ struct drm_i915_gem_request * ...@@ -3169,7 +3169,6 @@ struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine); i915_gem_find_active_request(struct intel_engine_cs *engine);
void i915_gem_retire_requests(struct drm_i915_private *dev_priv); void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
static inline u32 i915_reset_counter(struct i915_gpu_error *error) static inline u32 i915_reset_counter(struct i915_gpu_error *error)
{ {
......
...@@ -781,8 +781,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, ...@@ -781,8 +781,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4; bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
int retry; int retry;
i915_gem_retire_requests_ring(engine);
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
INIT_LIST_HEAD(&ordered_vmas); INIT_LIST_HEAD(&ordered_vmas);
......
...@@ -732,7 +732,7 @@ int i915_wait_request(struct drm_i915_gem_request *req, ...@@ -732,7 +732,7 @@ int i915_wait_request(struct drm_i915_gem_request *req,
return ret; return ret;
} }
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine) static void engine_retire_requests(struct intel_engine_cs *engine)
{ {
struct drm_i915_gem_request *request, *next; struct drm_i915_gem_request *request, *next;
...@@ -756,7 +756,7 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv) ...@@ -756,7 +756,7 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
GEM_BUG_ON(!dev_priv->gt.awake); GEM_BUG_ON(!dev_priv->gt.awake);
for_each_engine(engine, dev_priv) { for_each_engine(engine, dev_priv) {
i915_gem_retire_requests_ring(engine); engine_retire_requests(engine);
if (list_empty(&engine->request_list)) if (list_empty(&engine->request_list))
dev_priv->gt.active_engines &= ~intel_engine_flag(engine); dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment