Commit 5b4a60c2 authored by John Harrison, committed by Daniel Vetter

drm/i915: Add flag to i915_add_request() to skip the cache flush

In order to explicitly track all GPU work (and completely remove the outstanding
lazy request), it is necessary to add extra i915_add_request() calls to various
places. Some of these do not need the implicit cache flush done as part of the
standard batch buffer submission process.

This patch adds a flag to _add_request() to specify whether the flush is
required or not.
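
To illustrate, a minimal sketch of the API shape this introduces (the
declarations match the i915_drv.h hunk below; the example_track_idle_work()
caller is hypothetical and not part of this patch):

void __i915_add_request(struct intel_engine_cs *ring,
                        struct drm_file *file,
                        struct drm_i915_gem_object *batch_obj,
                        bool flush_caches);

/* Default: flush GPU caches before emitting the request, as batch
 * buffer submission has always done.
 */
#define i915_add_request(ring) \
        __i915_add_request(ring, NULL, NULL, true)

/* New: emit a tracking request without the implicit flush, for call
 * sites whose work leaves no dirty cache state behind.
 */
#define i915_add_request_no_flush(ring) \
        __i915_add_request(ring, NULL, NULL, false)

/* Hypothetical caller (sketch only): explicitly track GPU work that
 * does not need the flush done for normal batch buffer submission.
 */
static void example_track_idle_work(struct intel_engine_cs *ring)
{
        i915_add_request_no_flush(ring);
}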

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 8a8edb59
drivers/gpu/drm/i915/i915_drv.h
@@ -2890,9 +2890,12 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct intel_engine_cs *ring,
                         struct drm_file *file,
-                        struct drm_i915_gem_object *batch_obj);
+                        struct drm_i915_gem_object *batch_obj,
+                        bool flush_caches);
 #define i915_add_request(ring) \
-        __i915_add_request(ring, NULL, NULL)
+        __i915_add_request(ring, NULL, NULL, true)
+#define i915_add_request_no_flush(ring) \
+        __i915_add_request(ring, NULL, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
                         unsigned reset_counter,
                         bool interruptible,
...
drivers/gpu/drm/i915/i915_gem.c
@@ -2470,7 +2470,8 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
  */
 void __i915_add_request(struct intel_engine_cs *ring,
                         struct drm_file *file,
-                        struct drm_i915_gem_object *obj)
+                        struct drm_i915_gem_object *obj,
+                        bool flush_caches)
 {
         struct drm_i915_private *dev_priv = ring->dev->dev_private;
         struct drm_i915_gem_request *request;
@@ -2502,12 +2503,14 @@ void __i915_add_request(struct intel_engine_cs *ring,
          * is that the flush _must_ happen before the next request, no matter
          * what.
          */
-        if (i915.enable_execlists)
-                ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
-        else
-                ret = intel_ring_flush_all_caches(ring);
-        /* Not allowed to fail! */
-        WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+        if (flush_caches) {
+                if (i915.enable_execlists)
+                        ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
+                else
+                        ret = intel_ring_flush_all_caches(ring);
+                /* Not allowed to fail! */
+                WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+        }
 
         /* Record the position of the start of the request so that
          * should we detect the updated seqno part-way through the
...
drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1066,7 +1066,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
         params->ring->gpu_caches_dirty = true;
 
         /* Add a breadcrumb for the completion of the batch buffer */
-        __i915_add_request(params->ring, params->file, params->batch_obj);
+        __i915_add_request(params->ring, params->file, params->batch_obj, true);
 }
 
 static int
...
drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -173,7 +173,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
 
         i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
-        __i915_add_request(ring, NULL, so.obj);
+        __i915_add_request(ring, NULL, so.obj, true);
         /* __i915_add_request moves object to inactive if it fails */
 out:
         i915_gem_render_state_fini(&so);
...
drivers/gpu/drm/i915/intel_lrc.c
@@ -1599,7 +1599,7 @@ static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
 
         i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
-        __i915_add_request(ring, file, so.obj);
+        __i915_add_request(ring, file, so.obj, true);
         /* intel_logical_ring_add_request moves object to inactive if it
          * fails */
 out:
...