Commit e642c85b authored by Chris Wilson

drm/i915: Remove superfluous i915_add_request_no_flush() helper

The only time we need to emit a flush inside request emission is after
an execbuffer, for which we can use the full __i915_add_request(). All
other instances want the simpler i915_add_request() without flushing, so
remove the useless helper.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170317114709.8388-1-chris@chris-wilson.co.uk
parent e3b1895f
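
For readers unfamiliar with the i915 request API, the pattern being cleaned up here is a double-underscored worker that takes an explicit flag, fronted by convenience macros that bake the flag in. Once only a single call site (execbuffer) ever passes true, the second macro is dead weight. Below is a minimal, self-contained sketch of the idea; __add_request/add_request are invented stand-ins for illustration, not the kernel's symbols:

#include <stdbool.h>
#include <stdio.h>

/* Worker takes the flag explicitly, mirroring __i915_add_request(). */
static void __add_request(int req, bool flush_caches)
{
        if (flush_caches)
                printf("request %d: flush caches, then emit\n", req);
        else
                printf("request %d: emit without flushing\n", req);
}

/*
 * The common-case macro defaults to "no flush"; the one caller that
 * needs the flush (execbuffer in the real driver) calls the
 * double-underscore worker directly, so no second macro is needed.
 */
#define add_request(req) __add_request(req, false)

int main(void)
{
        add_request(1);         /* typical caller: no flush */
        __add_request(2, true); /* the lone flushing caller */
        return 0;
}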
@@ -212,7 +212,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	workload->status = ret;
 
 	if (!IS_ERR_OR_NULL(rq))
-		i915_add_request_no_flush(rq);
+		i915_add_request(rq);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	return ret;
 }
...
@@ -933,7 +933,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 		}
 
 		ret = i915_switch_context(req);
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		if (ret)
 			return ret;
 	}
...
@@ -267,8 +267,6 @@ int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
 
 void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
 #define i915_add_request(req) \
-	__i915_add_request(req, true)
-#define i915_add_request_no_flush(req) \
 	__i915_add_request(req, false)
 
 void __i915_gem_request_submit(struct drm_i915_gem_request *request);
...
@@ -10668,7 +10668,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		intel_mark_page_flip_active(intel_crtc, work);
 
 		work->flip_queued_req = i915_gem_request_get(request);
-		i915_add_request_no_flush(request);
+		i915_add_request(request);
 	}
 
 	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
@@ -10684,7 +10684,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	return 0;
 
 cleanup_request:
-	i915_add_request_no_flush(request);
+	i915_add_request(request);
 cleanup_unpin:
 	to_intel_plane_state(primary->state)->vma = work->old_vma;
 	intel_unpin_fb_vma(vma);
...
@@ -278,7 +278,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	cs = intel_ring_begin(req, 4);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
@@ -343,7 +343,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 
 	cs = intel_ring_begin(req, 2);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
@@ -419,7 +419,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 
 	cs = intel_ring_begin(req, 6);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
@@ -477,7 +477,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 
 	cs = intel_ring_begin(req, 2);
 	if (IS_ERR(cs)) {
-		i915_add_request_no_flush(req);
+		i915_add_request(req);
 		return PTR_ERR(cs);
 	}
 
...
@@ -7086,7 +7086,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
 		rcs->init_context(req);
 
 	/* Mark the device busy, calling intel_enable_gt_powersave() */
-	i915_add_request_no_flush(req);
+	i915_add_request(req);
 
 unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
...