Commit 0025c077 authored by Mika Kuoppala, committed by Daniel Vetter

drm/i915: change i915_add_request to macro

Only execbuffer needed all the parameters on i915_add_request().
By putting __i915_add_request behind macro, all current callsites
become cleaner. Following patch will introduce a new parameter
for __i915_add_request. With this patch, only the relevant callsite
will reflect the change making commit smaller and easier to understand.

v2: _i915_add_request as function name (Chris Wilson)

v3: change name __i915_add_request and fix ordering of params (Ben Widawsky)
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent c0bb617a
...@@ -1756,9 +1756,11 @@ void i915_gem_init_swizzling(struct drm_device *dev); ...@@ -1756,9 +1756,11 @@ void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev); int __must_check i915_gem_idle(struct drm_device *dev);
int i915_add_request(struct intel_ring_buffer *ring, int __i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file, struct drm_file *file,
u32 *seqno); u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, seqno);
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
uint32_t seqno); uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
......
...@@ -959,7 +959,7 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) ...@@ -959,7 +959,7 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
ret = 0; ret = 0;
if (seqno == ring->outstanding_lazy_request) if (seqno == ring->outstanding_lazy_request)
ret = i915_add_request(ring, NULL, NULL); ret = i915_add_request(ring, NULL);
return ret; return ret;
} }
...@@ -2000,10 +2000,9 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) ...@@ -2000,10 +2000,9 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
return 0; return 0;
} }
int int __i915_add_request(struct intel_ring_buffer *ring,
i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file,
struct drm_file *file, u32 *out_seqno)
u32 *out_seqno)
{ {
drm_i915_private_t *dev_priv = ring->dev->dev_private; drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
...@@ -2280,7 +2279,7 @@ i915_gem_retire_work_handler(struct work_struct *work) ...@@ -2280,7 +2279,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
idle = true; idle = true;
for_each_ring(ring, dev_priv, i) { for_each_ring(ring, dev_priv, i) {
if (ring->gpu_caches_dirty) if (ring->gpu_caches_dirty)
i915_add_request(ring, NULL, NULL); i915_add_request(ring, NULL);
idle &= list_empty(&ring->request_list); idle &= list_empty(&ring->request_list);
} }
......
...@@ -455,7 +455,7 @@ static int do_switch(struct i915_hw_context *to) ...@@ -455,7 +455,7 @@ static int do_switch(struct i915_hw_context *to)
from->obj->dirty = 1; from->obj->dirty = 1;
BUG_ON(from->obj->ring != ring); BUG_ON(from->obj->ring != ring);
ret = i915_add_request(ring, NULL, NULL); ret = i915_add_request(ring, NULL);
if (ret) { if (ret) {
/* Too late, we've already scheduled a context switch. /* Too late, we've already scheduled a context switch.
* Try to undo the change so that the hw state is * Try to undo the change so that the hw state is
......
...@@ -802,7 +802,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, ...@@ -802,7 +802,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
ring->gpu_caches_dirty = true; ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */ /* Add a breadcrumb for the completion of the batch buffer */
(void)i915_add_request(ring, file, NULL); (void)__i915_add_request(ring, file, NULL);
} }
static int static int
......
...@@ -217,7 +217,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, ...@@ -217,7 +217,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret; int ret;
BUG_ON(overlay->last_flip_req); BUG_ON(overlay->last_flip_req);
ret = i915_add_request(ring, NULL, &overlay->last_flip_req); ret = i915_add_request(ring, &overlay->last_flip_req);
if (ret) if (ret)
return ret; return ret;
...@@ -286,7 +286,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, ...@@ -286,7 +286,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
intel_ring_emit(ring, flip_addr); intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring); intel_ring_advance(ring);
return i915_add_request(ring, NULL, &overlay->last_flip_req); return i915_add_request(ring, &overlay->last_flip_req);
} }
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
......
...@@ -1512,7 +1512,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring) ...@@ -1512,7 +1512,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
/* We need to add any requests required to flush the objects and ring */ /* We need to add any requests required to flush the objects and ring */
if (ring->outstanding_lazy_request) { if (ring->outstanding_lazy_request) {
ret = i915_add_request(ring, NULL, NULL); ret = i915_add_request(ring, NULL);
if (ret) if (ret)
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment