Commit 3bb73aba authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Allow late allocation of request for i915_add_request()

Request preallocation was added to i915_add_request() in order to
support the overlay. However, not all users care, and they can quite
happily ignore a failure to allocate the request since they will simply
repeat the request in the future.

By pushing the allocation down into i915_add_request(), we can then
remove some rather ugly error handling in the callers.

v2: Nullify request->file_priv otherwise we chase a garbage pointer
when retiring requests.
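
To make the new contract concrete, here is a minimal, self-contained
userspace sketch of the late-allocation pattern this patch introduces.
The names below (struct ring, emit_request, add_request) are
illustrative stand-ins, not the real i915 types or API:

#include <stdlib.h>

/* Illustrative stand-ins only -- not the real i915 types. */
struct request {
	int seqno;
	void *file_priv;
	struct request *next;
};

struct ring {
	int next_seqno;
	struct request *requests;	/* pending requests, newest first */
	int (*emit_request)(struct ring *ring, int *seqno);
};

/*
 * Late allocation: a caller that can tolerate failure (it will simply
 * resubmit later) passes request == NULL and lets add_request()
 * allocate on its behalf.  On failure the request is freed here, so
 * callers need no cleanup path of their own.
 */
static int add_request(struct ring *ring, void *file, struct request *request)
{
	int seqno, ret;

	if (request == NULL) {
		request = malloc(sizeof(*request));
		if (request == NULL)
			return -1;	/* -ENOMEM in the kernel */
	}

	ret = ring->emit_request(ring, &seqno);
	if (ret) {
		free(request);	/* ours to free, preallocated or not */
		return ret;
	}

	request->seqno = seqno;
	request->file_priv = NULL;	/* the v2 fix: malloc() does not zero */
	request->next = ring->requests;
	ring->requests = request;

	if (file) {
		/* ...here the real driver links request->file_priv
		 * into the per-file request list... */
	}
	return 0;
}

static int emit_stub(struct ring *ring, int *seqno)
{
	*seqno = ++ring->next_seqno;
	return 0;
}

int main(void)
{
	struct ring ring = { .emit_request = emit_stub };

	/* Callers no longer preallocate: pass NULL and ignore failure. */
	(void)add_request(&ring, NULL, NULL);
	return 0;
}

With this contract, callers that previously open-coded a kzalloc()/
kfree() dance around every submission collapse to a single
i915_add_request(ring, NULL, NULL), or (void)i915_add_request(ring,
file, NULL) where the result is deliberately ignored, as the hunks
below show.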
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 540a8950
@@ -1358,9 +1358,9 @@ void i915_gem_init_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct intel_ring_buffer *ring,
-				  struct drm_file *file,
-				  struct drm_i915_gem_request *request);
+int i915_add_request(struct intel_ring_buffer *ring,
+		     struct drm_file *file,
+		     struct drm_i915_gem_request *request);
 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
 				 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -1597,7 +1597,12 @@ i915_add_request(struct intel_ring_buffer *ring,
 		ring->gpu_caches_dirty = false;
 	}
 
-	BUG_ON(request == NULL);
+	if (request == NULL) {
+		request = kmalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+	}
+
 	seqno = i915_gem_next_request_seqno(ring);
 
 	/* Record the position of the start of the request so that
@@ -1608,8 +1613,10 @@ i915_add_request(struct intel_ring_buffer *ring,
 	request_ring_position = intel_ring_get_tail(ring);
 
 	ret = ring->add_request(ring, &seqno);
-	if (ret)
-		return ret;
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 
 	trace_i915_gem_request_add(ring, seqno);
 
@@ -1619,6 +1626,7 @@ i915_add_request(struct intel_ring_buffer *ring,
 	request->emitted_jiffies = jiffies;
 	was_empty = list_empty(&ring->request_list);
 	list_add_tail(&request->list, &ring->request_list);
+	request->file_priv = NULL;
 
 	if (file) {
 		struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -1859,14 +1867,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	 */
 	idle = true;
 	for_each_ring(ring, dev_priv, i) {
-		if (ring->gpu_caches_dirty) {
-			struct drm_i915_gem_request *request;
-
-			request = kzalloc(sizeof(*request), GFP_KERNEL);
-			if (request == NULL ||
-			    i915_add_request(ring, NULL, request))
-				kfree(request);
-		}
+		if (ring->gpu_caches_dirty)
+			i915_add_request(ring, NULL, NULL);
 
 		idle &= list_empty(&ring->request_list);
 	}
@@ -1913,25 +1915,13 @@ i915_gem_check_wedge(struct drm_i915_private *dev_priv,
 static int
 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
 {
-	int ret = 0;
+	int ret;
 
 	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
-	if (seqno == ring->outstanding_lazy_request) {
-		struct drm_i915_gem_request *request;
-
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-
-		ret = i915_add_request(ring, NULL, request);
-		if (ret) {
-			kfree(request);
-			return ret;
-		}
-
-		BUG_ON(seqno != request->seqno);
-	}
+	ret = 0;
+	if (seqno == ring->outstanding_lazy_request)
+		ret = i915_add_request(ring, NULL, NULL);
 
 	return ret;
 }
@@ -972,16 +972,11 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 				    struct drm_file *file,
 				    struct intel_ring_buffer *ring)
 {
-	struct drm_i915_gem_request *request;
-
 	/* Unconditionally force add_request to emit a full flush. */
 	ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL || i915_add_request(ring, file, request)) {
-		kfree(request);
-	}
+	(void)i915_add_request(ring, file, NULL);
 }
 
 static int