Commit ae70797d authored by John Harrison, committed by Daniel Vetter

drm/i915: Update a bunch of LRC functions to take requests

A bunch of the low level LRC functions were passing around ringbuf and ctx
pairs. In a few cases, they took the r/c pair and a request as well. This is all
quite messy and unnecessary. The context_queue() call is especially bad since the
fake request code got removed - it takes a request and three extra things that
must be extracted from the request and then it checks them against what it finds
in the request. Removing all the derivable data makes the code much simpler all
round.

This patch updates those functions to just take the request structure.

Note that logical_ring_wait_for_space now takes a request structure but already
had a local request pointer that it uses to scan for something to wait on. To
avoid confusion the local variable has been renamed 'target' (it is searching
for a target request to do something with) and the parameter has been called req
(to guarantee anything accidentally missed gets a compiler error).

v2: Updated commit message re wait_for_space (Tomas Elf review comment).

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 9bb1af44
...@@ -544,23 +544,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) ...@@ -544,23 +544,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
((u32)ring->next_context_status_buffer & 0x07) << 8); ((u32)ring->next_context_status_buffer & 0x07) << 8);
} }
static int execlists_context_queue(struct intel_engine_cs *ring, static int execlists_context_queue(struct drm_i915_gem_request *request)
struct intel_context *to,
u32 tail,
struct drm_i915_gem_request *request)
{ {
struct intel_engine_cs *ring = request->ring;
struct drm_i915_gem_request *cursor; struct drm_i915_gem_request *cursor;
int num_elements = 0; int num_elements = 0;
if (to != ring->default_context) if (request->ctx != ring->default_context)
intel_lr_context_pin(ring, to); intel_lr_context_pin(ring, request->ctx);
WARN_ON(!request);
WARN_ON(to != request->ctx);
i915_gem_request_reference(request); i915_gem_request_reference(request);
request->tail = tail; request->tail = request->ringbuf->tail;
spin_lock_irq(&ring->execlist_lock); spin_lock_irq(&ring->execlist_lock);
...@@ -575,7 +570,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring, ...@@ -575,7 +570,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
struct drm_i915_gem_request, struct drm_i915_gem_request,
execlist_link); execlist_link);
if (to == tail_req->ctx) { if (request->ctx == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0, WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n"); "More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link); list_del(&tail_req->execlist_link);
...@@ -659,12 +654,12 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request ...@@ -659,12 +654,12 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
return 0; return 0;
} }
static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
struct intel_context *ctx,
int bytes) int bytes)
{ {
struct intel_engine_cs *ring = ringbuf->ring; struct intel_ringbuffer *ringbuf = req->ringbuf;
struct drm_i915_gem_request *request; struct intel_engine_cs *ring = req->ring;
struct drm_i915_gem_request *target;
unsigned space; unsigned space;
int ret; int ret;
...@@ -674,26 +669,26 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, ...@@ -674,26 +669,26 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
if (intel_ring_space(ringbuf) >= bytes) if (intel_ring_space(ringbuf) >= bytes)
return 0; return 0;
list_for_each_entry(request, &ring->request_list, list) { list_for_each_entry(target, &ring->request_list, list) {
/* /*
* The request queue is per-engine, so can contain requests * The request queue is per-engine, so can contain requests
* from multiple ringbuffers. Here, we must ignore any that * from multiple ringbuffers. Here, we must ignore any that
* aren't from the ringbuffer we're considering. * aren't from the ringbuffer we're considering.
*/ */
if (request->ringbuf != ringbuf) if (target->ringbuf != ringbuf)
continue; continue;
/* Would completion of this request free enough space? */ /* Would completion of this request free enough space? */
space = __intel_ring_space(request->postfix, ringbuf->tail, space = __intel_ring_space(target->postfix, ringbuf->tail,
ringbuf->size); ringbuf->size);
if (space >= bytes) if (space >= bytes)
break; break;
} }
if (WARN_ON(&request->list == &ring->request_list)) if (WARN_ON(&target->list == &ring->request_list))
return -ENOSPC; return -ENOSPC;
ret = i915_wait_request(request); ret = i915_wait_request(target);
if (ret) if (ret)
return ret; return ret;
...@@ -703,7 +698,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, ...@@ -703,7 +698,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
/* /*
* intel_logical_ring_advance_and_submit() - advance the tail and submit the workload * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
* @ringbuf: Logical Ringbuffer to advance. * @request: Request to advance the logical ringbuffer of.
* *
* The tail is updated in our logical ringbuffer struct, not in the actual context. What * The tail is updated in our logical ringbuffer struct, not in the actual context. What
* really happens during submission is that the context and current tail will be placed * really happens during submission is that the context and current tail will be placed
...@@ -711,23 +706,21 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, ...@@ -711,23 +706,21 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
* point, the tail *inside* the context is updated and the ELSP written to. * point, the tail *inside* the context is updated and the ELSP written to.
*/ */
static void static void
intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf, intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
struct intel_context *ctx,
struct drm_i915_gem_request *request)
{ {
struct intel_engine_cs *ring = ringbuf->ring; struct intel_engine_cs *ring = request->ring;
intel_logical_ring_advance(ringbuf); intel_logical_ring_advance(request->ringbuf);
if (intel_ring_stopped(ring)) if (intel_ring_stopped(ring))
return; return;
execlists_context_queue(ring, ctx, ringbuf->tail, request); execlists_context_queue(request);
} }
static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf, static int logical_ring_wrap_buffer(struct drm_i915_gem_request *req)
struct intel_context *ctx)
{ {
struct intel_ringbuffer *ringbuf = req->ringbuf;
uint32_t __iomem *virt; uint32_t __iomem *virt;
int rem = ringbuf->size - ringbuf->tail; int rem = ringbuf->size - ringbuf->tail;
...@@ -735,7 +728,7 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf, ...@@ -735,7 +728,7 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
WARN_ON(ringbuf->reserved_in_use); WARN_ON(ringbuf->reserved_in_use);
if (ringbuf->space < rem) { if (ringbuf->space < rem) {
int ret = logical_ring_wait_for_space(ringbuf, ctx, rem); int ret = logical_ring_wait_for_space(req, rem);
if (ret) if (ret)
return ret; return ret;
...@@ -752,9 +745,9 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf, ...@@ -752,9 +745,9 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
return 0; return 0;
} }
static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
struct intel_context *ctx, int bytes)
{ {
struct intel_ringbuffer *ringbuf = req->ringbuf;
int ret; int ret;
/* /*
...@@ -766,7 +759,7 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, ...@@ -766,7 +759,7 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
bytes += ringbuf->reserved_size; bytes += ringbuf->reserved_size;
if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) { if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
ret = logical_ring_wrap_buffer(ringbuf, ctx); ret = logical_ring_wrap_buffer(req);
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
...@@ -779,7 +772,7 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, ...@@ -779,7 +772,7 @@ static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
} }
if (unlikely(ringbuf->space < bytes)) { if (unlikely(ringbuf->space < bytes)) {
ret = logical_ring_wait_for_space(ringbuf, ctx, bytes); ret = logical_ring_wait_for_space(req, bytes);
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
} }
...@@ -814,8 +807,7 @@ static int intel_logical_ring_begin(struct drm_i915_gem_request *req, ...@@ -814,8 +807,7 @@ static int intel_logical_ring_begin(struct drm_i915_gem_request *req,
if (ret) if (ret)
return ret; return ret;
ret = logical_ring_prepare(req->ringbuf, req->ctx, ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
num_dwords * sizeof(uint32_t));
if (ret) if (ret)
return ret; return ret;
...@@ -1557,7 +1549,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request) ...@@ -1557,7 +1549,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP); intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request); intel_logical_ring_advance_and_submit(request);
/* /*
* Here we add two extra NOOPs as padding to avoid * Here we add two extra NOOPs as padding to avoid
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment