Commit 72f95afa authored by Nick Hoath, committed by Daniel Vetter

drm/i915: Removed duplicate members from submit_request

Where the gem request and the execlist queue item held duplicate variables for the
tail, context and ring (engine), use the ones from the request and remove the
duplicates from the execlist queue item.

Issue: VIZ-4274

v1: Rebase
v2: Fixed build issues. Keep separate postfix & tail pointers as these are
used in different ways. Reinserted missing full tail pointer update.
Signed-off-by: Nick Hoath <nicholas.hoath@intel.com>
Reviewed-by: Thomas Daniel <thomas.daniel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 2d12955a
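
The core of the change is visible in the struct intel_ctx_submit_request hunk below: the execlist queue item loses its own ctx, ring and tail members, and every call site reaches them through the queue item's request pointer instead. A rough sketch of the resulting layout (field order and formatting are illustrative, not a verbatim excerpt from the tree):

	struct intel_ctx_submit_request {
		struct drm_i915_gem_request *request;	/* provides ctx, ring and tail */

		struct list_head execlist_link;
		int elsp_submitted;
	};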
@@ -1968,11 +1968,11 @@ static int i915_execlists(struct seq_file *m, void *data)
 		if (head_req) {
 			struct drm_i915_gem_object *ctx_obj;
-			ctx_obj = head_req->ctx->engine[ring_id].state;
+			ctx_obj = head_req->request->ctx->engine[ring_id].state;
 			seq_printf(m, "\tHead request id: %u\n",
 				   intel_execlists_ctx_id(ctx_obj));
 			seq_printf(m, "\tHead request tail: %u\n",
-				   head_req->tail);
+				   head_req->request->tail);
 		}
 		seq_putc(m, '\n');
@@ -2089,7 +2089,14 @@ struct drm_i915_gem_request {
 	/** Position in the ringbuffer of the start of the request */
 	u32 head;
-	/** Position in the ringbuffer of the end of the request */
+	/**
+	 * Position in the ringbuffer of the start of the postfix.
+	 * This is required to calculate the maximum available ringbuffer
+	 * space without overwriting the postfix.
+	 */
+	u32 postfix;
+	/** Position in the ringbuffer of the end of the whole request */
 	u32 tail;
 	/** Context related to this request */
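
With the new postfix field the request records three ringbuffer positions: head (start of the request), postfix (start of the closing commands) and tail (end of the whole request). Free-space checks, such as the intel_ring_wait_request() hunk further down, measure reusable space only up to the postfix so that the closing commands are never overwritten. A minimal, self-contained sketch of that wrap-around calculation (illustrative only, not the driver's __intel_ring_space()):

	static int space_up_to_postfix(unsigned int postfix, unsigned int tail,
				       unsigned int size)
	{
		/* Bytes between the current write position (tail) and the
		 * start of the request's postfix, wrapping around the ring.
		 */
		int space = (int)postfix - (int)tail;

		if (space <= 0)
			space += size;
		return space;
	}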
@@ -2453,7 +2453,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	request_ring_position = intel_ring_get_tail(ringbuf);
 	if (i915.enable_execlists) {
-		ret = ring->emit_request(ringbuf);
+		ret = ring->emit_request(ringbuf, request);
 		if (ret)
 			return ret;
 	} else {
@@ -2463,7 +2463,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	}
 	request->head = request_start;
-	request->tail = request_ring_position;
+	request->postfix = request_ring_position;
 	/* Whilst this request exists, batch_obj will be on the
 	 * active_list, and so will hold the active reference. Only when this
@@ -2657,7 +2657,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
					      execlist_link);
 		list_del(&submit_req->execlist_link);
 		intel_runtime_pm_put(dev_priv);
-		i915_gem_context_unreference(submit_req->ctx);
+		i915_gem_context_unreference(submit_req->request->ctx);
 		kfree(submit_req);
 	}
@@ -2783,7 +2783,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
		 * of tail of the request to update the last known position
		 * of the GPU head.
		 */
-		ringbuf->last_retired_head = request->tail;
+		ringbuf->last_retired_head = request->postfix;
 		i915_gem_free_request(request);
 	}
@@ -1052,7 +1052,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
-			erq->tail = request->tail;
+			erq->tail = request->postfix;
		}
	}
 }
@@ -417,7 +417,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
				 execlist_link) {
 		if (!req0) {
			req0 = cursor;
-		} else if (req0->ctx == cursor->ctx) {
+		} else if (req0->request->ctx == cursor->request->ctx) {
			/* Same ctx: ignore first request, as second request
			 * will update tail past first request's workload */
			cursor->elsp_submitted = req0->elsp_submitted;
@@ -433,9 +433,9 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 	WARN_ON(req1 && req1->elsp_submitted);
-	execlists_submit_contexts(ring, req0->ctx, req0->tail,
-				  req1 ? req1->ctx : NULL,
-				  req1 ? req1->tail : 0);
+	execlists_submit_contexts(ring, req0->request->ctx, req0->request->tail,
+				  req1 ? req1->request->ctx : NULL,
+				  req1 ? req1->request->tail : 0);
 	req0->elsp_submitted++;
 	if (req1)
@@ -455,7 +455,7 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
 	if (head_req != NULL) {
 		struct drm_i915_gem_object *ctx_obj =
-				head_req->ctx->engine[ring->id].state;
+				head_req->request->ctx->engine[ring->id].state;
 		if (intel_execlists_ctx_id(ctx_obj) == request_id) {
			WARN(head_req->elsp_submitted == 0,
			     "Never submitted head request\n");
@@ -545,15 +545,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (req == NULL)
		return -ENOMEM;
-	req->ctx = to;
-	i915_gem_context_reference(req->ctx);
 	if (to != ring->default_context)
		intel_lr_context_pin(ring, to);
-	req->ring = ring;
-	req->tail = tail;
 	if (!request) {
		/*
		 * If there isn't a request associated with this submission,
@@ -563,11 +558,13 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
		request = kzalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;
+		request->ctx = to;
+		request->ring = ring;
 	}
-	request->ctx = to;
+	request->tail = tail;
 	req->request = request;
 	i915_gem_request_reference(request);
+	i915_gem_context_reference(req->request->ctx);
 	intel_runtime_pm_get(dev_priv);
@@ -584,7 +581,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
					   struct intel_ctx_submit_request,
					   execlist_link);
-		if (to == tail_req->ctx) {
+		if (to == tail_req->request->ctx) {
			WARN(tail_req->elsp_submitted != 0,
			     "More than 2 already-submitted reqs queued\n");
			list_del(&tail_req->execlist_link);
@@ -774,14 +771,14 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 	spin_unlock_irqrestore(&ring->execlist_lock, flags);
 	list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
-		struct intel_context *ctx = req->ctx;
+		struct intel_context *ctx = req->request->ctx;
 		struct drm_i915_gem_object *ctx_obj =
				ctx->engine[ring->id].state;
 		if (ctx_obj && (ctx != ring->default_context))
			intel_lr_context_unpin(ring, ctx);
 		intel_runtime_pm_put(dev_priv);
-		i915_gem_context_unreference(req->ctx);
+		i915_gem_context_unreference(ctx);
+		i915_gem_request_unreference(req->request);
 		list_del(&req->execlist_link);
 		kfree(req);
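
Note how the reference handling lines up across the execlists hunks: execlists_context_queue() takes a reference on the request (and, through it, on the context) when the queue item is created, and intel_execlists_retire_requests() drops both when the item is retired. A condensed view of that pairing, assembled from the lines above (illustrative, not a literal excerpt):

	/* queue (execlists_context_queue) */
	req->request = request;
	i915_gem_request_reference(request);
	i915_gem_context_reference(req->request->ctx);

	/* retire (intel_execlists_retire_requests) */
	i915_gem_context_unreference(ctx);
	i915_gem_request_unreference(req->request);
	kfree(req);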
@@ -105,10 +105,6 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
  * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
  */
 struct intel_ctx_submit_request {
-	struct intel_context *ctx;
-	struct intel_engine_cs *ring;
-	u32 tail;
 	struct list_head execlist_link;
 	int elsp_submitted;
@@ -1949,7 +1949,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
		return 0;
 	list_for_each_entry(request, &ring->request_list, list) {
-		if (__intel_ring_space(request->tail, ringbuf->tail,
+		if (__intel_ring_space(request->postfix, ringbuf->tail,
				       ringbuf->size) >= n) {
			break;
		}
@@ -239,7 +239,8 @@ struct intel_engine_cs {
 	struct list_head execlist_retired_req_list;
 	u8 next_context_status_buffer;
 	u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
-	int (*emit_request)(struct intel_ringbuffer *ringbuf);
+	int (*emit_request)(struct intel_ringbuffer *ringbuf,
+			    struct drm_i915_gem_request *request);
 	int (*emit_flush)(struct intel_ringbuffer *ringbuf,
			  u32 invalidate_domains,
			  u32 flush_domains);
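
This signature change is what lets the execlists emit path keep the request's tail up to date: __i915_add_request() now stores the pre-emit position in request->postfix and passes the request down, presumably so the backend can record where the request really ends (the v2 note above mentions reinstating the full tail update). A minimal sketch of what an emit_request() implementation can do with the extra parameter (hypothetical body; only the types and fields come from this patch):

	static int emit_request_sketch(struct intel_ringbuffer *ringbuf,
				       struct drm_i915_gem_request *request)
	{
		/* ... emit the closing commands (seqno write, user interrupt) ... */

		/* With the request available, record the full tail, past the
		 * postfix position saved earlier by __i915_add_request().
		 */
		request->tail = ringbuf->tail;
		return 0;
	}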