Commit f2605207 authored by Chris Wilson

drm/i915/execlists: Track begin/end of execlists submission sequences

We would like to start doing some bookkeeping at the beginning, between
contexts and at the end of execlists submission. We already mark the
beginning and end using EXECLISTS_ACTIVE_USER, to provide an indication
when the HW is idle. This gives us a pair of sequence points we can then
expand on for further bookkeeping.

v2: Refactor guc submission to share the same begin/end.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Francisco Jerez <currojerez@riseup.net>
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180331130626.10712-1-chris@chris-wilson.co.uk
parent 1f901d59
...@@ -728,7 +728,7 @@ static void guc_dequeue(struct intel_engine_cs *engine) ...@@ -728,7 +728,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
execlists->first = rb; execlists->first = rb;
if (submit) { if (submit) {
port_assign(port, last); port_assign(port, last);
execlists_set_active(execlists, EXECLISTS_ACTIVE_USER); execlists_user_begin(execlists, execlists->port);
guc_submit(engine); guc_submit(engine);
} }
...@@ -748,17 +748,20 @@ static void guc_submission_tasklet(unsigned long data) ...@@ -748,17 +748,20 @@ static void guc_submission_tasklet(unsigned long data)
struct execlist_port *port = execlists->port; struct execlist_port *port = execlists->port;
struct i915_request *rq; struct i915_request *rq;
rq = port_request(&port[0]); rq = port_request(port);
while (rq && i915_request_completed(rq)) { while (rq && i915_request_completed(rq)) {
trace_i915_request_out(rq); trace_i915_request_out(rq);
i915_request_put(rq); i915_request_put(rq);
execlists_port_complete(execlists, port); port = execlists_port_complete(execlists, port);
if (port_isset(port)) {
rq = port_request(&port[0]); execlists_user_begin(execlists, port);
rq = port_request(port);
} else {
execlists_user_end(execlists);
rq = NULL;
}
} }
if (!rq)
execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) && if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) == intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
......
...@@ -374,6 +374,19 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status) ...@@ -374,6 +374,19 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
status, rq); status, rq);
} }
/*
 * Mark the start of user-payload execution on this engine: sets the
 * EXECLISTS_ACTIVE_USER bit (idempotently, via set_active_once).
 * @port is currently unused here — presumably reserved for the further
 * per-port bookkeeping this patch prepares for (see commit message);
 * TODO confirm against later callers.
 */
inline void
execlists_user_begin(struct intel_engine_execlists *execlists,
const struct execlist_port *port)
{
execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER);
}
/*
 * Mark the end of user-payload execution: clears EXECLISTS_ACTIVE_USER,
 * pairing with execlists_user_begin(). With the bit clear the engine
 * is reported as not executing user requests.
 */
inline void
execlists_user_end(struct intel_engine_execlists *execlists)
{
execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
}
static inline void static inline void
execlists_context_schedule_in(struct i915_request *rq) execlists_context_schedule_in(struct i915_request *rq)
{ {
...@@ -711,7 +724,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) ...@@ -711,7 +724,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
spin_unlock_irq(&engine->timeline->lock); spin_unlock_irq(&engine->timeline->lock);
if (submit) { if (submit) {
execlists_set_active(execlists, EXECLISTS_ACTIVE_USER); execlists_user_begin(execlists, execlists->port);
execlists_submit_ports(engine); execlists_submit_ports(engine);
} }
...@@ -742,7 +755,7 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) ...@@ -742,7 +755,7 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
port++; port++;
} }
execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); execlists_user_end(execlists);
} }
static void clear_gtiir(struct intel_engine_cs *engine) static void clear_gtiir(struct intel_engine_cs *engine)
...@@ -873,7 +886,7 @@ static void execlists_submission_tasklet(unsigned long data) ...@@ -873,7 +886,7 @@ static void execlists_submission_tasklet(unsigned long data)
{ {
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists; struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port * const port = execlists->port; struct execlist_port *port = execlists->port;
struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_private *dev_priv = engine->i915;
bool fw = false; bool fw = false;
...@@ -1012,10 +1025,28 @@ static void execlists_submission_tasklet(unsigned long data) ...@@ -1012,10 +1025,28 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_BUG_ON(count == 0); GEM_BUG_ON(count == 0);
if (--count == 0) { if (--count == 0) {
/*
* On the final event corresponding to the
* submission of this context, we expect either
* an element-switch event or a completion
* event (and on completion, the active-idle
* marker). No more preemptions, lite-restore
* or otherwise.
*/
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
GEM_BUG_ON(port_isset(&port[1]) && GEM_BUG_ON(port_isset(&port[1]) &&
!(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)); !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
GEM_BUG_ON(!port_isset(&port[1]) &&
!(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
/*
* We rely on the hardware being strongly
* ordered, that the breadcrumb write is
* coherent (visible from the CPU) before the
* user interrupt and CSB is processed.
*/
GEM_BUG_ON(!i915_request_completed(rq)); GEM_BUG_ON(!i915_request_completed(rq));
execlists_context_schedule_out(rq); execlists_context_schedule_out(rq);
trace_i915_request_out(rq); trace_i915_request_out(rq);
i915_request_put(rq); i915_request_put(rq);
...@@ -1023,17 +1054,14 @@ static void execlists_submission_tasklet(unsigned long data) ...@@ -1023,17 +1054,14 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_TRACE("%s completed ctx=%d\n", GEM_TRACE("%s completed ctx=%d\n",
engine->name, port->context_id); engine->name, port->context_id);
execlists_port_complete(execlists, port); port = execlists_port_complete(execlists, port);
if (port_isset(port))
execlists_user_begin(execlists, port);
else
execlists_user_end(execlists);
} else { } else {
port_set(port, port_pack(rq, count)); port_set(port, port_pack(rq, count));
} }
/* After the final element, the hw should be idle */
GEM_BUG_ON(port_count(port) == 0 &&
!(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
if (port_count(port) == 0)
execlists_clear_active(execlists,
EXECLISTS_ACTIVE_USER);
} }
if (head != execlists->csb_head) { if (head != execlists->csb_head) {
......
...@@ -638,6 +638,13 @@ execlists_set_active(struct intel_engine_execlists *execlists, ...@@ -638,6 +638,13 @@ execlists_set_active(struct intel_engine_execlists *execlists,
__set_bit(bit, (unsigned long *)&execlists->active); __set_bit(bit, (unsigned long *)&execlists->active);
} }
/*
 * Set @bit in execlists->active and report whether this call was the
 * one that transitioned it: returns true only if the bit was previously
 * clear (i.e. the first "begin" of a sequence), false if already set.
 * NOTE(review): __test_and_set_bit is the non-atomic variant — callers
 * are presumably serialised (e.g. under the tasklet); confirm.
 */
static inline bool
execlists_set_active_once(struct intel_engine_execlists *execlists,
unsigned int bit)
{
return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
}
static inline void static inline void
execlists_clear_active(struct intel_engine_execlists *execlists, execlists_clear_active(struct intel_engine_execlists *execlists,
unsigned int bit) unsigned int bit)
...@@ -652,6 +659,10 @@ execlists_is_active(const struct intel_engine_execlists *execlists, ...@@ -652,6 +659,10 @@ execlists_is_active(const struct intel_engine_execlists *execlists,
return test_bit(bit, (unsigned long *)&execlists->active); return test_bit(bit, (unsigned long *)&execlists->active);
} }
void execlists_user_begin(struct intel_engine_execlists *execlists,
const struct execlist_port *port);
void execlists_user_end(struct intel_engine_execlists *execlists);
void void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists); execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
...@@ -664,7 +675,7 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists) ...@@ -664,7 +675,7 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
return execlists->port_mask + 1; return execlists->port_mask + 1;
} }
static inline void static inline struct execlist_port *
execlists_port_complete(struct intel_engine_execlists * const execlists, execlists_port_complete(struct intel_engine_execlists * const execlists,
struct execlist_port * const port) struct execlist_port * const port)
{ {
...@@ -675,6 +686,8 @@ execlists_port_complete(struct intel_engine_execlists * const execlists, ...@@ -675,6 +686,8 @@ execlists_port_complete(struct intel_engine_execlists * const execlists,
memmove(port, port + 1, m * sizeof(struct execlist_port)); memmove(port, port + 1, m * sizeof(struct execlist_port));
memset(port + m, 0, sizeof(struct execlist_port)); memset(port + m, 0, sizeof(struct execlist_port));
return port;
} }
static inline unsigned int static inline unsigned int
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment