Commit b620e870 authored by Mika Kuoppala, committed by Mika Kuoppala

drm/i915: Make own struct for execlist items

The engine's execlist-related items have grown to the point
where a separate struct is warranted. Carve the execlist-specific
items out into a dedicated struct to add clarity.

v2: add kerneldoc and fix whitespace (Joonas, Chris)
v3: csb_mmio changes, rebase
v4: s/\b(el|execlist)\b/execlists/ (Joonas)
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com> (v3)
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v3)
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170922124307.10914-1-mika.kuoppala@intel.com
parent d27ffc1d
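The refactor below is mechanical: every engine->execlist_* field, plus irq_tasklet, fw_domains, csb_head and csb_use_mmio, moves under a new engine->execlists member, and busy call sites take a local execlists pointer. A toy, compilable sketch of the shape of the change (all "toy_" names are hypothetical stand-ins, not driver code):

struct toy_node;

/* After the patch: execlist-specific state lives in its own struct. */
struct toy_execlists {
        struct toy_node *first;         /* was engine->execlist_first */
        unsigned int csb_head;          /* was engine->csb_head */
};

struct toy_engine {
        int id;
        struct toy_execlists execlists; /* the new grouped member */
};

static unsigned int toy_csb_head(struct toy_engine *engine)
{
        /* Busy call sites take a local pointer, as i915_guc_dequeue() does. */
        struct toy_execlists * const execlists = &engine->execlists;

        return execlists->csb_head;     /* was engine->csb_head */
}

int main(void)
{
        struct toy_engine e = { .id = 0 };

        return (int)toy_csb_head(&e);
}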
@@ -3323,7 +3323,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
                         read = GEN8_CSB_READ_PTR(ptr);
                         write = GEN8_CSB_WRITE_PTR(ptr);
                         seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
-                                   read, engine->csb_head,
+                                   read, engine->execlists.csb_head,
                                    write,
                                    intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
                                    yesno(test_bit(ENGINE_IRQ_EXECLIST,
@@ -3345,10 +3345,10 @@ static int i915_engine_info(struct seq_file *m, void *unused)
                 }

                 rcu_read_lock();
-                for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) {
+                for (idx = 0; idx < ARRAY_SIZE(engine->execlists.port); idx++) {
                         unsigned int count;

-                        rq = port_unpack(&engine->execlist_port[idx],
+                        rq = port_unpack(&engine->execlists.port[idx],
                                          &count);
                         if (rq) {
                                 seq_printf(m, "\t\tELSP[%d] count=%d, ",
@@ -3362,7 +3362,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
                 rcu_read_unlock();

                 spin_lock_irq(&engine->timeline->lock);
-                for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
+                for (rb = engine->execlists.first; rb; rb = rb_next(rb)) {
                         struct i915_priolist *p =
                                 rb_entry(rb, typeof(*p), node);
...
@@ -2815,8 +2815,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
          * Turning off the engine->irq_tasklet until the reset is over
          * prevents the race.
          */
-        tasklet_kill(&engine->irq_tasklet);
-        tasklet_disable(&engine->irq_tasklet);
+        tasklet_kill(&engine->execlists.irq_tasklet);
+        tasklet_disable(&engine->execlists.irq_tasklet);

         if (engine->irq_seqno_barrier)
                 engine->irq_seqno_barrier(engine);
@@ -2995,7 +2995,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
 {
-        tasklet_enable(&engine->irq_tasklet);
+        tasklet_enable(&engine->execlists.irq_tasklet);
         kthread_unpark(engine->breadcrumbs.signaler);
 }
...
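The comment in the hunk above describes the standard way to quiesce a tasklet around a reset: tasklet_kill() waits out a run that is already queued, tasklet_disable() keeps the bottom half from running while the engine state is rewritten, and tasklet_enable() in the finish path reopens it. A minimal stand-alone sketch of that bracket (module-style illustration with a hypothetical handler, not the driver's code):

#include <linux/interrupt.h>

static void toy_bottom_half(unsigned long data)
{
        /* Softirq-context bottom half; runs after tasklet_schedule(). */
}

static DECLARE_TASKLET(toy_tasklet, toy_bottom_half, 0);

static void toy_reset_prepare(void)
{
        tasklet_kill(&toy_tasklet);     /* flush a run that is already queued */
        tasklet_disable(&toy_tasklet);  /* block new runs during the reset */
}

static void toy_reset_finish(void)
{
        tasklet_enable(&toy_tasklet);   /* reset done, let it run again */
}

Note the order: killing before disabling matters, since tasklet_kill() on a disabled tasklet would wait forever for a run that can never happen.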
@@ -1327,10 +1327,10 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 static void error_record_engine_execlists(struct intel_engine_cs *engine,
                                           struct drm_i915_error_engine *ee)
 {
-        const struct execlist_port *port = engine->execlist_port;
+        const struct execlist_port *port = engine->execlists.port;
         unsigned int n;

-        for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
+        for (n = 0; n < ARRAY_SIZE(engine->execlists.port); n++) {
                 struct drm_i915_gem_request *rq = port_request(&port[n]);

                 if (!rq)
...
@@ -494,11 +494,12 @@ static void i915_guc_submit(struct intel_engine_cs *engine)
         struct drm_i915_private *dev_priv = engine->i915;
         struct intel_guc *guc = &dev_priv->guc;
         struct i915_guc_client *client = guc->execbuf_client;
-        struct execlist_port *port = engine->execlist_port;
-        unsigned int engine_id = engine->id;
+        struct intel_engine_execlists * const execlists = &engine->execlists;
+        struct execlist_port *port = execlists->port;
+        const unsigned int engine_id = engine->id;
         unsigned int n;

-        for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
+        for (n = 0; n < ARRAY_SIZE(execlists->port); n++) {
                 struct drm_i915_gem_request *rq;
                 unsigned int count;
@@ -558,7 +559,8 @@ static void port_assign(struct execlist_port *port,
 static void i915_guc_dequeue(struct intel_engine_cs *engine)
 {
-        struct execlist_port *port = engine->execlist_port;
+        struct intel_engine_execlists * const execlists = &engine->execlists;
+        struct execlist_port *port = execlists->port;
         struct drm_i915_gem_request *last = NULL;
         bool submit = false;
         struct rb_node *rb;
@@ -567,15 +569,15 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
                 port++;

         spin_lock_irq(&engine->timeline->lock);
-        rb = engine->execlist_first;
-        GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
+        rb = execlists->first;
+        GEM_BUG_ON(rb_first(&execlists->queue) != rb);
         while (rb) {
                 struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
                 struct drm_i915_gem_request *rq, *rn;

                 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
                         if (last && rq->ctx != last->ctx) {
-                                if (port != engine->execlist_port) {
+                                if (port != execlists->port) {
                                         __list_del_many(&p->requests,
                                                         &rq->priotree.link);
                                         goto done;
@@ -596,13 +598,13 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
                 }

                 rb = rb_next(rb);
-                rb_erase(&p->node, &engine->execlist_queue);
+                rb_erase(&p->node, &execlists->queue);
                 INIT_LIST_HEAD(&p->requests);
                 if (p->priority != I915_PRIORITY_NORMAL)
                         kmem_cache_free(engine->i915->priorities, p);
         }
 done:
-        engine->execlist_first = rb;
+        execlists->first = rb;
         if (submit) {
                 port_assign(port, last);
                 i915_guc_submit(engine);
@@ -612,8 +614,8 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
 static void i915_guc_irq_handler(unsigned long data)
 {
-        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
-        struct execlist_port *port = engine->execlist_port;
+        struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+        struct execlist_port *port = engine->execlists.port;
         struct drm_i915_gem_request *rq;

         rq = port_request(&port[0]);
@@ -1144,7 +1146,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
          * and it is guaranteed that it will remove the work item from the
          * queue before our request is completed.
          */
-        BUILD_BUG_ON(ARRAY_SIZE(engine->execlist_port) *
+        BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
                      sizeof(struct guc_wq_item) *
                      I915_NUM_ENGINES > GUC_WQ_SIZE);
@@ -1175,14 +1177,15 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
         guc_interrupts_capture(dev_priv);

         for_each_engine(engine, dev_priv, id) {
+                struct intel_engine_execlists * const execlists = &engine->execlists;
                 /* The tasklet was initialised by execlists, and may be in
                  * a state of flux (across a reset) and so we just want to
                  * take over the callback without changing any other state
                  * in the tasklet.
                  */
-                engine->irq_tasklet.func = i915_guc_irq_handler;
+                execlists->irq_tasklet.func = i915_guc_irq_handler;
                 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-                tasklet_schedule(&engine->irq_tasklet);
+                tasklet_schedule(&execlists->irq_tasklet);
         }

         return 0;
...
@@ -1346,10 +1346,11 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
 static void
 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 {
+        struct intel_engine_execlists * const execlists = &engine->execlists;
         bool tasklet = false;

         if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
-                if (port_count(&engine->execlist_port[0])) {
+                if (port_count(&execlists->port[0])) {
                         __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
                         tasklet = true;
                 }
@@ -1361,7 +1362,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
         }

         if (tasklet)
-                tasklet_hi_schedule(&engine->irq_tasklet);
+                tasklet_hi_schedule(&execlists->irq_tasklet);
 }

 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
...
@@ -393,8 +393,8 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
  */
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
-        engine->execlist_queue = RB_ROOT;
-        engine->execlist_first = NULL;
+        engine->execlists.queue = RB_ROOT;
+        engine->execlists.first = NULL;

         intel_engine_init_timeline(engine);
         intel_engine_init_hangcheck(engine);
@@ -1475,11 +1475,11 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
                 return false;

         /* Both ports drained, no more ELSP submission? */
-        if (port_request(&engine->execlist_port[0]))
+        if (port_request(&engine->execlists.port[0]))
                 return false;

         /* ELSP is empty, but there are ready requests? */
-        if (READ_ONCE(engine->execlist_first))
+        if (READ_ONCE(engine->execlists.first))
                 return false;

         /* Ring stopped? */
@@ -1528,8 +1528,8 @@ void intel_engines_mark_idle(struct drm_i915_private *i915)
         for_each_engine(engine, i915, id) {
                 intel_engine_disarm_breadcrumbs(engine);
                 i915_gem_batch_pool_fini(&engine->batch_pool);
-                tasklet_kill(&engine->irq_tasklet);
-                engine->no_priolist = false;
+                tasklet_kill(&engine->execlists.irq_tasklet);
+                engine->execlists.no_priolist = false;
         }
 }
...
@@ -184,6 +184,84 @@ struct i915_priolist {
         int priority;
 };

+/**
+ * struct intel_engine_execlists - execlist submission queue and port state
+ *
+ * The struct intel_engine_execlists represents the combined logical state
+ * of the driver and the hardware for the execlist mode of submission.
+ */
+struct intel_engine_execlists {
+        /**
+         * @irq_tasklet: softirq tasklet for the bottom half handler
+         */
+        struct tasklet_struct irq_tasklet;
+
+        /**
+         * @default_priolist: priority list for I915_PRIORITY_NORMAL
+         */
+        struct i915_priolist default_priolist;
+
+        /**
+         * @no_priolist: priority lists disabled
+         */
+        bool no_priolist;
+
+        /**
+         * @port: execlist port states
+         *
+         * For each hardware ELSP (ExecList Submission Port) we keep
+         * track of the last request and the number of times we submitted
+         * that port to hw. We then count the number of times the hw reports
+         * a context completion or preemption. As only one context can
+         * be active on hw, we limit resubmission of a context to port[0].
+         * This is called a "Lite Restore" of the context.
+         */
+        struct execlist_port {
+                /**
+                 * @request_count: combined request and submission count
+                 */
+                struct drm_i915_gem_request *request_count;
+#define EXECLIST_COUNT_BITS 2
+#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
+#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
+#define port_set(p, packed) ((p)->request_count = (packed))
+#define port_isset(p) ((p)->request_count)
+#define port_index(p, e) ((p) - (e)->execlists.port)
+
+                /**
+                 * @context_id: context ID for port
+                 */
+                GEM_DEBUG_DECL(u32 context_id);
+        } port[2];
+
+        /**
+         * @queue: queue of requests, in priority lists
+         */
+        struct rb_root queue;
+
+        /**
+         * @first: leftmost level in priority @queue
+         */
+        struct rb_node *first;
+
+        /**
+         * @fw_domains: forcewake domains for irq tasklet
+         */
+        unsigned int fw_domains;
+
+        /**
+         * @csb_head: context status buffer head
+         */
+        unsigned int csb_head;
+
+        /**
+         * @csb_use_mmio: access csb through mmio, instead of hwsp
+         */
+        bool csb_use_mmio;
+};
+
 #define INTEL_ENGINE_CS_MAX_NAME 8

 struct intel_engine_cs {
@@ -380,27 +458,7 @@ struct intel_engine_cs {
                 u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
         } semaphore;

-        /* Execlists */
-        struct tasklet_struct irq_tasklet;
-        struct i915_priolist default_priolist;
-        bool no_priolist;
-        struct execlist_port {
-                struct drm_i915_gem_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, e) ((p) - (e)->execlist_port)
-                GEM_DEBUG_DECL(u32 context_id);
-        } execlist_port[2];
-        struct rb_root execlist_queue;
-        struct rb_node *execlist_first;
-        unsigned int fw_domains;
-        unsigned int csb_head;
-        bool csb_use_mmio;
+        struct intel_engine_execlists execlists;

         /* Contexts are pinned whilst they are active on the GPU. The last
          * context executed remains active whilst the GPU is idle - the
...
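The port_request()/port_count() macros above rely on pointer tagging: a request pointer is at least 4-byte aligned, so its two low bits are free to hold the submission count (EXECLIST_COUNT_BITS). A stand-alone userspace sketch of the same trick (illustrative only; the driver does this through its ptr_pack_bits()/ptr_unpack_bits() helpers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define COUNT_BITS 2                            /* mirrors EXECLIST_COUNT_BITS */
#define COUNT_MASK ((uintptr_t)(1 << COUNT_BITS) - 1)

/* Pack a small counter into the spare low bits of an aligned pointer. */
static void *port_pack_toy(void *ptr, unsigned int count)
{
        uintptr_t v = (uintptr_t)ptr;

        assert((v & COUNT_MASK) == 0);          /* alignment frees the low bits */
        assert(count <= COUNT_MASK);
        return (void *)(v | count);
}

/* Split the packed word back into pointer and counter. */
static void *port_unpack_toy(void *packed, unsigned int *count)
{
        uintptr_t v = (uintptr_t)packed;

        *count = (unsigned int)(v & COUNT_MASK);
        return (void *)(v & ~COUNT_MASK);
}

int main(void)
{
        void *rq = malloc(64);                  /* stand-in request; malloc is aligned */
        unsigned int count;
        void *port = port_pack_toy(rq, 2);      /* submitted twice: a lite restore */

        assert(port_unpack_toy(port, &count) == rq && count == 2);
        printf("rq=%p count=%u\n", rq, count);
        free(rq);
        return 0;
}

This is why both the pointer and the count fit in the single request_count word of struct execlist_port, and why port_set() stores the packed value as-is.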