Commit 422d7df4 authored by Chris Wilson

drm/i915: Replace engine->timeline with a plain list

To continue the onslaught of removing the assumption of a global
execution ordering, another casualty is the engine->timeline. Without an
actual timeline to track, it is overkill and we can replace it with a
much less grand plain list. We still need a list of requests inflight,
for the simple purpose of finding inflight requests (for retiring,
resetting, preemption etc).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190614164606.15633-3-chris@chris-wilson.co.uk
parent 9db0c5ca
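
For orientation before the diff: a minimal userspace sketch of the bookkeeping this patch moves to. Each engine keeps a plain, lock-protected list of inflight requests (engine->active.lock / engine->active.requests in the patch); a request joins the list on submit, and walking the list is enough to find inflight requests for retiring, resetting or preemption. The names below (engine_sketch, request_sketch, etc.) are invented for this example, a pthread mutex stands in for the kernel spinlock, and none of this code is part of the patch itself.

/* Illustrative sketch only, not i915 code. */
#include <pthread.h>
#include <stdio.h>

struct request_sketch {
	unsigned int seqno;
	int completed;
	struct request_sketch *prev, *next;	/* link on engine->active.requests */
};

struct engine_sketch {
	struct {
		pthread_mutex_t lock;		/* stands in for engine->active.lock */
		struct request_sketch requests;	/* list head, engine->active.requests */
	} active;
};

static void engine_init_active(struct engine_sketch *engine)
{
	pthread_mutex_init(&engine->active.lock, NULL);
	engine->active.requests.prev = &engine->active.requests;
	engine->active.requests.next = &engine->active.requests;
}

/* On submit, the request is appended to the per-engine inflight list. */
static void submit_request(struct engine_sketch *engine, struct request_sketch *rq)
{
	pthread_mutex_lock(&engine->active.lock);
	rq->prev = engine->active.requests.prev;
	rq->next = &engine->active.requests;
	rq->prev->next = rq;
	engine->active.requests.prev = rq;
	pthread_mutex_unlock(&engine->active.lock);
}

/* Walking the list finds the oldest request that has not yet completed. */
static struct request_sketch *find_active_request(struct engine_sketch *engine)
{
	struct request_sketch *rq, *active = NULL;

	pthread_mutex_lock(&engine->active.lock);
	for (rq = engine->active.requests.next;
	     rq != &engine->active.requests; rq = rq->next) {
		if (rq->completed)
			continue;
		active = rq;
		break;
	}
	pthread_mutex_unlock(&engine->active.lock);
	return active;
}

int main(void)
{
	struct engine_sketch engine;
	struct request_sketch rq[3] = { { 1, 1 }, { 2, 0 }, { 3, 0 } };
	int i;

	engine_init_active(&engine);
	for (i = 0; i < 3; i++)
		submit_request(&engine, &rq[i]);

	printf("active seqno: %u\n", find_active_request(&engine)->seqno);
	return 0;
}
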
@@ -565,4 +565,10 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
 #endif
 
+void intel_engine_init_active(struct intel_engine_cs *engine,
+			      unsigned int subclass);
+#define ENGINE_PHYSICAL	0
+#define ENGINE_MOCK	1
+#define ENGINE_VIRTUAL	2
+
 #endif /* _INTEL_RINGBUFFER_H_ */
@@ -617,14 +617,7 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
 	if (err)
 		return err;
 
-	err = i915_timeline_init(engine->i915,
-				 &engine->timeline,
-				 engine->status_page.vma);
-	if (err)
-		goto err_hwsp;
-
-	i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
+	intel_engine_init_active(engine, ENGINE_PHYSICAL);
 
 	intel_engine_init_breadcrumbs(engine);
 	intel_engine_init_execlists(engine);
 	intel_engine_init_hangcheck(engine);
@@ -637,10 +630,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
 		intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
 
 	return 0;
-
-err_hwsp:
-	cleanup_status_page(engine);
-	return err;
 }
 
 /**
@@ -797,6 +786,27 @@ static int pin_context(struct i915_gem_context *ctx,
 	return 0;
 }
 
+void
+intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
+{
+	INIT_LIST_HEAD(&engine->active.requests);
+
+	spin_lock_init(&engine->active.lock);
+	lockdep_set_subclass(&engine->active.lock, subclass);
+
+	/*
+	 * Due to an interesting quirk in lockdep's internal debug tracking,
+	 * after setting a subclass we must ensure the lock is used. Otherwise,
+	 * nr_unused_locks is incremented once too often.
+	 */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	local_irq_disable();
+	lock_map_acquire(&engine->active.lock.dep_map);
+	lock_map_release(&engine->active.lock.dep_map);
+	local_irq_enable();
+#endif
+}
+
 /**
  * intel_engines_init_common - initialize cengine state which might require hw access
  * @engine: Engine to initialize.
@@ -860,6 +870,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
  */
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
+	GEM_BUG_ON(!list_empty(&engine->active.requests));
+
 	cleanup_status_page(engine);
 
 	intel_engine_fini_breadcrumbs(engine);
@@ -874,8 +886,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	intel_context_unpin(engine->kernel_context);
 	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
-	i915_timeline_fini(&engine->timeline);
 
 	intel_wa_list_free(&engine->ctx_wa_list);
 	intel_wa_list_free(&engine->wa_list);
 	intel_wa_list_free(&engine->whitelist);
@@ -1482,16 +1492,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
 	drm_printf(m, "\tRequests:\n");
 
-	rq = list_first_entry(&engine->timeline.requests,
-			      struct i915_request, link);
-	if (&rq->link != &engine->timeline.requests)
-		print_request(m, rq, "\t\tfirst ");
-
-	rq = list_last_entry(&engine->timeline.requests,
-			     struct i915_request, link);
-	if (&rq->link != &engine->timeline.requests)
-		print_request(m, rq, "\t\tlast ");
-
 	rq = intel_engine_find_active_request(engine);
 	if (rq) {
 		print_request(m, rq, "\t\tactive ");
@@ -1572,7 +1572,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	if (!intel_engine_supports_stats(engine))
 		return -ENODEV;
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 	write_seqlock(&engine->stats.lock);
 
 	if (unlikely(engine->stats.enabled == ~0)) {
@@ -1598,7 +1598,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 
 unlock:
 	write_sequnlock(&engine->stats.lock);
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 
 	return err;
 }
@@ -1683,22 +1683,22 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
	 * At all other times, we must assume the GPU is still running, but
	 * we only care about the snapshot of this moment.
	 */
-	spin_lock_irqsave(&engine->timeline.lock, flags);
-	list_for_each_entry(request, &engine->timeline.requests, link) {
+	spin_lock_irqsave(&engine->active.lock, flags);
+	list_for_each_entry(request, &engine->active.requests, sched.link) {
 		if (i915_request_completed(request))
 			continue;
 
 		if (!i915_request_started(request))
-			break;
+			continue;
 
 		/* More than one preemptible request may match! */
 		if (!match_ring(request))
-			break;
+			continue;
 
 		active = request;
 		break;
 	}
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 
 	return active;
 }
...
@@ -288,7 +288,11 @@ struct intel_engine_cs {
 
 	struct intel_ring *buffer;
 
-	struct i915_timeline timeline;
+	struct {
+		spinlock_t lock;
+		struct list_head requests;
+	} active;
 
 	struct llist_head barrier_tasks;
 	struct intel_context *kernel_context; /* pinned */
...
[one file's diff is collapsed and not shown]
@@ -49,12 +49,12 @@ static void engine_skip_context(struct i915_request *rq)
 	struct intel_engine_cs *engine = rq->engine;
 	struct i915_gem_context *hung_ctx = rq->gem_context;
 
-	lockdep_assert_held(&engine->timeline.lock);
+	lockdep_assert_held(&engine->active.lock);
 
 	if (!i915_request_is_active(rq))
 		return;
 
-	list_for_each_entry_continue(rq, &engine->timeline.requests, link)
+	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
 		if (rq->gem_context == hung_ctx)
 			i915_request_skip(rq, -EIO);
 }
@@ -130,7 +130,7 @@ void i915_reset_request(struct i915_request *rq, bool guilty)
 		  rq->fence.seqno,
 		  yesno(guilty));
 
-	lockdep_assert_held(&rq->engine->timeline.lock);
+	lockdep_assert_held(&rq->engine->active.lock);
 	GEM_BUG_ON(i915_request_completed(rq));
 
 	if (guilty) {
@@ -785,10 +785,10 @@ static void nop_submit_request(struct i915_request *request)
 		  engine->name, request->fence.context, request->fence.seqno);
 	dma_fence_set_error(&request->fence, -EIO);
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 	__i915_request_submit(request);
 	i915_request_mark_complete(request);
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 
 	intel_engine_queue_breadcrumbs(engine);
 }
...
@@ -730,14 +730,13 @@ static void reset_prepare(struct intel_engine_cs *engine)
 
 static void reset_ring(struct intel_engine_cs *engine, bool stalled)
 {
-	struct i915_timeline *tl = &engine->timeline;
 	struct i915_request *pos, *rq;
 	unsigned long flags;
 	u32 head;
 
 	rq = NULL;
-	spin_lock_irqsave(&tl->lock, flags);
-	list_for_each_entry(pos, &tl->requests, link) {
+	spin_lock_irqsave(&engine->active.lock, flags);
+	list_for_each_entry(pos, &engine->active.requests, sched.link) {
 		if (!i915_request_completed(pos)) {
 			rq = pos;
 			break;
@@ -791,7 +790,7 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
 	}
 	engine->buffer->head = intel_ring_wrap(engine->buffer, head);
 
-	spin_unlock_irqrestore(&tl->lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void reset_finish(struct intel_engine_cs *engine)
@@ -877,10 +876,10 @@ static void cancel_requests(struct intel_engine_cs *engine)
 	struct i915_request *request;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	/* Mark all submitted requests as skipped. */
-	list_for_each_entry(request, &engine->timeline.requests, link) {
+	list_for_each_entry(request, &engine->active.requests, sched.link) {
 		if (!i915_request_signaled(request))
 			dma_fence_set_error(&request->fence, -EIO);
 
@@ -889,7 +888,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void i9xx_submit_request(struct i915_request *request)
@@ -1267,8 +1266,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 
 	GEM_BUG_ON(!is_power_of_2(size));
 	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
-	GEM_BUG_ON(timeline == &engine->timeline);
-	lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring)
...
@@ -229,17 +229,17 @@ static void mock_cancel_requests(struct intel_engine_cs *engine)
 	struct i915_request *request;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	/* Mark all submitted requests as skipped. */
-	list_for_each_entry(request, &engine->timeline.requests, sched.link) {
+	list_for_each_entry(request, &engine->active.requests, sched.link) {
 		if (!i915_request_signaled(request))
 			dma_fence_set_error(&request->fence, -EIO);
 		i915_request_mark_complete(request);
 	}
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
@@ -285,28 +285,23 @@ int mock_engine_init(struct intel_engine_cs *engine)
 	struct drm_i915_private *i915 = engine->i915;
 	int err;
 
+	intel_engine_init_active(engine, ENGINE_MOCK);
 	intel_engine_init_breadcrumbs(engine);
 	intel_engine_init_execlists(engine);
 	intel_engine_init__pm(engine);
 
-	if (i915_timeline_init(i915, &engine->timeline, NULL))
-		goto err_breadcrumbs;
-	i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
-
 	engine->kernel_context =
 		i915_gem_context_get_engine(i915->kernel_context, engine->id);
 	if (IS_ERR(engine->kernel_context))
-		goto err_timeline;
+		goto err_breadcrumbs;
 
 	err = intel_context_pin(engine->kernel_context);
 	intel_context_put(engine->kernel_context);
 	if (err)
-		goto err_timeline;
+		goto err_breadcrumbs;
 
 	return 0;
 
-err_timeline:
-	i915_timeline_fini(&engine->timeline);
 err_breadcrumbs:
 	intel_engine_fini_breadcrumbs(engine);
 	return -ENOMEM;
@@ -340,7 +335,6 @@ void mock_engine_free(struct intel_engine_cs *engine)
 
 	intel_context_unpin(engine->kernel_context);
 	intel_engine_fini_breadcrumbs(engine);
-	i915_timeline_fini(&engine->timeline);
 
 	kfree(engine);
 }
@@ -1275,7 +1275,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 
 	count = 0;
 	request = first;
-	list_for_each_entry_from(request, &engine->timeline.requests, link)
+	list_for_each_entry_from(request, &engine->active.requests, sched.link)
 		count++;
 	if (!count)
 		return;
@@ -1288,7 +1288,8 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 
 	count = 0;
 	request = first;
-	list_for_each_entry_from(request, &engine->timeline.requests, link) {
+	list_for_each_entry_from(request,
+				 &engine->active.requests, sched.link) {
 		if (count >= ee->num_requests) {
 			/*
 			 * If the ring request list was changed in
...
@@ -232,9 +232,9 @@ static bool i915_request_retire(struct i915_request *rq)
 
 	local_irq_disable();
 
-	spin_lock(&rq->engine->timeline.lock);
-	list_del(&rq->link);
-	spin_unlock(&rq->engine->timeline.lock);
+	spin_lock(&rq->engine->active.lock);
+	list_del(&rq->sched.link);
+	spin_unlock(&rq->engine->active.lock);
 
 	spin_lock(&rq->lock);
 	i915_request_mark_complete(rq);
@@ -254,6 +254,7 @@ static bool i915_request_retire(struct i915_request *rq)
 	intel_context_unpin(rq->hw_context);
 
 	i915_request_remove_from_client(rq);
+	list_del(&rq->link);
 
 	free_capture_list(rq);
 	i915_sched_node_fini(&rq->sched);
@@ -373,28 +374,17 @@ __i915_request_await_execution(struct i915_request *rq,
 	return 0;
 }
 
-static void move_to_timeline(struct i915_request *request,
-			     struct i915_timeline *timeline)
-{
-	GEM_BUG_ON(request->timeline == &request->engine->timeline);
-	lockdep_assert_held(&request->engine->timeline.lock);
-
-	spin_lock(&request->timeline->lock);
-	list_move_tail(&request->link, &timeline->requests);
-	spin_unlock(&request->timeline->lock);
-}
-
 void __i915_request_submit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 
-	GEM_TRACE("%s fence %llx:%lld -> current %d\n",
+	GEM_TRACE("%s fence %llx:%lld, current %d\n",
 		  engine->name,
 		  request->fence.context, request->fence.seqno,
 		  hwsp_seqno(request));
 
 	GEM_BUG_ON(!irqs_disabled());
-	lockdep_assert_held(&engine->timeline.lock);
+	lockdep_assert_held(&engine->active.lock);
 
 	if (i915_gem_context_is_banned(request->gem_context))
 		i915_request_skip(request, -EIO);
@@ -422,6 +412,8 @@ void __i915_request_submit(struct i915_request *request)
 	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
+	list_move_tail(&request->sched.link, &engine->active.requests);
+
 	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
 	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
 
@@ -437,9 +429,6 @@ void __i915_request_submit(struct i915_request *request)
 		engine->emit_fini_breadcrumb(request,
 					     request->ring->vaddr + request->postfix);
 
-	/* Transfer from per-context onto the global per-engine timeline */
-	move_to_timeline(request, &engine->timeline);
-
 	engine->serial++;
 
 	trace_i915_request_execute(request);
@@ -451,11 +440,11 @@ void i915_request_submit(struct i915_request *request)
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	__i915_request_submit(request);
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 void __i915_request_unsubmit(struct i915_request *request)
@@ -468,7 +457,7 @@ void __i915_request_unsubmit(struct i915_request *request)
 		  hwsp_seqno(request));
 
 	GEM_BUG_ON(!irqs_disabled());
-	lockdep_assert_held(&engine->timeline.lock);
+	lockdep_assert_held(&engine->active.lock);
 
 	/*
 	 * Only unwind in reverse order, required so that the per-context list
@@ -486,9 +475,6 @@ void __i915_request_unsubmit(struct i915_request *request)
 
 	spin_unlock(&request->lock);
 
-	/* Transfer back from the global per-engine timeline to per-context */
-	move_to_timeline(request, request->timeline);
-
 	/* We've already spun, don't charge on resubmitting. */
 	if (request->sched.semaphores && i915_request_started(request)) {
 		request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
@@ -510,11 +496,11 @@ void i915_request_unsubmit(struct i915_request *request)
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	__i915_request_unsubmit(request);
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static int __i915_sw_fence_call
@@ -669,7 +655,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq->engine = ce->engine;
 	rq->ring = ce->ring;
 	rq->timeline = tl;
-	GEM_BUG_ON(rq->timeline == &ce->engine->timeline);
 	rq->hwsp_seqno = tl->hwsp_seqno;
 	rq->hwsp_cacheline = tl->hwsp_cacheline;
 	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
@@ -1136,9 +1121,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 							 0);
 	}
 
-	spin_lock_irq(&timeline->lock);
 	list_add_tail(&rq->link, &timeline->requests);
-	spin_unlock_irq(&timeline->lock);
 
 	/*
 	 * Make sure that no request gazumped us - if it was allocated after
...
@@ -217,7 +217,7 @@ struct i915_request {
 
 	bool waitboost;
 
-	/** engine->request_list entry for this request */
+	/** timeline->request entry for this request */
 	struct list_head link;
 
 	/** ring->request_list entry for this request */
...
@@ -77,7 +77,7 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
 	bool first = true;
 	int idx, i;
 
-	lockdep_assert_held(&engine->timeline.lock);
+	lockdep_assert_held(&engine->active.lock);
 	assert_priolists(execlists);
 
 	/* buckets sorted from highest [in slot 0] to lowest priority */
@@ -162,9 +162,9 @@ sched_lock_engine(const struct i915_sched_node *node,
 	 * check that the rq still belongs to the newly locked engine.
 	 */
 	while (locked != (engine = READ_ONCE(rq->engine))) {
-		spin_unlock(&locked->timeline.lock);
+		spin_unlock(&locked->active.lock);
 		memset(cache, 0, sizeof(*cache));
-		spin_lock(&engine->timeline.lock);
+		spin_lock(&engine->active.lock);
 		locked = engine;
 	}
 
@@ -189,7 +189,7 @@ static void kick_submission(struct intel_engine_cs *engine, int prio)
 	 * tasklet, i.e. we have not change the priority queue
 	 * sufficiently to oust the running context.
 	 */
-	if (inflight && !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
+	if (!inflight || !i915_scheduler_need_preempt(prio, rq_prio(inflight)))
 		return;
 
 	tasklet_hi_schedule(&engine->execlists.tasklet);
@@ -278,7 +278,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 
 	memset(&cache, 0, sizeof(cache));
 	engine = node_to_request(node)->engine;
-	spin_lock(&engine->timeline.lock);
+	spin_lock(&engine->active.lock);
 
 	/* Fifo and depth-first replacement ensure our deps execute before us */
 	engine = sched_lock_engine(node, engine, &cache);
@@ -287,7 +287,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 
 		node = dep->signaler;
 		engine = sched_lock_engine(node, engine, &cache);
-		lockdep_assert_held(&engine->timeline.lock);
+		lockdep_assert_held(&engine->active.lock);
 
 		/* Recheck after acquiring the engine->timeline.lock */
 		if (prio <= node->attr.priority || node_signaled(node))
@@ -296,14 +296,8 @@ static void __i915_schedule(struct i915_sched_node *node,
 		GEM_BUG_ON(node_to_request(node)->engine != engine);
 
 		node->attr.priority = prio;
-		if (!list_empty(&node->link)) {
-			GEM_BUG_ON(intel_engine_is_virtual(engine));
-			if (!cache.priolist)
-				cache.priolist =
-					i915_sched_lookup_priolist(engine,
-								   prio);
-			list_move_tail(&node->link, cache.priolist);
-		} else {
+
+		if (list_empty(&node->link)) {
 			/*
 			 * If the request is not in the priolist queue because
 			 * it is not yet runnable, then it doesn't contribute
@@ -312,8 +306,16 @@ static void __i915_schedule(struct i915_sched_node *node,
 			 * queue; but in that case we may still need to reorder
 			 * the inflight requests.
 			 */
-			if (!i915_sw_fence_done(&node_to_request(node)->submit))
-				continue;
+			continue;
+		}
+
+		if (!intel_engine_is_virtual(engine) &&
+		    !i915_request_is_active(node_to_request(node))) {
+			if (!cache.priolist)
+				cache.priolist =
+					i915_sched_lookup_priolist(engine,
+								   prio);
+			list_move_tail(&node->link, cache.priolist);
 		}
 
 		if (prio <= engine->execlists.queue_priority_hint)
@@ -325,7 +327,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 		kick_submission(engine, prio);
 	}
 
-	spin_unlock(&engine->timeline.lock);
+	spin_unlock(&engine->active.lock);
 }
 
 void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
@@ -439,8 +441,6 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 {
 	struct i915_dependency *dep, *tmp;
 
-	GEM_BUG_ON(!list_empty(&node->link));
-
 	spin_lock_irq(&schedule_lock);
 
 	/*
...
@@ -251,7 +251,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
 
 	timeline->fence_context = dma_fence_context_alloc(1);
 
-	spin_lock_init(&timeline->lock);
 	mutex_init(&timeline->mutex);
 
 	INIT_ACTIVE_REQUEST(&timeline->last_request);
...
@@ -36,25 +36,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
 			struct i915_vma *hwsp);
 void i915_timeline_fini(struct i915_timeline *tl);
 
-static inline void
-i915_timeline_set_subclass(struct i915_timeline *timeline,
-			   unsigned int subclass)
-{
-	lockdep_set_subclass(&timeline->lock, subclass);
-
-	/*
-	 * Due to an interesting quirk in lockdep's internal debug tracking,
-	 * after setting a subclass we must ensure the lock is used. Otherwise,
-	 * nr_unused_locks is incremented once too often.
-	 */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	local_irq_disable();
-	lock_map_acquire(&timeline->lock.dep_map);
-	lock_map_release(&timeline->lock.dep_map);
-	local_irq_enable();
-#endif
-}
-
 struct i915_timeline *
 i915_timeline_create(struct drm_i915_private *i915,
 		     struct i915_vma *global_hwsp);
...
@@ -23,10 +23,6 @@ struct i915_timeline {
 	u64 fence_context;
 	u32 seqno;
 
-	spinlock_t lock;
-#define TIMELINE_CLIENT 0 /* default subclass */
-#define TIMELINE_ENGINE 1
-#define TIMELINE_VIRTUAL 2
 	struct mutex mutex; /* protects the flow of requests */
 
 	unsigned int pin_count;
...
@@ -740,7 +740,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 	bool submit = false;
 	struct rb_node *rb;
 
-	lockdep_assert_held(&engine->timeline.lock);
+	lockdep_assert_held(&engine->active.lock);
 
 	if (port_isset(port)) {
 		if (intel_engine_has_preemption(engine)) {
@@ -822,7 +822,7 @@ static void guc_submission_tasklet(unsigned long data)
 	struct i915_request *rq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	rq = port_request(port);
 	while (rq && i915_request_completed(rq)) {
@@ -847,7 +847,7 @@ static void guc_submission_tasklet(unsigned long data)
 	if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
 		guc_dequeue(engine);
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void guc_reset_prepare(struct intel_engine_cs *engine)
@@ -884,7 +884,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
 	struct i915_request *rq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	execlists_cancel_port_requests(execlists);
 
@@ -900,7 +900,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
 	intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
 
 out_unlock:
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void guc_cancel_requests(struct intel_engine_cs *engine)
@@ -926,13 +926,13 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
 	 * submission's irq state, we also wish to remind ourselves that
 	 * it is irq state.)
 	 */
-	spin_lock_irqsave(&engine->timeline.lock, flags);
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	/* Cancel the requests on the HW and clear the ELSP tracker. */
 	execlists_cancel_port_requests(execlists);
 
 	/* Mark all executing requests as skipped. */
-	list_for_each_entry(rq, &engine->timeline.requests, link) {
+	list_for_each_entry(rq, &engine->active.requests, sched.link) {
 		if (!i915_request_signaled(rq))
 			dma_fence_set_error(&rq->fence, -EIO);
 
@@ -961,7 +961,7 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
 	execlists->queue = RB_ROOT_CACHED;
 	GEM_BUG_ON(port_isset(execlists->port));
 
-	spin_unlock_irqrestore(&engine->timeline.lock, flags);
+	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
 
 static void guc_reset_finish(struct intel_engine_cs *engine)
...
@@ -13,7 +13,6 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 	timeline->i915 = NULL;
 	timeline->fence_context = context;
 
-	spin_lock_init(&timeline->lock);
 	mutex_init(&timeline->mutex);
 
 	INIT_ACTIVE_REQUEST(&timeline->last_request);
...