Commit 84135022 authored by Chris Wilson

drm/i915/gt: Drop mutex serialisation between context pin/unpin

The last remaining reason for serialising the pin/unpin of the
intel_context is to ensure that our preallocated wakerefs are not
consumed too early (i.e. the unpin of the previous phase does not emit
the idle barriers for this phase before we even submit). All of the
other operations within the context pin/unpin are supposed to be
atomic...  Therefore, we can reduce the serialisation to being just on
the i915_active.preallocated_barriers itself and drop the nested
pin_mutex from intel_context_unpin().
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200106114234.2529613-5-chris@chris-wilson.co.uk
parent 3fbbbef4
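The heart of the change is visible in intel_context_unpin() below: the final unpin is now decided by a plain atomic decrement-and-test instead of under the nested ce->pin_mutex, and a temporary reference is held across intel_context_active_release() so an asynchronous drop of the active reference cannot free the context mid-teardown. As a rough userspace analogue of that pattern (toy_ctx, toy_get, toy_put and toy_active_release are invented names for illustration, not the i915 API):

/*
 * Userspace sketch of the lock-free final-unpin pattern: only the thread
 * that drops the last pin does the teardown, and it pins the object's
 * lifetime with an extra reference while releasing the "active" tracker,
 * which may otherwise drop the last reference asynchronously.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_ctx {
	atomic_int ref;		/* lifetime reference count */
	atomic_int pin_count;	/* pin count, analogous to ce->pin_count */
};

static void toy_get(struct toy_ctx *ce) { atomic_fetch_add(&ce->ref, 1); }

static void toy_put(struct toy_ctx *ce)
{
	if (atomic_fetch_sub(&ce->ref, 1) == 1) {
		printf("freeing context\n");
		free(ce);
	}
}

/* Stand-in for intel_context_active_release(): drops a reference. */
static void toy_active_release(struct toy_ctx *ce) { toy_put(ce); }

static void toy_unpin(struct toy_ctx *ce)
{
	/* Not the last pin: nothing to tear down. */
	if (atomic_fetch_sub(&ce->pin_count, 1) != 1)
		return;

	/* Hold an extra reference so the release below cannot free ce. */
	toy_get(ce);
	toy_active_release(ce);
	toy_put(ce);
}

int main(void)
{
	struct toy_ctx *ce = malloc(sizeof(*ce));

	atomic_init(&ce->ref, 2);	/* say: caller + active tracker */
	atomic_init(&ce->pin_count, 1);

	toy_unpin(ce);	/* last unpin: releases the tracker's reference */
	toy_put(ce);	/* caller's reference: frees the context */
	return 0;
}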
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -86,22 +86,20 @@ int __intel_context_do_pin(struct intel_context *ce)
 
 void intel_context_unpin(struct intel_context *ce)
 {
-	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+	if (!atomic_dec_and_test(&ce->pin_count))
 		return;
 
-	/* We may be called from inside intel_context_pin() to evict another */
-	intel_context_get(ce);
-	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
-
-	if (likely(atomic_dec_and_test(&ce->pin_count))) {
-		CE_TRACE(ce, "retire\n");
-
-		ce->ops->unpin(ce);
-
-		intel_context_active_release(ce);
-	}
+	CE_TRACE(ce, "unpin\n");
+	ce->ops->unpin(ce);
 
-	mutex_unlock(&ce->pin_mutex);
+	/*
+	 * Once released, we may asynchronously drop the active reference.
+	 * As that may be the only reference keeping the context alive,
+	 * take an extra now so that it is not freed before we finish
+	 * dereferencing it.
+	 */
+	intel_context_get(ce);
+	intel_context_active_release(ce);
 	intel_context_put(ce);
 }
 
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -605,12 +605,15 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 					    struct intel_engine_cs *engine)
 {
 	intel_engine_mask_t tmp, mask = engine->mask;
+	struct llist_node *pos = NULL, *next;
 	struct intel_gt *gt = engine->gt;
-	struct llist_node *pos, *next;
 	int err;
 
 	GEM_BUG_ON(i915_active_is_idle(ref));
-	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+
+	/* Wait until the previous preallocation is completed */
+	while (!llist_empty(&ref->preallocated_barriers))
+		cond_resched();
 
 	/*
 	 * Preallocate a node for each physical engine supporting the target
@@ -653,16 +656,24 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
 		GEM_BUG_ON(barrier_to_engine(node) != engine);
-		llist_add(barrier_to_ll(node), &ref->preallocated_barriers);
+		next = barrier_to_ll(node);
+		next->next = pos;
+		if (!pos)
+			pos = next;
 		intel_engine_pm_get(engine);
 	}
 
+	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+	llist_add_batch(next, pos, &ref->preallocated_barriers);
+
 	return 0;
 
 unwind:
-	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
+	while (pos) {
 		struct active_node *node = barrier_from_ll(pos);
 
+		pos = pos->next;
+
 		atomic_dec(&ref->count);
 		intel_engine_pm_put(barrier_to_engine(node));
...
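The i915_active.c change stops adding each preallocated barrier to ref->preallocated_barriers one at a time; the nodes are strung onto a private, single-linked chain and only the finished chain is published with a single llist_add_batch() once every node has been acquired, so the unwind path merely walks that private chain. A loose userspace analogue of the build-privately-then-publish-once idea (the node type, add_batch() and shared_head below are invented for the example, not the kernel's llist API):

/*
 * Sketch: link nodes onto a private chain without touching shared state,
 * then splice the whole chain onto a shared lock-free list with one atomic
 * compare-and-swap on the list head, which is roughly what llist_add_batch()
 * does in the kernel.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

static _Atomic(struct node *) shared_head;	/* shared lock-free list */

/* Splice a pre-built chain [first .. last] onto the shared list head. */
static void add_batch(struct node *first, struct node *last)
{
	struct node *old = atomic_load(&shared_head);

	do {
		last->next = old;
	} while (!atomic_compare_exchange_weak(&shared_head, &old, first));
}

int main(void)
{
	struct node *head = NULL, *tail = NULL;

	/* Build the private chain; no shared state is touched yet. */
	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));

		n->payload = i;
		n->next = head;
		head = n;
		if (!tail)
			tail = n;
	}

	add_batch(head, tail);	/* publish everything in one step */

	for (struct node *n = atomic_load(&shared_head); n; n = n->next)
		printf("%d\n", n->payload);

	return 0;
}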