Commit 95f697eb authored by Chris Wilson

drm/i915: Make context pinning part of intel_context_ops

Push the intel_context pin callback down from intel_engine_cs onto the
context itself: now that intel_context_pin() has a single central
caller, the lookup of the intel_context can be done in one place and
each backend only has to supply the pin operation.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190308132522.21573-5-chris@chris-wilson.co.uk
parent c4d52feb
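In short, the patch deletes the engine->context_pin() vfunc and instead gives
struct intel_context_ops a pin() hook, with the reference counting and context
bookkeeping hoisted into one central intel_context_pin(). A condensed sketch of
the new flow, assembled from the hunks below (the active_engines list
bookkeeping is omitted here for brevity):

	struct intel_context_ops {
		int (*pin)(struct intel_context *ce);
		void (*unpin)(struct intel_context *ce);
		void (*destroy)(struct intel_context *ce);
	};

	/* The one central caller: look up the intel_context, then defer to its ops. */
	struct intel_context *
	intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
	{
		struct intel_context *ce;
		int err;

		ce = intel_context_instance(ctx, engine); /* lookup happens here, once */
		if (IS_ERR(ce))
			return ce;

		if (unlikely(!ce->pin_count++)) { /* first pin does the real work */
			err = ce->ops->pin(ce); /* backend-specific: execlists, ring or mock */
			if (err) {
				ce->pin_count = 0;
				return ERR_PTR(err);
			}
			i915_gem_context_get(ctx); /* hold the GEM context while pinned */
		}

		return ce;
	}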
@@ -126,6 +126,40 @@ intel_context_instance(struct i915_gem_context *ctx,
 	return pos;
 }
 
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx,
+		  struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+	int err;
+
+	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+	ce = intel_context_instance(ctx, engine);
+	if (IS_ERR(ce))
+		return ce;
+
+	if (unlikely(!ce->pin_count++)) {
+		err = ce->ops->pin(ce);
+		if (err)
+			goto err_unpin;
+
+		mutex_lock(&ctx->mutex);
+		list_add(&ce->active_link, &ctx->active_engines);
+		mutex_unlock(&ctx->mutex);
+
+		i915_gem_context_get(ctx);
+		GEM_BUG_ON(ce->gem_context != ctx);
+	}
+	GEM_BUG_ON(!ce->pin_count); /* no overflow! */
+
+	return ce;
+
+err_unpin:
+	ce->pin_count = 0;
+	return ERR_PTR(err);
+}
+
 static void intel_context_retire(struct i915_active_request *active,
 				 struct i915_request *rq)
 {
...
@@ -48,11 +48,8 @@ __intel_context_insert(struct i915_gem_context *ctx,
 void
 __intel_context_remove(struct intel_context *ce);
 
-static inline struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
-{
-	return engine->context_pin(engine, ctx);
-}
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
 
 static inline void __intel_context_pin(struct intel_context *ce)
 {
...
@@ -19,6 +19,7 @@ struct intel_context;
 struct intel_ring;
 
 struct intel_context_ops {
+	int (*pin)(struct intel_context *ce);
 	void (*unpin)(struct intel_context *ce);
 	void (*destroy)(struct intel_context *ce);
 };
...
@@ -357,8 +357,6 @@ struct intel_engine_cs {
 	void		(*set_default_submission)(struct intel_engine_cs *engine);
 
 	const struct intel_context_ops *cops;
-	struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
-					     struct i915_gem_context *ctx);
 
 	int		(*request_alloc)(struct i915_request *rq);
 	int		(*init_context)(struct i915_request *rq);
...
@@ -166,9 +166,8 @@
 
 #define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT | I915_PRIORITY_NOSEMAPHORE)
 
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-					    struct intel_engine_cs *engine,
-					    struct intel_context *ce);
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+					    struct intel_engine_cs *engine);
 static void execlists_init_reg_state(u32 *reg_state,
 				     struct intel_context *ce,
 				     struct intel_engine_cs *engine,
@@ -330,11 +329,10 @@ assert_priority_queue(const struct i915_request *prev,
  * engine info, SW context ID and SW counter need to form a unique number
  * (Context ID) per lrc.
  */
-static void
-intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
-				   struct intel_engine_cs *engine,
-				   struct intel_context *ce)
+static u64
+lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
 {
+	struct i915_gem_context *ctx = ce->gem_context;
 	u64 desc;
 
 	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -352,7 +350,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 	 * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
 	 * anything below.
 	 */
-	if (INTEL_GEN(ctx->i915) >= 11) {
+	if (INTEL_GEN(engine->i915) >= 11) {
 		GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
 		desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
 								/* bits 37-47 */
@@ -369,7 +367,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 		desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */
 	}
 
-	ce->lrc_desc = desc;
+	return desc;
 }
 
 static void unwind_wa_tail(struct i915_request *rq)
@@ -1290,7 +1288,7 @@ static void execlists_context_unpin(struct intel_context *ce)
 	i915_gem_context_put(ce->gem_context);
 }
 
-static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
+static int __context_pin(struct i915_vma *vma)
 {
 	unsigned int flags;
 	int err;
@@ -1313,11 +1311,14 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
 }
 
 static void
-__execlists_update_reg_state(struct intel_engine_cs *engine,
-			     struct intel_context *ce)
+__execlists_update_reg_state(struct intel_context *ce,
+			     struct intel_engine_cs *engine)
 {
-	u32 *regs = ce->lrc_reg_state;
 	struct intel_ring *ring = ce->ring;
+	u32 *regs = ce->lrc_reg_state;
+
+	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
 
 	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
 	regs[CTX_RING_HEAD + 1] = ring->head;
@@ -1329,25 +1330,26 @@ __execlists_update_reg_state(struct intel_engine_cs *engine,
 		gen8_make_rpcs(engine->i915, &ce->sseu);
 }
 
-static struct intel_context *
-__execlists_context_pin(struct intel_engine_cs *engine,
-			struct i915_gem_context *ctx,
-			struct intel_context *ce)
+static int
+__execlists_context_pin(struct intel_context *ce,
+			struct intel_engine_cs *engine)
 {
 	void *vaddr;
 	int ret;
 
-	ret = execlists_context_deferred_alloc(ctx, engine, ce);
+	GEM_BUG_ON(!ce->gem_context->ppgtt);
+
+	ret = execlists_context_deferred_alloc(ce, engine);
 	if (ret)
 		goto err;
 	GEM_BUG_ON(!ce->state);
 
-	ret = __context_pin(ctx, ce->state);
+	ret = __context_pin(ce->state);
 	if (ret)
 		goto err;
 
 	vaddr = i915_gem_object_pin_map(ce->state->obj,
-					i915_coherent_map_type(ctx->i915) |
+					i915_coherent_map_type(engine->i915) |
 					I915_MAP_OVERRIDE);
 	if (IS_ERR(vaddr)) {
 		ret = PTR_ERR(vaddr);
@@ -1358,26 +1360,16 @@ __execlists_context_pin(struct intel_engine_cs *engine,
 	if (ret)
 		goto unpin_map;
 
-	ret = i915_gem_context_pin_hw_id(ctx);
+	ret = i915_gem_context_pin_hw_id(ce->gem_context);
 	if (ret)
 		goto unpin_ring;
 
-	intel_lr_context_descriptor_update(ctx, engine, ce);
-
-	GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
-
+	ce->lrc_desc = lrc_descriptor(ce, engine);
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
-	__execlists_update_reg_state(engine, ce);
+	__execlists_update_reg_state(ce, engine);
 
 	ce->state->obj->pin_global++;
-
-	mutex_lock(&ctx->mutex);
-	list_add(&ce->active_link, &ctx->active_engines);
-	mutex_unlock(&ctx->mutex);
-
-	i915_gem_context_get(ctx);
-	return ce;
+	return 0;
 
 unpin_ring:
 	intel_ring_unpin(ce->ring);
@@ -1386,31 +1378,16 @@ __execlists_context_pin(struct intel_engine_cs *engine,
 unpin_vma:
 	__i915_vma_unpin(ce->state);
 err:
-	ce->pin_count = 0;
-	return ERR_PTR(ret);
+	return ret;
 }
 
-static struct intel_context *
-execlists_context_pin(struct intel_engine_cs *engine,
-		      struct i915_gem_context *ctx)
+static int execlists_context_pin(struct intel_context *ce)
 {
-	struct intel_context *ce;
-
-	ce = intel_context_instance(ctx, engine);
-	if (IS_ERR(ce))
-		return ce;
-
-	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-	GEM_BUG_ON(!ctx->ppgtt);
-
-	if (likely(ce->pin_count++))
-		return ce;
-	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
-	return __execlists_context_pin(engine, ctx, ce);
+	return __execlists_context_pin(ce, ce->engine);
 }
 
 static const struct intel_context_ops execlists_context_ops = {
+	.pin = execlists_context_pin,
 	.unpin = execlists_context_unpin,
 	.destroy = execlists_context_destroy,
 };
@@ -2034,7 +2011,7 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	intel_ring_update_space(rq->ring);
 
 	execlists_init_reg_state(regs, rq->hw_context, engine, rq->ring);
-	__execlists_update_reg_state(engine, rq->hw_context);
+	__execlists_update_reg_state(rq->hw_context, engine);
 
 out_unlock:
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -2359,7 +2336,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->reset.finish = execlists_reset_finish;
 
 	engine->cops = &execlists_context_ops;
-	engine->context_pin = execlists_context_pin;
 	engine->request_alloc = execlists_request_alloc;
 
 	engine->emit_flush = gen8_emit_flush;
@@ -2836,9 +2812,13 @@ populate_lr_context(struct intel_context *ce,
 	return ret;
 }
 
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-					    struct intel_engine_cs *engine,
-					    struct intel_context *ce)
+static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
+{
+	return i915_timeline_create(ctx->i915, ctx->name, NULL);
+}
+
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+					    struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *ctx_obj;
 	struct i915_vma *vma;
@@ -2858,23 +2838,25 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	 */
 	context_size += LRC_HEADER_PAGES * PAGE_SIZE;
 
-	ctx_obj = i915_gem_object_create(ctx->i915, context_size);
+	ctx_obj = i915_gem_object_create(engine->i915, context_size);
 	if (IS_ERR(ctx_obj))
 		return PTR_ERR(ctx_obj);
 
-	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
+	vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto error_deref_obj;
 	}
 
-	timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
+	timeline = get_timeline(ce->gem_context);
 	if (IS_ERR(timeline)) {
 		ret = PTR_ERR(timeline);
 		goto error_deref_obj;
 	}
 
-	ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
+	ring = intel_engine_create_ring(engine,
+					timeline,
+					ce->gem_context->ring_size);
 	i915_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
@@ -2919,7 +2901,7 @@ void intel_lr_context_resume(struct drm_i915_private *i915)
 		list_for_each_entry(ce, &ctx->active_engines, active_link) {
 			GEM_BUG_ON(!ce->ring);
 			intel_ring_reset(ce->ring, 0);
-			__execlists_update_reg_state(ce->engine, ce);
+			__execlists_update_reg_state(ce, ce->engine);
 		}
 	}
 }
...
@@ -1509,11 +1509,9 @@ alloc_context_vma(struct intel_engine_cs *engine)
 	return ERR_PTR(err);
 }
 
-static struct intel_context *
-__ring_context_pin(struct intel_engine_cs *engine,
-		   struct i915_gem_context *ctx,
-		   struct intel_context *ce)
+static int ring_context_pin(struct intel_context *ce)
 {
+	struct intel_engine_cs *engine = ce->engine;
 	int err;
 
 	/* One ringbuffer to rule them all */
@@ -1524,55 +1522,29 @@ __ring_context_pin(struct intel_engine_cs *engine,
 		struct i915_vma *vma;
 
 		vma = alloc_context_vma(engine);
-		if (IS_ERR(vma)) {
-			err = PTR_ERR(vma);
-			goto err;
-		}
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
 
 		ce->state = vma;
 	}
 
 	err = __context_pin(ce);
 	if (err)
-		goto err;
+		return err;
 
 	err = __context_pin_ppgtt(ce->gem_context);
 	if (err)
 		goto err_unpin;
 
-	mutex_lock(&ctx->mutex);
-	list_add(&ce->active_link, &ctx->active_engines);
-	mutex_unlock(&ctx->mutex);
-
-	i915_gem_context_get(ctx);
-	return ce;
+	return 0;
 
 err_unpin:
 	__context_unpin(ce);
-err:
-	ce->pin_count = 0;
-	return ERR_PTR(err);
-}
-
-static struct intel_context *
-ring_context_pin(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
-{
-	struct intel_context *ce;
-
-	ce = intel_context_instance(ctx, engine);
-	if (IS_ERR(ce))
-		return ce;
-
-	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
-	if (likely(ce->pin_count++))
-		return ce;
-	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
-	return __ring_context_pin(engine, ctx, ce);
+	return err;
 }
 
 static const struct intel_context_ops ring_context_ops = {
+	.pin = ring_context_pin,
 	.unpin = ring_context_unpin,
 	.destroy = ring_context_destroy,
 };
@@ -2283,7 +2255,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 	engine->reset.finish = reset_finish;
 
 	engine->cops = &ring_context_ops;
-	engine->context_pin = ring_context_pin;
 	engine->request_alloc = ring_request_alloc;
 
 	/*
...
@@ -137,41 +137,20 @@ static void mock_context_destroy(struct intel_context *ce)
 		mock_ring_free(ce->ring);
 }
 
-static struct intel_context *
-mock_context_pin(struct intel_engine_cs *engine,
-		 struct i915_gem_context *ctx)
+static int mock_context_pin(struct intel_context *ce)
 {
-	struct intel_context *ce;
-	int err = -ENOMEM;
-
-	ce = intel_context_instance(ctx, engine);
-	if (IS_ERR(ce))
-		return ce;
-
-	if (ce->pin_count++)
-		return ce;
-
 	if (!ce->ring) {
-		ce->ring = mock_ring(engine);
+		ce->ring = mock_ring(ce->engine);
 		if (!ce->ring)
-			goto err;
+			return -ENOMEM;
 	}
 
 	mock_timeline_pin(ce->ring->timeline);
-
-	mutex_lock(&ctx->mutex);
-	list_add(&ce->active_link, &ctx->active_engines);
-	mutex_unlock(&ctx->mutex);
-
-	i915_gem_context_get(ctx);
-	return ce;
-
-err:
-	ce->pin_count = 0;
-	return ERR_PTR(err);
+	return 0;
 }
 
 static const struct intel_context_ops mock_context_ops = {
+	.pin = mock_context_pin,
 	.unpin = mock_context_unpin,
 	.destroy = mock_context_destroy,
 };
@@ -235,7 +214,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.status_page.addr = (void *)(engine + 1);
 
 	engine->base.cops = &mock_context_ops;
-	engine->base.context_pin = mock_context_pin;
 	engine->base.request_alloc = mock_request_alloc;
 	engine->base.emit_flush = mock_emit_flush;
 	engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
...
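For reference, the refactor is invisible at the pinning call site; a typical
caller such as i915_request_alloc() (paraphrased from the kernel of this era,
not part of this diff) still reads:

	ce = intel_context_pin(ctx, engine); /* now also does the lookup and ce->ops->pin() */
	if (IS_ERR(ce))
		return ERR_CAST(ce);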