Commit bf3783e5 authored by Chris Wilson

drm/i915: Use VMA as the primary object for context state

When working with contexts, we most frequently want the GGTT VMA for the
context state, first and foremost. Since the object is available via the
VMA, we need only then store the VMA.

v2: Formatting tweaks to debugfs output, restored some comments removed
in the next patch
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1471254551-25805-15-git-send-email-chris@chris-wilson.co.uk
parent f23eda8c
...@@ -354,7 +354,7 @@ static int per_file_ctx_stats(int id, void *ptr, void *data) ...@@ -354,7 +354,7 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) { for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
if (ctx->engine[n].state) if (ctx->engine[n].state)
per_file_stats(0, ctx->engine[n].state, data); per_file_stats(0, ctx->engine[n].state->obj, data);
if (ctx->engine[n].ring) if (ctx->engine[n].ring)
per_file_stats(0, ctx->engine[n].ring->obj, data); per_file_stats(0, ctx->engine[n].ring->obj, data);
} }
...@@ -1977,7 +1977,7 @@ static int i915_context_status(struct seq_file *m, void *unused) ...@@ -1977,7 +1977,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_printf(m, "%s: ", engine->name); seq_printf(m, "%s: ", engine->name);
seq_putc(m, ce->initialised ? 'I' : 'i'); seq_putc(m, ce->initialised ? 'I' : 'i');
if (ce->state) if (ce->state)
describe_obj(m, ce->state); describe_obj(m, ce->state->obj);
if (ce->ring) if (ce->ring)
describe_ctx_ring(m, ce->ring); describe_ctx_ring(m, ce->ring);
seq_putc(m, '\n'); seq_putc(m, '\n');
...@@ -1995,36 +1995,34 @@ static void i915_dump_lrc_obj(struct seq_file *m, ...@@ -1995,36 +1995,34 @@ static void i915_dump_lrc_obj(struct seq_file *m,
struct i915_gem_context *ctx, struct i915_gem_context *ctx,
struct intel_engine_cs *engine) struct intel_engine_cs *engine)
{ {
struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; struct i915_vma *vma = ctx->engine[engine->id].state;
struct page *page; struct page *page;
uint32_t *reg_state;
int j; int j;
unsigned long ggtt_offset = 0;
seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id); seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
if (ctx_obj == NULL) { if (!vma) {
seq_puts(m, "\tNot allocated\n"); seq_puts(m, "\tFake context\n");
return; return;
} }
if (!i915_gem_obj_ggtt_bound(ctx_obj)) if (vma->flags & I915_VMA_GLOBAL_BIND)
seq_puts(m, "\tNot bound in GGTT\n"); seq_printf(m, "\tBound in GGTT at 0x%08x\n",
else lower_32_bits(vma->node.start));
ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
if (i915_gem_object_get_pages(ctx_obj)) { if (i915_gem_object_get_pages(vma->obj)) {
seq_puts(m, "\tFailed to get pages for context object\n"); seq_puts(m, "\tFailed to get pages for context object\n\n");
return; return;
} }
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN);
if (!WARN_ON(page == NULL)) { if (page) {
reg_state = kmap_atomic(page); u32 *reg_state = kmap_atomic(page);
for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n", seq_printf(m,
ggtt_offset + 4096 + (j * 4), "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
j * 4,
reg_state[j], reg_state[j + 1], reg_state[j], reg_state[j + 1],
reg_state[j + 2], reg_state[j + 3]); reg_state[j + 2], reg_state[j + 3]);
} }
......
...@@ -893,9 +893,8 @@ struct i915_gem_context { ...@@ -893,9 +893,8 @@ struct i915_gem_context {
u32 ggtt_alignment; u32 ggtt_alignment;
struct intel_context { struct intel_context {
struct drm_i915_gem_object *state; struct i915_vma *state;
struct intel_ring *ring; struct intel_ring *ring;
struct i915_vma *lrc_vma;
uint32_t *lrc_reg_state; uint32_t *lrc_reg_state;
u64 lrc_desc; u64 lrc_desc;
int pin_count; int pin_count;
......
...@@ -155,7 +155,7 @@ void i915_gem_context_free(struct kref *ctx_ref) ...@@ -155,7 +155,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (ce->ring) if (ce->ring)
intel_ring_free(ce->ring); intel_ring_free(ce->ring);
i915_gem_object_put(ce->state); i915_vma_put(ce->state);
} }
list_del(&ctx->link); list_del(&ctx->link);
...@@ -281,13 +281,24 @@ __create_hw_context(struct drm_device *dev, ...@@ -281,13 +281,24 @@ __create_hw_context(struct drm_device *dev,
ctx->ggtt_alignment = get_context_alignment(dev_priv); ctx->ggtt_alignment = get_context_alignment(dev_priv);
if (dev_priv->hw_context_size) { if (dev_priv->hw_context_size) {
struct drm_i915_gem_object *obj = struct drm_i915_gem_object *obj;
i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size); struct i915_vma *vma;
obj = i915_gem_alloc_context_obj(dev,
dev_priv->hw_context_size);
if (IS_ERR(obj)) { if (IS_ERR(obj)) {
ret = PTR_ERR(obj); ret = PTR_ERR(obj);
goto err_out; goto err_out;
} }
ctx->engine[RCS].state = obj;
vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
ret = PTR_ERR(vma);
goto err_out;
}
ctx->engine[RCS].state = vma;
} }
/* Default context will never have a file_priv */ /* Default context will never have a file_priv */
...@@ -399,7 +410,7 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx, ...@@ -399,7 +410,7 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
struct intel_context *ce = &ctx->engine[engine->id]; struct intel_context *ce = &ctx->engine[engine->id];
if (ce->state) if (ce->state)
i915_gem_object_ggtt_unpin(ce->state); i915_vma_unpin(ce->state);
i915_gem_context_put(ctx); i915_gem_context_put(ctx);
} }
...@@ -620,9 +631,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) ...@@ -620,9 +631,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT); intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, intel_ring_emit(ring, req->ctx->engine[RCS].state->node.start | flags);
i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
flags);
/* /*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv * WaMiSetContext_Hang:snb,ivb,vlv
...@@ -755,6 +764,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) ...@@ -755,6 +764,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
struct i915_gem_context *to = req->ctx; struct i915_gem_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine; struct intel_engine_cs *engine = req->engine;
struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt; struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
struct i915_vma *vma = to->engine[RCS].state;
struct i915_gem_context *from; struct i915_gem_context *from;
u32 hw_flags; u32 hw_flags;
int ret, i; int ret, i;
...@@ -763,8 +773,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) ...@@ -763,8 +773,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
return 0; return 0;
/* Trying to pin first makes error handling easier. */ /* Trying to pin first makes error handling easier. */
ret = i915_gem_object_ggtt_pin(to->engine[RCS].state, NULL, 0, ret = i915_vma_pin(vma, 0, to->ggtt_alignment, PIN_GLOBAL);
to->ggtt_alignment, 0);
if (ret) if (ret)
return ret; return ret;
...@@ -785,9 +794,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) ...@@ -785,9 +794,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* *
* XXX: We need a real interface to do this instead of trickery. * XXX: We need a real interface to do this instead of trickery.
*/ */
ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false); ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
if (ret) if (ret)
goto unpin_out; goto err;
if (needs_pd_load_pre(ppgtt, engine, to)) { if (needs_pd_load_pre(ppgtt, engine, to)) {
/* Older GENs and non render rings still want the load first, /* Older GENs and non render rings still want the load first,
...@@ -797,7 +806,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) ...@@ -797,7 +806,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
trace_switch_mm(engine, to); trace_switch_mm(engine, to);
ret = ppgtt->switch_mm(ppgtt, req); ret = ppgtt->switch_mm(ppgtt, req);
if (ret) if (ret)
goto unpin_out; goto err;
} }
if (!to->engine[RCS].initialised || i915_gem_context_is_default(to)) if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
...@@ -814,7 +823,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) ...@@ -814,7 +823,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (to != from || (hw_flags & MI_FORCE_RESTORE)) { if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
ret = mi_set_context(req, hw_flags); ret = mi_set_context(req, hw_flags);
if (ret) if (ret)
goto unpin_out; goto err;
} }
/* The backing object for the context is done after switching to the /* The backing object for the context is done after switching to the
...@@ -824,8 +833,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) ...@@ -824,8 +833,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* MI_SET_CONTEXT instead of when the next seqno has completed. * MI_SET_CONTEXT instead of when the next seqno has completed.
*/ */
if (from != NULL) { if (from != NULL) {
struct drm_i915_gem_object *obj = from->engine[RCS].state;
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the * whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be * object dirty. The only exception is that the context must be
...@@ -833,11 +840,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) ...@@ -833,11 +840,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* able to defer doing this until we know the object would be * able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet. * swapped, but there is no way to do that yet.
*/ */
obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; i915_vma_move_to_active(from->engine[RCS].state, req, 0);
i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0); /* state is kept alive until the next request */
i915_vma_unpin(from->engine[RCS].state);
/* obj is kept alive until the next request by its active ref */
i915_gem_object_ggtt_unpin(obj);
i915_gem_context_put(from); i915_gem_context_put(from);
} }
engine->last_context = i915_gem_context_get(to); engine->last_context = i915_gem_context_get(to);
...@@ -882,8 +887,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) ...@@ -882,8 +887,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
return 0; return 0;
unpin_out: err:
i915_gem_object_ggtt_unpin(to->engine[RCS].state); i915_vma_unpin(vma);
return ret; return ret;
} }
......
...@@ -1103,9 +1103,10 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv, ...@@ -1103,9 +1103,10 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
i915_error_ggtt_object_create(dev_priv, i915_error_ggtt_object_create(dev_priv,
engine->scratch.obj); engine->scratch.obj);
ee->ctx = if (request->ctx->engine[i].state) {
i915_error_ggtt_object_create(dev_priv, ee->ctx = i915_error_ggtt_object_create(dev_priv,
request->ctx->engine[i].state); request->ctx->engine[i].state->obj);
}
if (request->pid) { if (request->pid) {
struct task_struct *task; struct task_struct *task;
......
...@@ -358,7 +358,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc, ...@@ -358,7 +358,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
lrc->context_desc = lower_32_bits(ce->lrc_desc); lrc->context_desc = lower_32_bits(ce->lrc_desc);
/* The state page is after PPHWSP */ /* The state page is after PPHWSP */
gfx_addr = i915_gem_obj_ggtt_offset(ce->state); gfx_addr = ce->state->node.start;
lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE; lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(guc_engine_id << GUC_ELC_ENGINE_OFFSET); (guc_engine_id << GUC_ELC_ENGINE_OFFSET);
...@@ -1080,7 +1080,7 @@ int intel_guc_suspend(struct drm_device *dev) ...@@ -1080,7 +1080,7 @@ int intel_guc_suspend(struct drm_device *dev)
/* any value greater than GUC_POWER_D0 */ /* any value greater than GUC_POWER_D0 */
data[1] = GUC_POWER_D1; data[1] = GUC_POWER_D1;
/* first page is shared data with GuC */ /* first page is shared data with GuC */
data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state); data[2] = ctx->engine[RCS].state->node.start;
return host2guc_action(guc, data, ARRAY_SIZE(data)); return host2guc_action(guc, data, ARRAY_SIZE(data));
} }
...@@ -1105,7 +1105,7 @@ int intel_guc_resume(struct drm_device *dev) ...@@ -1105,7 +1105,7 @@ int intel_guc_resume(struct drm_device *dev)
data[0] = HOST2GUC_ACTION_EXIT_S_STATE; data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0; data[1] = GUC_POWER_D0;
/* first page is shared data with GuC */ /* first page is shared data with GuC */
data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state); data[2] = ctx->engine[RCS].state->node.start;
return host2guc_action(guc, data, ARRAY_SIZE(data)); return host2guc_action(guc, data, ARRAY_SIZE(data));
} }
...@@ -315,7 +315,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, ...@@ -315,7 +315,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
desc = ctx->desc_template; /* bits 3-4 */ desc = ctx->desc_template; /* bits 3-4 */
desc |= engine->ctx_desc_template; /* bits 0-11 */ desc |= engine->ctx_desc_template; /* bits 0-11 */
desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE; desc |= ce->state->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
/* bits 12-31 */ /* bits 12-31 */
desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
...@@ -763,7 +763,6 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine) ...@@ -763,7 +763,6 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
static int intel_lr_context_pin(struct i915_gem_context *ctx, static int intel_lr_context_pin(struct i915_gem_context *ctx,
struct intel_engine_cs *engine) struct intel_engine_cs *engine)
{ {
struct drm_i915_private *dev_priv = ctx->i915;
struct intel_context *ce = &ctx->engine[engine->id]; struct intel_context *ce = &ctx->engine[engine->id];
void *vaddr; void *vaddr;
u32 *lrc_reg_state; u32 *lrc_reg_state;
...@@ -774,16 +773,15 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx, ...@@ -774,16 +773,15 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
if (ce->pin_count++) if (ce->pin_count++)
return 0; return 0;
ret = i915_gem_object_ggtt_pin(ce->state, NULL, ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN,
0, GEN8_LR_CONTEXT_ALIGN, PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL);
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret) if (ret)
goto err; goto err;
vaddr = i915_gem_object_pin_map(ce->state, I915_MAP_WB); vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
if (IS_ERR(vaddr)) { if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr); ret = PTR_ERR(vaddr);
goto unpin_ctx_obj; goto unpin_vma;
} }
lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
...@@ -792,24 +790,25 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx, ...@@ -792,24 +790,25 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
if (ret) if (ret)
goto unpin_map; goto unpin_map;
ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
intel_lr_context_descriptor_update(ctx, engine); intel_lr_context_descriptor_update(ctx, engine);
lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ring->vma->node.start; lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ring->vma->node.start;
ce->lrc_reg_state = lrc_reg_state; ce->lrc_reg_state = lrc_reg_state;
ce->state->dirty = true; ce->state->obj->dirty = true;
/* Invalidate GuC TLB. */ /* Invalidate GuC TLB. */
if (i915.enable_guc_submission) if (i915.enable_guc_submission) {
struct drm_i915_private *dev_priv = ctx->i915;
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
i915_gem_context_get(ctx); i915_gem_context_get(ctx);
return 0; return 0;
unpin_map: unpin_map:
i915_gem_object_unpin_map(ce->state); i915_gem_object_unpin_map(ce->state->obj);
unpin_ctx_obj: unpin_vma:
i915_gem_object_ggtt_unpin(ce->state); __i915_vma_unpin(ce->state);
err: err:
ce->pin_count = 0; ce->pin_count = 0;
return ret; return ret;
...@@ -828,12 +827,8 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx, ...@@ -828,12 +827,8 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
intel_ring_unpin(ce->ring); intel_ring_unpin(ce->ring);
i915_gem_object_unpin_map(ce->state); i915_gem_object_unpin_map(ce->state->obj);
i915_gem_object_ggtt_unpin(ce->state); i915_vma_unpin(ce->state);
ce->lrc_vma = NULL;
ce->lrc_desc = 0;
ce->lrc_reg_state = NULL;
i915_gem_context_put(ctx); i915_gem_context_put(ctx);
} }
...@@ -1747,19 +1742,18 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) ...@@ -1747,19 +1742,18 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
} }
static int static int
lrc_setup_hws(struct intel_engine_cs *engine, lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
struct drm_i915_gem_object *dctx_obj)
{ {
void *hws; void *hws;
/* The HWSP is part of the default context object in LRC mode. */ /* The HWSP is part of the default context object in LRC mode. */
engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) + engine->status_page.gfx_addr =
LRC_PPHWSP_PN * PAGE_SIZE; vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
hws = i915_gem_object_pin_map(dctx_obj, I915_MAP_WB); hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
if (IS_ERR(hws)) if (IS_ERR(hws))
return PTR_ERR(hws); return PTR_ERR(hws);
engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE; engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
engine->status_page.obj = dctx_obj; engine->status_page.obj = vma->obj;
return 0; return 0;
} }
...@@ -2131,6 +2125,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, ...@@ -2131,6 +2125,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
{ {
struct drm_i915_gem_object *ctx_obj; struct drm_i915_gem_object *ctx_obj;
struct intel_context *ce = &ctx->engine[engine->id]; struct intel_context *ce = &ctx->engine[engine->id];
struct i915_vma *vma;
uint32_t context_size; uint32_t context_size;
struct intel_ring *ring; struct intel_ring *ring;
int ret; int ret;
...@@ -2148,6 +2143,12 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, ...@@ -2148,6 +2143,12 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
return PTR_ERR(ctx_obj); return PTR_ERR(ctx_obj);
} }
vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
}
ring = intel_engine_create_ring(engine, ctx->ring_size); ring = intel_engine_create_ring(engine, ctx->ring_size);
if (IS_ERR(ring)) { if (IS_ERR(ring)) {
ret = PTR_ERR(ring); ret = PTR_ERR(ring);
...@@ -2161,7 +2162,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, ...@@ -2161,7 +2162,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
} }
ce->ring = ring; ce->ring = ring;
ce->state = ctx_obj; ce->state = vma;
ce->initialised = engine->init_context == NULL; ce->initialised = engine->init_context == NULL;
return 0; return 0;
...@@ -2170,8 +2171,6 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, ...@@ -2170,8 +2171,6 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
intel_ring_free(ring); intel_ring_free(ring);
error_deref_obj: error_deref_obj:
i915_gem_object_put(ctx_obj); i915_gem_object_put(ctx_obj);
ce->ring = NULL;
ce->state = NULL;
return ret; return ret;
} }
...@@ -2182,24 +2181,23 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv, ...@@ -2182,24 +2181,23 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
for_each_engine(engine, dev_priv) { for_each_engine(engine, dev_priv) {
struct intel_context *ce = &ctx->engine[engine->id]; struct intel_context *ce = &ctx->engine[engine->id];
struct drm_i915_gem_object *ctx_obj = ce->state;
void *vaddr; void *vaddr;
uint32_t *reg_state; uint32_t *reg_state;
if (!ctx_obj) if (!ce->state)
continue; continue;
vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
if (WARN_ON(IS_ERR(vaddr))) if (WARN_ON(IS_ERR(vaddr)))
continue; continue;
reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
ctx_obj->dirty = true;
reg_state[CTX_RING_HEAD+1] = 0; reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL+1] = 0; reg_state[CTX_RING_TAIL+1] = 0;
i915_gem_object_unpin_map(ctx_obj); ce->state->obj->dirty = true;
i915_gem_object_unpin_map(ce->state->obj);
ce->ring->head = 0; ce->ring->head = 0;
ce->ring->tail = 0; ce->ring->tail = 0;
......
...@@ -2092,8 +2092,8 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx, ...@@ -2092,8 +2092,8 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
return 0; return 0;
if (ce->state) { if (ce->state) {
ret = i915_gem_object_ggtt_pin(ce->state, NULL, 0, ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
ctx->ggtt_alignment, PIN_HIGH); PIN_GLOBAL | PIN_HIGH);
if (ret) if (ret)
goto error; goto error;
} }
...@@ -2127,7 +2127,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx, ...@@ -2127,7 +2127,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
return; return;
if (ce->state) if (ce->state)
i915_gem_object_ggtt_unpin(ce->state); i915_vma_unpin(ce->state);
i915_gem_context_put(ctx); i915_gem_context_put(ctx);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment