Commit 7d774cac authored by Tvrtko Ursulin

drm/i915: Use new i915_gem_object_pin_map for LRC

We can use the new pin/lazy unpin API for simplicity and better
performance in the execlist submission paths.
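
The conversion follows one pattern throughout: map the whole object once
with i915_gem_object_pin_map() and take page offsets from the returned
address, instead of kmap()ing individual pages. A minimal sketch of that
pattern, mirroring the intel_lr_context_do_pin() hunk below (a fragment
in the driver's context, not standalone code):

	void *vaddr;
	u32 *lrc_reg_state;

	/* Map the whole context object once; returns ERR_PTR() on failure. */
	vaddr = i915_gem_object_pin_map(ctx_obj);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* The register state lives LRC_STATE_PN pages into the object. */
	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

	/* ... use lrc_reg_state ... */

	/* Drop the pin; the mapping itself is torn down lazily. */
	i915_gem_object_unpin_map(ctx_obj);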

v2:
  * Fix error handling and convert more users.
  * Compact some names for readability.

v3:
  * intel_lr_context_free was not unpinning.
  * Special-case GPU reset, which otherwise unbalances the
    HWS object pages pin count because it runs only the engine
    initialization (not the destructors).

v4:
  * Rebased on top of hws setup/init split.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1460472042-1998-1-git-send-email-tvrtko.ursulin@linux.intel.com
[tursulin: renames: s/hwd/hws/, s/obj_addr/vaddr/]
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 04794adb
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -342,7 +342,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 		struct intel_context *ctx;
 
 		list_for_each_entry(ctx, &dev_priv->context_list, link)
-			intel_lr_context_reset(dev, ctx);
+			intel_lr_context_reset(dev_priv, ctx);
 	}
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1091,8 +1091,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 	struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
-	struct page *lrc_state_page;
-	uint32_t *lrc_reg_state;
+	void *vaddr;
+	u32 *lrc_reg_state;
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
@@ -1102,19 +1102,20 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
 	if (ret)
 		return ret;
 
-	lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-	if (WARN_ON(!lrc_state_page)) {
-		ret = -ENODEV;
+	vaddr = i915_gem_object_pin_map(ctx_obj);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
 		goto unpin_ctx_obj;
 	}
 
+	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+
 	ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
 	if (ret)
-		goto unpin_ctx_obj;
+		goto unpin_map;
 
 	ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
 	intel_lr_context_descriptor_update(ctx, engine);
-	lrc_reg_state = kmap(lrc_state_page);
 	lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
 	ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
 	ctx_obj->dirty = true;
@@ -1125,6 +1126,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
 
 	return ret;
 
+unpin_map:
+	i915_gem_object_unpin_map(ctx_obj);
 unpin_ctx_obj:
 	i915_gem_object_ggtt_unpin(ctx_obj);
 
@@ -1157,7 +1160,7 @@ void intel_lr_context_unpin(struct intel_context *ctx,
 	WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
 
 	if (--ctx->engine[engine->id].pin_count == 0) {
-		kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
+		i915_gem_object_unpin_map(ctx_obj);
 		intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
 		i915_gem_object_ggtt_unpin(ctx_obj);
 		ctx->engine[engine->id].lrc_vma = NULL;
@@ -2054,7 +2057,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	i915_gem_batch_pool_fini(&engine->batch_pool);
 
 	if (engine->status_page.obj) {
-		kunmap(sg_page(engine->status_page.obj->pages->sgl));
+		i915_gem_object_unpin_map(engine->status_page.obj);
 		engine->status_page.obj = NULL;
 	}
 
@@ -2092,18 +2095,22 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
 	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
 }
 
-static void
+static int
 lrc_setup_hws(struct intel_engine_cs *engine,
 	      struct drm_i915_gem_object *dctx_obj)
 {
-	struct page *page;
+	void *hws;
 
 	/* The HWSP is part of the default context object in LRC mode. */
 	engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
 				       LRC_PPHWSP_PN * PAGE_SIZE;
-	page = i915_gem_object_get_page(dctx_obj, LRC_PPHWSP_PN);
-	engine->status_page.page_addr = kmap(page);
+	hws = i915_gem_object_pin_map(dctx_obj);
+	if (IS_ERR(hws))
+		return PTR_ERR(hws);
+	engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
 	engine->status_page.obj = dctx_obj;
+
+	return 0;
 }
 
 static int
@@ -2165,7 +2172,11 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
 	}
 
 	/* And setup the hardware status page. */
-	lrc_setup_hws(engine, dctx->engine[engine->id].state);
+	ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
+	if (ret) {
+		DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
+		goto error;
+	}
 
 	return 0;
 
@@ -2417,15 +2428,16 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 }
 
 static int
-populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
+populate_lr_context(struct intel_context *ctx,
+		    struct drm_i915_gem_object *ctx_obj,
 		    struct intel_engine_cs *engine,
 		    struct intel_ringbuffer *ringbuf)
 {
 	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-	struct page *page;
-	uint32_t *reg_state;
+	void *vaddr;
+	u32 *reg_state;
 	int ret;
 
 	if (!ppgtt)
@@ -2437,18 +2449,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
 		return ret;
 	}
 
-	ret = i915_gem_object_get_pages(ctx_obj);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Could not get object pages\n");
+	vaddr = i915_gem_object_pin_map(ctx_obj);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
+		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
 		return ret;
 	}
 
-	i915_gem_object_pin_pages(ctx_obj);
+	ctx_obj->dirty = true;
 
 	/* The second page of the context object contains some fields which must
 	 * be set up prior to the first execution. */
-	page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-	reg_state = kmap_atomic(page);
+	reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
 	/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
 	 * commands followed by (reg, value) pairs. The values we are setting here are
@@ -2553,8 +2564,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
 			       make_rpcs(dev));
 	}
 
-	kunmap_atomic(reg_state);
-	i915_gem_object_unpin_pages(ctx_obj);
+	i915_gem_object_unpin_map(ctx_obj);
 
 	return 0;
 }
@@ -2581,6 +2591,7 @@ void intel_lr_context_free(struct intel_context *ctx)
 		if (ctx == ctx->i915->kernel_context) {
 			intel_unpin_ringbuffer_obj(ringbuf);
 			i915_gem_object_ggtt_unpin(ctx_obj);
+			i915_gem_object_unpin_map(ctx_obj);
 		}
 
 		WARN_ON(ctx->engine[i].pin_count);
@@ -2709,10 +2720,9 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 	return ret;
 }
 
-void intel_lr_context_reset(struct drm_device *dev,
+void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 			    struct intel_context *ctx)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *engine;
 
 	for_each_engine(engine, dev_priv) {
@@ -2720,23 +2730,23 @@ void intel_lr_context_reset(struct drm_device *dev,
 			ctx->engine[engine->id].state;
 		struct intel_ringbuffer *ringbuf =
 			ctx->engine[engine->id].ringbuf;
+		void *vaddr;
 		uint32_t *reg_state;
-		struct page *page;
 
 		if (!ctx_obj)
 			continue;
 
-		if (i915_gem_object_get_pages(ctx_obj)) {
-			WARN(1, "Failed get_pages for context obj\n");
+		vaddr = i915_gem_object_pin_map(ctx_obj);
+		if (WARN_ON(IS_ERR(vaddr)))
 			continue;
-		}
-		page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-		reg_state = kmap_atomic(page);
+
+		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+		ctx_obj->dirty = true;
 
 		reg_state[CTX_RING_HEAD+1] = 0;
 		reg_state[CTX_RING_TAIL+1] = 0;
 
-		kunmap_atomic(reg_state);
+		i915_gem_object_unpin_map(ctx_obj);
 
 		ringbuf->head = 0;
 		ringbuf->tail = 0;
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -104,7 +104,10 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 				    struct intel_engine_cs *engine);
 void intel_lr_context_unpin(struct intel_context *ctx,
 			    struct intel_engine_cs *engine);
-void intel_lr_context_reset(struct drm_device *dev,
+
+struct drm_i915_private;
+
+void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 			    struct intel_context *ctx);
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 				     struct intel_engine_cs *engine);