Commit 57e88531 authored by Chris Wilson

drm/i915: Use VMA for ringbuffer tracking

Use the GGTT VMA as the primary cookie for handling ring objects, as the
most common actions upon the ring are mapping and unmapping, which act
upon the VMA itself. By restructuring the code to work with the ring
VMA, we can shrink the code and remove a few cycles from context pinning.

v2: Move the flush of the object back to before the first pin. We use
the am-I-bound? query so that the flush only has to be checked on the
first bind, and so avoid stalling on active rings.
Lots of little renames and small hoops.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1471254551-25805-18-git-send-email-chris@chris-wilson.co.uk
parent e5cdb22b
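A minimal sketch of the two ideas in the commit message (the GGTT VMA as the ring's only GEM cookie, and the v2 flush that only happens before the first bind), using made-up stand-in types and helpers (fake_obj, fake_vma, fake_ring, vma_is_bound, obj_flush_cpu_writes) rather than the driver's real i915 structures or API; it models only the control flow, not actual GGTT binding or cache flushing:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in types: the ring keeps only a pointer to its GGTT VMA (the
 * "primary cookie"); the backing object hangs off the VMA. */
struct fake_obj {
	bool dirty;                /* pretend there are CPU writes to flush */
};

struct fake_vma {
	struct fake_obj *obj;
	bool bound;                /* is the VMA bound into the (fake) GGTT? */
	unsigned int pin_count;
};

struct fake_ring {
	struct fake_vma *vma;      /* the only ring <-> GEM link we keep */
};

/* Hypothetical stand-in for the "am I bound?" query. */
static bool vma_is_bound(const struct fake_vma *vma)
{
	return vma->bound;
}

static void obj_flush_cpu_writes(struct fake_obj *obj)
{
	if (obj->dirty) {
		printf("flushing CPU writes before the first bind\n");
		obj->dirty = false;
	}
}

/* Sketch of the v2 idea: only a not-yet-bound VMA can still carry stale
 * CPU writes, so the flush is checked before the first bind and skipped
 * on every later pin of an already-bound (possibly active) ring. */
static int ring_pin(struct fake_ring *ring)
{
	struct fake_vma *vma = ring->vma;

	if (!vma_is_bound(vma))
		obj_flush_cpu_writes(vma->obj);

	vma->bound = true;
	vma->pin_count++;
	return 0;
}

static void ring_unpin(struct fake_ring *ring)
{
	if (ring->vma->pin_count)
		ring->vma->pin_count--;
}

int main(void)
{
	struct fake_obj obj = { .dirty = true };
	struct fake_vma vma = { .obj = &obj };
	struct fake_ring ring = { .vma = &vma };

	ring_pin(&ring);    /* first pin: flush happens here */
	ring_pin(&ring);    /* later pins: no flush, no stall */
	ring_unpin(&ring);
	ring_unpin(&ring);
	return 0;
}

Compiling and running the sketch prints the flush message exactly once, on the first pin; repinning an already-bound ring skips the check, which is the stall the v2 note is avoiding.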
@@ -356,7 +356,7 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
                 if (ctx->engine[n].state)
                         per_file_stats(0, ctx->engine[n].state->obj, data);
                 if (ctx->engine[n].ring)
-                        per_file_stats(0, ctx->engine[n].ring->obj, data);
+                        per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
         }

         return 0;
@@ -1128,12 +1128,12 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
                         ee->cpu_ring_tail = ring->tail;
                         ee->ringbuffer =
                                 i915_error_ggtt_object_create(dev_priv,
                                                               ring->obj);
-                                                              ring->obj);
+                                                              ring->vma->obj);
                 }

                 ee->hws_page =
                         i915_error_ggtt_object_create(dev_priv,
-                                                      engine->status_page.obj);
+                                                      engine->status_page.vma->obj);

                 ee->wa_ctx = i915_error_ggtt_object_create(dev_priv,
                                                            engine->wa_ctx.obj);
@@ -343,7 +343,6 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
                 struct intel_context *ce = &ctx->engine[engine->id];
                 uint32_t guc_engine_id = engine->guc_id;
                 struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];
-                struct drm_i915_gem_object *obj;

                 /* TODO: We have a design issue to be solved here. Only when we
                  * receive the first batch, we know which engine is used by the
@@ -358,17 +357,14 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
                 lrc->context_desc = lower_32_bits(ce->lrc_desc);

                 /* The state page is after PPHWSP */
-                gfx_addr = ce->state->node.start;
-                lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
+                lrc->ring_lcra =
+                        ce->state->node.start + LRC_STATE_PN * PAGE_SIZE;
                 lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
                                 (guc_engine_id << GUC_ELC_ENGINE_OFFSET);

-                obj = ce->ring->obj;
-                gfx_addr = i915_gem_obj_ggtt_offset(obj);
-
-                lrc->ring_begin = gfx_addr;
-                lrc->ring_end = gfx_addr + obj->base.size - 1;
-                lrc->ring_next_free_location = gfx_addr;
+                lrc->ring_begin = ce->ring->vma->node.start;
+                lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
+                lrc->ring_next_free_location = lrc->ring_begin;
                 lrc->ring_current_tail_pointer_value = 0;

                 desc.engines_used |= (1 << guc_engine_id);
@@ -943,7 +939,7 @@ static void guc_create_ads(struct intel_guc *guc)
          * to find it.
          */
         engine = &dev_priv->engine[RCS];
-        ads->golden_context_lrca = engine->status_page.gfx_addr;
+        ads->golden_context_lrca = engine->status_page.ggtt_offset;

         for_each_engine(engine, dev_priv)
                 ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
@@ -1273,7 +1273,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
         struct drm_i915_private *dev_priv = engine->i915;

         I915_WRITE(RING_HWS_PGA(engine->mmio_base),
-                   (u32)engine->status_page.gfx_addr);
+                   engine->status_page.ggtt_offset);
         POSTING_READ(RING_HWS_PGA(engine->mmio_base));
 }
@@ -1695,9 +1695,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
         intel_engine_cleanup_common(engine);

-        if (engine->status_page.obj) {
-                i915_gem_object_unpin_map(engine->status_page.obj);
-                engine->status_page.obj = NULL;
+        if (engine->status_page.vma) {
+                i915_gem_object_unpin_map(engine->status_page.vma->obj);
+                engine->status_page.vma = NULL;
         }

         intel_lr_context_unpin(dev_priv->kernel_context, engine);
@@ -1744,16 +1744,17 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
 static int
 lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma)
 {
+        const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE;
         void *hws;

         /* The HWSP is part of the default context object in LRC mode. */
-        engine->status_page.gfx_addr =
-                vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
         hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
         if (IS_ERR(hws))
                 return PTR_ERR(hws);
-        engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
-        engine->status_page.obj = vma->obj;
+        engine->status_page.page_addr = hws + hws_offset;
+        engine->status_page.ggtt_offset = vma->node.start + hws_offset;
+        engine->status_page.vma = vma;

         return 0;
 }
This diff is collapsed.
@@ -26,10 +26,10 @@
  */
 #define I915_RING_FREE_SPACE 64

 struct intel_hw_status_page {
-        u32 *page_addr;
-        unsigned int gfx_addr;
-        struct drm_i915_gem_object *obj;
+        struct i915_vma *vma;
+        u32 *page_addr;
+        u32 ggtt_offset;
 };

 #define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
@@ -83,9 +83,8 @@ struct intel_engine_hangcheck {
 };

 struct intel_ring {
-        struct drm_i915_gem_object *obj;
-        void *vaddr;
         struct i915_vma *vma;
+        void *vaddr;

         struct intel_engine_cs *engine;
         struct list_head link;
@@ -97,6 +96,7 @@ struct intel_ring {
         int space;
         int size;
         int effective_size;
+        bool needs_iomap;

         /** We track the position of the requests in the ring buffer, and
          * when each is retired we increment last_retired_head as the GPU
@@ -516,7 +516,7 @@ int init_workarounds_ring(struct intel_engine_cs *engine);

 static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
 {
-        return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+        return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
 }

 /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */