Commit 73dec95e authored by Tvrtko Ursulin

drm/i915: Emit to ringbuffer directly

This removes the usage of intel_ring_emit in favour of
directly writing to the ring buffer.

intel_ring_emit was preventing the compiler from optimising
the fetch and increment of the current ring buffer pointer,
and was therefore generating very verbose code for every write.

It had no useful purpose since all ringbuffer operations
are started and ended with intel_ring_begin and
intel_ring_advance respectively, with no bail-out possible
in the middle, so it is fine to increment the tail in
intel_ring_begin and let the emitting code manage the
pointer itself.
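
To illustrate, a minimal sketch of the conversion pattern every
call site now follows (the two-dword MI_NOOP packet is made up for
the example; the helpers are the real ones used throughout the
diff below):

    /* Before: each dword goes through a helper which reloads and
     * stores ring->tail, so the compiler cannot keep the cursor
     * in a register.
     */
    ret = intel_ring_begin(req, 2);
    if (ret)
            return ret;
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);

    /* After: intel_ring_begin returns a pointer into the ring (or
     * an ERR_PTR on failure), the caller bumps a local cursor, and
     * intel_ring_advance only sanity-checks the final position.
     */
    cs = intel_ring_begin(req, 2);
    if (IS_ERR(cs))
            return PTR_ERR(cs);
    *cs++ = MI_NOOP;
    *cs++ = MI_NOOP;
    intel_ring_advance(req, cs);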

Removing these useless instructions saves approximately
two and a half kilobytes of text in my build.

I am not sure whether this has any measurable performance
impact, but executing a ton of useless instructions on fast
paths cannot be good.

v2:
 * Change return from intel_ring_begin to error pointer by
   popular demand.
 * Move tail increment to intel_ring_advance to enable some
   error checking.

v3:
 * Move tail advance back into intel_ring_begin.
 * Rebase and tidy.

v4:
 * Complete rebase after a few months since v3.

v5:
 * Remove unnecessary cast and fix !debug compile. (Chris Wilson)

v6:
 * Make intel_ring_offset take the request as well.
 * Fix recording of request postfix plus a sprinkle of asserts.
   (Chris Wilson; sketched below.)

v7:
 * Use intel_ring_offset to get the postfix. (Chris Wilson)
 * Convert the GVT code as well (sketched below).

v8:
 * Rename *out++ to *cs++.

v9:
 * Fix the out to cs conversion in GVT.

v10:
 * Rebase for new intel_ring_begin in selftests.
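
Two of the less obvious conversions above, sketched with fragments
taken from the hunks below (note that copy_gma_to_hva now returns
the number of bytes copied rather than 0):

    /* v6+v7: the request postfix is recorded from the cursor
     * rather than from ring->tail.
     */
    request->postfix = intel_ring_offset(request, cs);

    /* v7: GVT advances its local cursor by however many bytes
     * copy_gma_to_hva actually copied.
     */
    ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_top, cs);
    if (ret < 0)
            return ret;
    cs += ret / sizeof(u32);
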
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Zhi Wang <zhi.a.wang@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170214113242.29241-1-tvrtko.ursulin@linux.intel.com
parent d2d15016
@@ -1513,7 +1513,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
len += copy_len;
gma += copy_len;
}
return 0;
return len;
}
@@ -1630,7 +1630,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
gma, gma + bb_size,
dst);
if (ret) {
if (ret < 0) {
gvt_err("fail to copy guest ring buffer\n");
goto unmap_src;
}
@@ -2594,11 +2594,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
unsigned int copy_len = 0;
u32 *cs;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2612,36 +2609,33 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_top = workload->rb_start + guest_rb_size;
/* allocate shadow ring buffer */
ret = intel_ring_begin(workload->req, workload->rb_len / 4);
if (ret)
return ret;
cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
if (IS_ERR(cs))
return PTR_ERR(cs);
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
workload->shadow_ring_buffer_va = cs;
/* head > tail --> copy head <-> top */
if (gma_head > gma_tail) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
gma_head, gma_top,
workload->shadow_ring_buffer_va);
if (ret) {
gma_head, gma_top, cs);
if (ret < 0) {
gvt_err("fail to copy guest ring buffer\n");
return ret;
}
copy_len = gma_top - gma_head;
cs += ret / sizeof(u32);
gma_head = workload->rb_start;
}
/* copy head or start <-> tail */
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
gma_head, gma_tail,
workload->shadow_ring_buffer_va + copy_len);
if (ret) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
if (ret < 0) {
gvt_err("fail to copy guest ring buffer\n");
return ret;
}
ring->tail += workload->rb_len;
intel_ring_advance(ring);
cs += ret / sizeof(u32);
intel_ring_advance(workload->req, cs);
return 0;
}
@@ -2695,7 +2689,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->workload->vgpu->gtt.ggtt_mm,
guest_gma, guest_gma + ctx_size,
map);
if (ret) {
if (ret < 0) {
gvt_err("fail to copy guest indirect ctx\n");
goto unmap_src;
}
...
@@ -596,10 +596,9 @@ static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
enum intel_engine_id id;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
u32 *cs, flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
i915.semaphores ?
@@ -629,99 +628,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
if (INTEL_GEN(dev_priv) >= 7)
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
ret = intel_ring_begin(req, len);
if (ret)
return ret;
cs = intel_ring_begin(req, len);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_GEN(dev_priv) >= 7) {
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
intel_ring_emit_reg(ring,
RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring,
_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
*cs++ = i915_mmio_reg_offset(
RING_PSMI_CTL(signaller->mmio_base));
*cs++ = _MASKED_BIT_ENABLE(
GEN6_PSMI_SLEEP_MSG_DISABLE);
}
}
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring,
i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
*cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
*/
intel_ring_emit(ring, MI_NOOP);
*cs++ = MI_NOOP;
if (INTEL_GEN(dev_priv) >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(num_rings));
*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
last_reg = RING_PSMI_CTL(signaller->mmio_base);
intel_ring_emit_reg(ring, last_reg);
intel_ring_emit(ring,
_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
*cs++ = i915_mmio_reg_offset(last_reg);
*cs++ = _MASKED_BIT_DISABLE(
GEN6_PSMI_SLEEP_MSG_DISABLE);
}
/* Insert a delay before the next switch! */
intel_ring_emit(ring,
MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit_reg(ring, last_reg);
intel_ring_emit(ring,
i915_ggtt_offset(engine->scratch));
intel_ring_emit(ring, MI_NOOP);
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
*cs++ = i915_ggtt_offset(engine->scratch);
*cs++ = MI_NOOP;
}
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
}
intel_ring_advance(ring);
intel_ring_advance(req, cs);
return ret;
}
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
u32 *remap_info = req->i915->l3_parity.remap_info[slice];
struct intel_ring *ring = req->ring;
int i, ret;
u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
int i;
if (!remap_info)
return 0;
ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
if (ret)
return ret;
cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
/*
* Note: We do not worry about the concurrent register cacheline hang
* here because no other code should access these registers other than
* at initialization time.
*/
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
intel_ring_emit(ring, remap_info[i]);
*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
*cs++ = remap_info[i];
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
}
...
@@ -1336,25 +1336,25 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
struct intel_ring *ring = req->ring;
int ret, i;
u32 *cs;
int i;
if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
ret = intel_ring_begin(req, 4 * 3);
if (ret)
return ret;
cs = intel_ring_begin(req, 4 * 3);
if (IS_ERR(cs))
return PTR_ERR(cs);
for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(ring, 0);
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
*cs++ = 0;
}
intel_ring_advance(ring);
intel_ring_advance(req, cs);
return 0;
}
@@ -1415,7 +1415,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
struct drm_i915_private *dev_priv = params->request->i915;
u64 exec_start, exec_len;
int instp_mode;
u32 instp_mask;
u32 instp_mask, *cs;
int ret;
ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1461,17 +1461,15 @@ execbuf_submit(struct i915_execbuffer_params *params,
if (params->engine->id == RCS &&
instp_mode != dev_priv->relative_constants_mode) {
struct intel_ring *ring = params->request->ring;
ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, INSTPM);
intel_ring_emit(ring, instp_mask << 16 | instp_mode);
intel_ring_advance(ring);
cs = intel_ring_begin(params->request, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_NOOP;
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(INSTPM);
*cs++ = instp_mask << 16 | instp_mode;
intel_ring_advance(params->request, cs);
dev_priv->relative_constants_mode = instp_mode;
}
...
@@ -686,23 +686,22 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
unsigned entry,
dma_addr_t addr)
{
struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
int ret;
u32 *cs;
BUG_ON(entry >= 4);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
cs = intel_ring_begin(req, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
intel_ring_emit(ring, upper_32_bits(addr));
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
intel_ring_emit(ring, lower_32_bits(addr));
intel_ring_advance(ring);
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
*cs++ = upper_32_bits(addr);
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
*cs++ = lower_32_bits(addr);
intel_ring_advance(req, cs);
return 0;
}
@@ -1730,8 +1729,8 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
u32 *cs;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1739,17 +1738,17 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
cs = intel_ring_begin(req, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
*cs++ = MI_LOAD_REGISTER_IMM(2);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
*cs++ = PP_DIR_DCLV_2G;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = get_pd_offset(ppgtt);
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
}
@@ -1757,8 +1756,8 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
u32 *cs;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1766,17 +1765,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
cs = intel_ring_begin(req, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(2);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
*cs++ = PP_DIR_DCLV_2G;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = get_pd_offset(ppgtt);
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
/* XXX: RCS is the only one to auto invalidate the TLBs? */
if (engine->id != RCS) {
...
@@ -824,6 +824,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
struct intel_ring *ring = request->ring;
struct intel_timeline *timeline = request->timeline;
struct drm_i915_gem_request *prev;
u32 *cs;
int err;
lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -862,10 +863,9 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
*/
err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
GEM_BUG_ON(err);
request->postfix = ring->tail;
ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
GEM_BUG_ON(IS_ERR(cs));
request->postfix = intel_ring_offset(request, cs);
/* Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
...
@@ -10158,14 +10158,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
u32 flip_mask, *cs;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
cs = intel_ring_begin(req, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
@@ -10174,13 +10172,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
*cs++ = MI_NOOP;
*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
*cs++ = fb->pitches[0];
*cs++ = intel_crtc->flip_work->gtt_offset;
*cs++ = 0; /* aux display base address, unused */
return 0;
}
@@ -10192,26 +10189,23 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
u32 flip_mask, *cs;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
cs = intel_ring_begin(req, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(ring, MI_NOOP);
*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
*cs++ = MI_NOOP;
*cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
*cs++ = fb->pitches[0];
*cs++ = intel_crtc->flip_work->gtt_offset;
*cs++ = MI_NOOP;
return 0;
}
@@ -10223,25 +10217,22 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
u32 pf, pipesrc, *cs;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
cs = intel_ring_begin(req, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
intel_fb_modifier_to_tiling(fb->modifier));
*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
*cs++ = fb->pitches[0];
*cs++ = intel_crtc->flip_work->gtt_offset |
intel_fb_modifier_to_tiling(fb->modifier);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -10249,7 +10240,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
*cs++ = pf | pipesrc;
return 0;
}
@@ -10261,21 +10252,17 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
u32 pf, pipesrc, *cs;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
cs = intel_ring_begin(req, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] |
intel_fb_modifier_to_tiling(fb->modifier));
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
*cs++ = intel_crtc->flip_work->gtt_offset;
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -10285,7 +10272,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
*cs++ = pf | pipesrc;
return 0;
}
@@ -10298,9 +10285,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
uint32_t flags)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
u32 *cs, plane_bit = 0;
int len, ret;
switch (intel_crtc->plane) {
@@ -10344,9 +10330,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
if (ret)
return ret;
ret = intel_ring_begin(req, len);
if (ret)
return ret;
cs = intel_ring_begin(req, len);
if (IS_ERR(cs))
return PTR_ERR(cs);
/* Unmask the flip-done completion message. Note that the bspec says that
* we should do this for both the BCS and RCS, and that we must not unmask
@@ -10358,31 +10344,28 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* to zero does lead to lockups within MI_DISPLAY_FLIP.
*/
if (req->engine->id == RCS) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE));
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(DERRMR);
*cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
DERRMR_PIPEB_PRI_FLIP_DONE |
DERRMR_PIPEC_PRI_FLIP_DONE);
if (IS_GEN8(dev_priv))
intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT;
else
intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit_reg(ring, DERRMR);
intel_ring_emit(ring,
i915_ggtt_offset(req->engine->scratch) + 256);
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(DERRMR);
*cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
if (IS_GEN8(dev_priv)) {
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
*cs++ = 0;
*cs++ = MI_NOOP;
}
}
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, fb->pitches[0] |
intel_fb_modifier_to_tiling(fb->modifier));
intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
intel_ring_emit(ring, (MI_NOOP));
*cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
*cs++ = intel_crtc->flip_work->gtt_offset;
*cs++ = MI_NOOP;
return 0;
}
...
@@ -276,23 +276,22 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
struct intel_ring *ring = req->ring;
enum intel_engine_id engine = req->engine->id;
unsigned int index;
int ret;
u32 *cs;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
if (ret)
return ret;
cs = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
if (IS_ERR(cs))
return PTR_ERR(cs);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
*cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
for (index = 0; index < table->size; index++) {
intel_ring_emit_reg(ring, mocs_register(engine, index));
intel_ring_emit(ring, table->table[index].control_value);
*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
*cs++ = table->table[index].control_value;
}
/*
@@ -304,12 +303,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
* that value to all the used entries.
*/
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
intel_ring_emit_reg(ring, mocs_register(engine, index));
intel_ring_emit(ring, table->table[0].control_value);
*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
*cs++ = table->table[0].control_value;
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
}
@@ -336,29 +335,27 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
struct intel_ring *ring = req->ring;
unsigned int i;
int ret;
u32 *cs;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
if (ret)
return ret;
cs = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
if (IS_ERR(cs))
return PTR_ERR(cs);
intel_ring_emit(ring,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
*cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2);
for (i = 0; i < table->size/2; i++) {
intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
*cs++ = l3cc_combine(table, 2 * i, 2 * i + 1);
}
if (table->size & 0x01) {
/* Odd table size - 1 left over */
intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
*cs++ = l3cc_combine(table, 2 * i, 0);
i++;
}
@@ -368,12 +365,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
* they are reserved by the hardware.
*/
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
intel_ring_emit(ring, l3cc_combine(table, 0, 0));
*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
*cs++ = l3cc_combine(table, 0, 0);
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
}
...
@@ -267,8 +267,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_gem_request *req;
struct intel_ring *ring;
int ret;
u32 *cs;
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
@@ -277,10 +276,10 @@ static int intel_overlay_on(struct intel_overlay *overlay)
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 4);
if (ret) {
cs = intel_ring_begin(req, 4);
if (IS_ERR(cs)) {
i915_add_request_no_flush(req);
return ret;
return PTR_ERR(cs);
}
overlay->active = true;
@@ -288,12 +287,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
if (IS_I830(dev_priv))
i830_overlay_clock_gating(dev_priv, false);
ring = req->ring;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
*cs++ = overlay->flip_addr | OFC_UPDATE;
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return intel_overlay_do_wait_request(overlay, req, NULL);
}
@@ -326,10 +324,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_gem_request *req;
struct intel_ring *ring;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
int ret;
u32 tmp, *cs;
WARN_ON(!overlay->active);
@@ -345,16 +341,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {
cs = intel_ring_begin(req, 2);
if (IS_ERR(cs)) {
i915_add_request_no_flush(req);
return ret;
return PTR_ERR(cs);
}
ring = req->ring;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
*cs++ = flip_addr;
intel_ring_advance(req, cs);
intel_overlay_flip_prepare(overlay, vma);
@@ -408,9 +403,7 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_i915_gem_request *req;
struct intel_ring *ring;
u32 flip_addr = overlay->flip_addr;
int ret;
u32 *cs, flip_addr = overlay->flip_addr;
WARN_ON(!overlay->active);
@@ -424,25 +417,23 @@ static int intel_overlay_off(struct intel_overlay *overlay)
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 6);
if (ret) {
cs = intel_ring_begin(req, 6);
if (IS_ERR(cs)) {
i915_add_request_no_flush(req);
return ret;
return PTR_ERR(cs);
}
ring = req->ring;
/* wait for overlay to go idle */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
*cs++ = flip_addr;
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
/* turn overlay off */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
*cs++ = flip_addr;
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
intel_ring_advance(ring);
intel_ring_advance(req, cs);
intel_overlay_flip_prepare(overlay, NULL);
@@ -465,6 +456,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
u32 *cs;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -478,23 +470,20 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
struct drm_i915_gem_request *req;
struct intel_ring *ring;
req = alloc_request(overlay);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {
cs = intel_ring_begin(req, 2);
if (IS_ERR(cs)) {
i915_add_request_no_flush(req);
return ret;
return PTR_ERR(cs);
}
ring = req->ring;
intel_ring_emit(ring,
MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
ret = intel_overlay_do_wait_request(overlay, req,
intel_overlay_release_old_vid_tail);
...
@@ -145,7 +145,6 @@ struct intel_ring {
u32 head;
u32 tail;
GEM_DEBUG_DECL(u32 advance);
int space;
int size;
@@ -292,7 +291,7 @@ struct intel_engine_cs {
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
u32 *out);
u32 *cs);
int emit_breadcrumb_sz;
/* Pass the request to the hardware queue (e.g. directly into
@@ -375,7 +374,7 @@ struct intel_engine_cs {
/* AKA wait() */
int (*sync_to)(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal);
u32 *(*signal)(struct drm_i915_gem_request *req, u32 *out);
u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
} semaphore;
/* Execlists */
@@ -497,21 +496,12 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
*(uint32_t *)(ring->vaddr + ring->tail) = data;
ring->tail += 4;
}
u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);
static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}
static inline void intel_ring_advance(struct intel_ring *ring)
static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
/* Dummy function.
*
@@ -521,14 +511,16 @@ static inline void intel_ring_advance(struct intel_ring *ring)
* reserved for the command packet (i.e. the value passed to
* intel_ring_begin()).
*/
GEM_DEBUG_BUG_ON(ring->tail != ring->advance);
GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
}
static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
static inline u32
intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
u32 offset = addr - ring->vaddr;
return offset & (ring->size - 1);
u32 offset = addr - req->ring->vaddr;
GEM_BUG_ON(offset > req->ring->size);
return offset & (req->ring->size - 1);
}
int __intel_ring_space(int head, int tail, int size);
...
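
(Aside: a worked example of the masking in intel_ring_offset above,
assuming a hypothetical 4096-byte ring: a cursor one dword past the
last slot yields offset = 4096 & (4096 - 1) = 0, so the recorded
offset wraps to 0 rather than ring->size, which, as the comment
notes, can hang some GPUs.)
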
@@ -186,6 +186,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct drm_i915_gem_request *rq;
struct i915_vma *vma;
u32 *cs;
int err;
err = i915_gem_object_set_to_gtt_domain(obj, true);
@@ -202,30 +203,30 @@ static int gpu_set(struct drm_i915_gem_object *obj,
return PTR_ERR(rq);
}
err = intel_ring_begin(rq, 4);
if (err) {
cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs)) {
__i915_add_request(rq, false);
i915_vma_unpin(vma);
return err;
return PTR_ERR(cs);
}
if (INTEL_GEN(i915) >= 8) {
intel_ring_emit(rq->ring, MI_STORE_DWORD_IMM_GEN4 | 1 << 22);
intel_ring_emit(rq->ring, lower_32_bits(i915_ggtt_offset(vma) + offset));
intel_ring_emit(rq->ring, upper_32_bits(i915_ggtt_offset(vma) + offset));
intel_ring_emit(rq->ring, v);
*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = v;
} else if (INTEL_GEN(i915) >= 4) {
intel_ring_emit(rq->ring, MI_STORE_DWORD_IMM_GEN4 | 1 << 22);
intel_ring_emit(rq->ring, 0);
intel_ring_emit(rq->ring, i915_ggtt_offset(vma) + offset);
intel_ring_emit(rq->ring, v);
*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
*cs++ = 0;
*cs++ = i915_ggtt_offset(vma) + offset;
*cs++ = v;
} else {
intel_ring_emit(rq->ring, MI_STORE_DWORD_IMM | 1 << 22);
intel_ring_emit(rq->ring, i915_ggtt_offset(vma) + offset);
intel_ring_emit(rq->ring, v);
intel_ring_emit(rq->ring, MI_NOOP);
*cs++ = MI_STORE_DWORD_IMM | 1 << 22;
*cs++ = i915_ggtt_offset(vma) + offset;
*cs++ = v;
*cs++ = MI_NOOP;
}
intel_ring_advance(rq->ring);
intel_ring_advance(rq, cs);
i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unpin(vma);
...