Commit 7c9cf4e3 authored by Chris Wilson

drm/i915: Reduce engine->emit_flush() to a single mode parameter

Rather than passing a complete set of GPU cache domains for either
invalidation or flushing (or both), just pass a single mode parameter to
engine->emit_flush() to determine the required operations.

engine->emit_flush(GPU, 0) -> engine->emit_flush(EMIT_INVALIDATE)
engine->emit_flush(0, GPU) -> engine->emit_flush(EMIT_FLUSH)
engine->emit_flush(GPU, GPU) -> engine->emit_flush(EMIT_FLUSH | EMIT_INVALIDATE)

This allows us to extend the behaviour easily in future, for example if
we want just a command barrier without the overhead of flushing.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Dave Gordon <david.s.gordon@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470174640-18242-8-git-send-email-chris@chris-wilson.co.uk
parent c7fe7d25
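For context, the new interface is just a bit mask. The EMIT_* values below are the ones this patch adds to intel_engine_cs (see the final hunk of the diff); toy_emit_flush() and its caller are a purely illustrative stand-in for engine->emit_flush(), not driver code. This is a minimal sketch of how the old domain pairs map onto the new mode bits.

/*
 * Minimal sketch only, not the driver implementation.  The EMIT_* values
 * mirror the intel_ringbuffer.h hunk below; toy_emit_flush() and main()
 * are hypothetical.
 */
#include <stdio.h>

#define BIT(n)		(1u << (n))

#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)

/* Stand-in for engine->emit_flush(request, mode). */
static int toy_emit_flush(unsigned int mode)
{
	if (mode & EMIT_INVALIDATE)
		printf("invalidate GPU caches and TLBs\n");
	if (mode & EMIT_FLUSH)
		printf("flush GPU write caches\n");
	return 0;
}

int main(void)
{
	toy_emit_flush(EMIT_INVALIDATE);   /* was emit_flush(GPU, 0)   */
	toy_emit_flush(EMIT_FLUSH);        /* was emit_flush(0, GPU)   */
	toy_emit_flush(EMIT_BARRIER);      /* was emit_flush(GPU, GPU) */
	return 0;
}

Keeping the modes as independent bits is what lets EMIT_BARRIER remain a plain alias today while leaving room for new bits later.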
@@ -568,7 +568,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(dev_priv)) {
-		ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
+		ret = engine->emit_flush(req, EMIT_INVALIDATE);
 		if (ret)
 			return ret;
 	}
...
@@ -999,7 +999,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 		wmb();
 
 	/* Unconditionally invalidate GPU caches and TLBs. */
-	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
+	return req->engine->emit_flush(req, EMIT_INVALIDATE);
 }
 
 static bool
...
@@ -1666,8 +1666,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->emit_flush(req,
-				 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
 	if (ret)
 		return ret;
 
@@ -1694,8 +1693,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->emit_flush(req,
-				 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
 	if (ret)
 		return ret;
 
@@ -1713,9 +1711,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
-		ret = engine->emit_flush(req,
-					 I915_GEM_GPU_DOMAINS,
-					 I915_GEM_GPU_DOMAINS);
+		ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
 		if (ret)
 			return ret;
 	}
...
@@ -451,7 +451,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * what.
 	 */
 	if (flush_caches) {
-		ret = engine->emit_flush(request, 0, I915_GEM_GPU_DOMAINS);
+		ret = engine->emit_flush(request, EMIT_FLUSH);
 
 		/* Not allowed to fail! */
 		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
...
@@ -672,7 +672,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
+	return req->engine->emit_flush(req, EMIT_INVALIDATE);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -998,9 +998,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (w->count == 0)
 		return 0;
 
-	ret = req->engine->emit_flush(req,
-				      I915_GEM_GPU_DOMAINS,
-				      I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -1017,9 +1015,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	ret = req->engine->emit_flush(req,
-				      I915_GEM_GPU_DOMAINS,
-				      I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -1598,9 +1594,7 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
-static int gen8_emit_flush(struct drm_i915_gem_request *request,
-			   u32 invalidate_domains,
-			   u32 unused)
+static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 {
 	struct intel_ring *ring = request->ring;
 	u32 cmd;
@@ -1619,7 +1613,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 	 */
 	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 
-	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
+	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_INVALIDATE_TLB;
 		if (request->engine->id == VCS)
 			cmd |= MI_INVALIDATE_BSD;
@@ -1637,8 +1631,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 }
 
 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
-				  u32 invalidate_domains,
-				  u32 flush_domains)
+				  u32 mode)
 {
 	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
@@ -1650,14 +1643,14 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 
 	flags |= PIPE_CONTROL_CS_STALL;
 
-	if (flush_domains) {
+	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
 	}
 
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
 		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
...
@@ -67,19 +67,15 @@ static void __intel_engine_submit(struct intel_engine_cs *engine)
 }
 
 static int
-gen2_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32 invalidate_domains,
-		       u32 flush_domains)
+gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	u32 cmd;
 	int ret;
 
 	cmd = MI_FLUSH;
-	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
-		cmd |= MI_NO_WRITE_FLUSH;
 
-	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_READ_FLUSH;
 
 	ret = intel_ring_begin(req, 2);
@@ -94,9 +90,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
 }
 
 static int
-gen4_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32 invalidate_domains,
-		       u32 flush_domains)
+gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	u32 cmd;
@@ -131,7 +125,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 	 */
 
 	cmd = MI_FLUSH;
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_EXE_FLUSH;
 		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
 			cmd |= MI_INVALIDATE_ISP;
@@ -222,8 +216,7 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 }
 
 static int
-gen6_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32 invalidate_domains, u32 flush_domains)
+gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
@@ -240,7 +233,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 	 * number of bits based on the write domains has little performance
 	 * impact.
 	 */
-	if (flush_domains) {
+	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		/*
@@ -249,7 +242,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		 */
 		flags |= PIPE_CONTROL_CS_STALL;
 	}
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
 		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -297,8 +290,7 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 }
 
 static int
-gen7_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32 invalidate_domains, u32 flush_domains)
+gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
@@ -320,13 +312,13 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
 	 * number of bits based on the write domains has little performance
 	 * impact.
 	 */
-	if (flush_domains) {
+	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
 	}
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
 		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -384,8 +376,7 @@ gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 }
 
 static int
-gen8_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32 invalidate_domains, u32 flush_domains)
+gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
@@ -393,13 +384,13 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
 
 	flags |= PIPE_CONTROL_CS_STALL;
 
-	if (flush_domains) {
+	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
 	}
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
 		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -688,9 +679,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (w->count == 0)
 		return 0;
 
-	ret = req->engine->emit_flush(req,
-				      I915_GEM_GPU_DOMAINS,
-				      I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -707,9 +696,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	ret = req->engine->emit_flush(req,
-				      I915_GEM_GPU_DOMAINS,
-				      I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -1700,9 +1687,7 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
 }
 
 static int
-bsd_ring_flush(struct drm_i915_gem_request *req,
-	       u32 invalidate_domains,
-	       u32 flush_domains)
+bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -2533,8 +2518,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
-			       u32 invalidate, u32 flush)
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	uint32_t cmd;
@@ -2561,7 +2545,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 	 * operation is complete. This bit is only valid when the
 	 * Post-Sync Operation field is a value of 1h or 3h."
 	 */
-	if (invalidate & I915_GEM_GPU_DOMAINS)
+	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
 	intel_ring_emit(ring, cmd);
@@ -2653,8 +2637,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 }
 
 /* Blitter support (SandyBridge+) */
-static int gen6_ring_flush(struct drm_i915_gem_request *req,
-			   u32 invalidate, u32 flush)
+static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	uint32_t cmd;
@@ -2681,7 +2664,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 	 * operation is complete. This bit is only valid when the
 	 * Post-Sync Operation field is a value of 1h or 3h."
 	 */
-	if (invalidate & I915_GEM_DOMAIN_RENDER)
+	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring,
...
@@ -292,8 +292,10 @@ struct intel_engine_cs {
 	u32		ctx_desc_template;
 	int		(*emit_request)(struct drm_i915_gem_request *request);
 	int		(*emit_flush)(struct drm_i915_gem_request *request,
-				      u32 invalidate_domains,
-				      u32 flush_domains);
+				      u32 mode);
+#define EMIT_INVALIDATE BIT(0)
+#define EMIT_FLUSH BIT(1)
+#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
 	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
 					 u64 offset, unsigned dispatch_flags);
...
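As a purely hypothetical illustration of the extensibility the commit message mentions (a command barrier without the overhead of flushing), a later patch could give EMIT_BARRIER its own bit; callers that already use the symbolic names would not need to change. This sketch is not part of this commit, and all names and printf bodies are illustrative stand-ins.

/*
 * Hypothetical follow-up, NOT part of this commit: make EMIT_BARRIER its
 * own bit so a caller can request a command barrier with no cache work.
 */
#include <stdio.h>

#define BIT(n)		(1u << (n))

#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	BIT(2)	/* no longer INVALIDATE | FLUSH */

static int toy_emit_flush(unsigned int mode)
{
	if (mode & EMIT_INVALIDATE)
		printf("invalidate GPU caches and TLBs\n");
	if (mode & EMIT_FLUSH)
		printf("flush GPU write caches\n");
	if (mode & EMIT_BARRIER)
		printf("serialise against earlier commands only\n");
	return 0;
}

int main(void)
{
	/* e.g. the workaround emission paths could then ask for just this: */
	return toy_emit_flush(EMIT_BARRIER);
}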