Commit 65e4760e authored by Chris Wilson

drm/i915: Introduce a global_seqno for each request

Though we will have multiple timelines, we still have a single timeline
of execution. This we can use to provide an execution and retirement order
of requests. This keeps tracking the execution of requests simple, and is
vital for preserving a single waiter (i.e. so that we can order the waiters
such that only the earliest to wake up need be woken). To accomplish this we
distinguish the seqno used to order requests per-context (external) from
that used internally for execution.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-26-chris@chris-wilson.co.uk
parent 4680816b
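
In outline: every request keeps its per-context fence.seqno, while the new
global_seqno records the request's position on the single execution timeline,
with zero reserved to mean "not on the execution timeline". A minimal sketch
of the resulting completion check (this mirrors the inline helpers added to
the request header below; intel_engine_get_seqno() reads the breadcrumb last
written by the GPU):

	static inline bool
	request_completed_sketch(const struct drm_i915_gem_request *req)
	{
		if (!req->global_seqno)	/* no execution slot assigned */
			return false;	/* cannot have completed */

		/* compare the engine breadcrumb with our execution seqno */
		return i915_seqno_passed(intel_engine_get_seqno(req->engine),
					 req->global_seqno);
	}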
@@ -637,7 +637,7 @@ static void print_request(struct seq_file *m,
 	rcu_read_lock();
 	task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
 	seq_printf(m, "%s%x [%x:%x] @ %d: %s [%d]\n", prefix,
-		   rq->fence.seqno, rq->ctx->hw_id, rq->fence.seqno,
+		   rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno,
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
 		   task ? task->comm : "<unknown>",
 		   task ? task->pid : -1);
...
@@ -4050,7 +4050,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
 	/* Before we do the heavier coherent read of the seqno,
 	 * check the value (hopefully) in the CPU cacheline.
 	 */
-	if (i915_gem_request_completed(req))
+	if (__i915_gem_request_completed(req))
 		return true;
 
 	/* Ensure our read of the seqno is coherent so that we
@@ -4101,7 +4101,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
 			wake_up_process(tsk);
 		rcu_read_unlock();
 
-		if (i915_gem_request_completed(req))
+		if (__i915_gem_request_completed(req))
 			return true;
 	}
...
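
The switch to the double-underscore variant is safe here because
__i915_request_irq_complete() only runs on behalf of an active waiter, and
i915_wait_request() below now asserts (GEM_BUG_ON) that any request being
waited upon carries a non-zero global_seqno.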
@@ -2615,7 +2615,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 		return;
 
 	DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
-			 engine->name, request->fence.seqno);
+			 engine->name, request->global_seqno);
 
 	/* Setup the CS to resume from the breadcrumb of the hung request */
 	engine->reset_hw(engine, request);
...
@@ -376,7 +376,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we change chase the request->engine pointer,
-	 * read the request->fence.seqno and increment the reference count.
+	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
@@ -418,6 +418,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	INIT_LIST_HEAD(&req->active_list);
 	req->i915 = dev_priv;
 	req->engine = engine;
+	req->global_seqno = seqno;
 	req->ctx = i915_gem_context_get(ctx);
 
 	/* No zalloc, must clear what we need by hand */
@@ -475,8 +476,15 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
 		return ret < 0 ? ret : 0;
 	}
 
+	if (!from->global_seqno) {
+		ret = i915_sw_fence_await_dma_fence(&to->submit,
+						    &from->fence, 0,
+						    GFP_KERNEL);
+		return ret < 0 ? ret : 0;
+	}
+
 	idx = intel_engine_sync_index(from->engine, to->engine);
-	if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
+	if (from->global_seqno <= from->engine->semaphore.sync_seqno[idx])
 		return 0;
 
 	trace_i915_gem_ring_sync_to(to, from);
@@ -494,7 +502,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
 		return ret;
 	}
 
-	from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
+	from->engine->semaphore.sync_seqno[idx] = from->global_seqno;
 	return 0;
 }
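
The early-out added above is the interesting part of this hunk: a request
whose global_seqno is still zero has no hardware semaphore value the GPU
could poll on, so ordering falls back to software. The same code, restated
with annotations:

	if (!from->global_seqno) {
		/* No execution seqno: nothing for the GPU to wait on, so
		 * gate 'to's submit fence on 'from's dma-fence instead.
		 */
		ret = i915_sw_fence_await_dma_fence(&to->submit,
						    &from->fence, 0,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}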
@@ -774,7 +782,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 	timeout_us += local_clock_us(&cpu);
 	do {
-		if (i915_gem_request_completed(req))
+		if (__i915_gem_request_completed(req))
 			return true;
 
 		if (signal_pending_state(state, current))
@@ -883,6 +891,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 		GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
 	}
+	GEM_BUG_ON(!req->global_seqno);
 
 	/* Optimistic short spin before touching IRQs */
 	if (i915_spin_request(req, state, 5))
@@ -892,7 +901,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 	if (flags & I915_WAIT_LOCKED)
 		add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
-	intel_wait_init(&wait, req->fence.seqno);
+	intel_wait_init(&wait, req->global_seqno);
 
 	if (intel_engine_add_wait(req->engine, &wait))
 		/* In order to check that we haven't missed the interrupt
 		 * as we enabled it, we need to kick ourselves to do a
...
@@ -87,6 +87,8 @@ struct drm_i915_gem_request {
 	struct i915_sw_fence submit;
 	wait_queue_t submitq;
 
+	u32 global_seqno;
+
 	/** GEM sequence number associated with the previous request,
 	 * when the HWS breadcrumb is equal to this the GPU is processing
 	 * this request.
@@ -163,7 +165,7 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
 static inline u32
 i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
 {
-	return req ? req->fence.seqno : 0;
+	return req ? req->global_seqno : 0;
 }
 
 static inline struct intel_engine_cs *
@@ -248,17 +250,37 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
 }
 
 static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
+__i915_gem_request_started(const struct drm_i915_gem_request *req)
 {
+	GEM_BUG_ON(!req->global_seqno);
 	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
 				 req->previous_seqno);
 }
 
 static inline bool
-i915_gem_request_completed(const struct drm_i915_gem_request *req)
+i915_gem_request_started(const struct drm_i915_gem_request *req)
 {
+	if (!req->global_seqno)
+		return false;
+
+	return __i915_gem_request_started(req);
+}
+
+static inline bool
+__i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+	GEM_BUG_ON(!req->global_seqno);
 	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
-				 req->fence.seqno);
+				 req->global_seqno);
+}
+
+static inline bool
+i915_gem_request_completed(const struct drm_i915_gem_request *req)
+{
+	if (!req->global_seqno)
+		return false;
+
+	return __i915_gem_request_completed(req);
 }
 
 bool __i915_spin_request(const struct drm_i915_gem_request *request,
@@ -266,7 +288,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *request,
 static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
 				     int state, unsigned long timeout_us)
 {
-	return (i915_gem_request_started(request) &&
+	return (__i915_gem_request_started(request) &&
 		__i915_spin_request(request, state, timeout_us));
 }
...
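
The header now follows a consistent convention: the plain predicates tolerate
a request with no global_seqno (reporting it as neither started nor
completed), while the double-underscore variants assume the caller has
already proven global_seqno != 0 and merely assert it. A hypothetical caller,
to illustrate (sketch only, not part of the patch):

	static bool example_completed(const struct drm_i915_gem_request *req)
	{
		if (!req->global_seqno)
			return false;	/* not on the execution timeline */

		/* global_seqno is known non-zero, so the cheaper __ variant
		 * (which only GEM_BUG_ONs the invariant) is safe from here.
		 */
		return __i915_gem_request_completed(req);
	}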
@@ -1176,7 +1176,7 @@ static void record_request(struct drm_i915_gem_request *request,
 			   struct drm_i915_error_request *erq)
 {
 	erq->context = request->ctx->hw_id;
-	erq->seqno = request->fence.seqno;
+	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
 	erq->head = request->head;
 	erq->tail = request->tail;
...
@@ -554,7 +554,7 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
 	wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
 
 	wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
-	wqi->fence_id = rq->fence.seqno;
+	wqi->fence_id = rq->global_seqno;
 
 	kunmap_atomic(base);
 }
@@ -655,7 +655,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
 		client->b_fail += 1;
 
 	guc->submissions[engine_id] += 1;
-	guc->last_seqno[engine_id] = rq->fence.seqno;
+	guc->last_seqno[engine_id] = rq->global_seqno;
 
 	spin_unlock(&client->wq_lock);
 }
...
@@ -466,7 +466,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 			   __entry->dev = from->i915->drm.primary->index;
 			   __entry->sync_from = from->engine->id;
 			   __entry->sync_to = to->engine->id;
-			   __entry->seqno = from->fence.seqno;
+			   __entry->seqno = from->global_seqno;
 			   ),
 
 	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -489,7 +489,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->fence.seqno;
+			   __entry->seqno = req->global_seqno;
 			   __entry->flags = flags;
 			   dma_fence_enable_sw_signaling(&req->fence);
 			   ),
@@ -534,7 +534,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->fence.seqno;
+			   __entry->seqno = req->global_seqno;
 			   ),
 
 	    TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -596,7 +596,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 	    TP_fast_assign(
 			   __entry->dev = req->i915->drm.primary->index;
 			   __entry->ring = req->engine->id;
-			   __entry->seqno = req->fence.seqno;
+			   __entry->seqno = req->global_seqno;
 			   __entry->blocking =
 				     mutex_is_locked(&req->i915->drm.struct_mutex);
 			   ),
...
@@ -504,9 +504,11 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
 	/* locked by dma_fence_enable_sw_signaling() */
 	assert_spin_locked(&request->lock);
 
+	if (!request->global_seqno)
+		return;
+
 	request->signaling.wait.tsk = b->signaler;
-	request->signaling.wait.seqno = request->fence.seqno;
+	request->signaling.wait.seqno = request->global_seqno;
 	i915_gem_request_get(request);
 
 	spin_lock(&b->lock);
@@ -530,8 +532,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
 	p = &b->signals.rb_node;
 	while (*p) {
 		parent = *p;
-		if (i915_seqno_passed(request->fence.seqno,
-				      to_signaler(parent)->fence.seqno)) {
+		if (i915_seqno_passed(request->global_seqno,
+				      to_signaler(parent)->global_seqno)) {
 			p = &parent->rb_right;
 			first = false;
 		} else {
...
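
Signalers are kept in an rb-tree sorted by seqno so the signaler thread can
fire fences in retirement order; global_seqno, being ordered across all
contexts, is the only seqno that can serve as that sort key. A request with
no global_seqno bails out early above and is never armed for hardware
signaling.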
@@ -1584,7 +1584,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 			intel_hws_seqno_address(request->engine) |
 			MI_FLUSH_DW_USE_GTT);
 	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, request->fence.seqno);
+	intel_ring_emit(ring, request->global_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
 	return intel_logical_ring_advance(request);
@@ -1613,7 +1613,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 				PIPE_CONTROL_QW_WRITE));
 	intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
 	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(request));
+	intel_ring_emit(ring, request->global_seqno);
 	/* We're thrashing one dword of HWS. */
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
...
@@ -1238,7 +1238,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *req)
 				PIPE_CONTROL_CS_STALL);
 		intel_ring_emit(ring, lower_32_bits(gtt_offset));
 		intel_ring_emit(ring, upper_32_bits(gtt_offset));
-		intel_ring_emit(ring, req->fence.seqno);
+		intel_ring_emit(ring, req->global_seqno);
 		intel_ring_emit(ring, 0);
 		intel_ring_emit(ring,
 				MI_SEMAPHORE_SIGNAL |
@@ -1274,7 +1274,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *req)
 				lower_32_bits(gtt_offset) |
 				MI_FLUSH_DW_USE_GTT);
 		intel_ring_emit(ring, upper_32_bits(gtt_offset));
-		intel_ring_emit(ring, req->fence.seqno);
+		intel_ring_emit(ring, req->global_seqno);
 		intel_ring_emit(ring,
 				MI_SEMAPHORE_SIGNAL |
 				MI_SEMAPHORE_TARGET(waiter->hw_id));
@@ -1308,7 +1308,7 @@ static int gen6_signal(struct drm_i915_gem_request *req)
 		if (i915_mmio_reg_valid(mbox_reg)) {
 			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 			intel_ring_emit_reg(ring, mbox_reg);
-			intel_ring_emit(ring, req->fence.seqno);
+			intel_ring_emit(ring, req->global_seqno);
 		}
 	}
@@ -1339,7 +1339,7 @@ static int i9xx_emit_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, req->fence.seqno);
+	intel_ring_emit(ring, req->global_seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_advance(ring);
@@ -1389,7 +1389,7 @@ static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 				PIPE_CONTROL_QW_WRITE));
 	intel_ring_emit(ring, intel_hws_seqno_address(engine));
 	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
+	intel_ring_emit(ring, req->global_seqno);
 	/* We're thrashing one dword of HWS. */
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
@@ -1427,7 +1427,7 @@ gen8_ring_sync_to(struct drm_i915_gem_request *req,
 			MI_SEMAPHORE_WAIT |
 			MI_SEMAPHORE_GLOBAL_GTT |
 			MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(ring, signal->fence.seqno);
+	intel_ring_emit(ring, signal->global_seqno);
 	intel_ring_emit(ring, lower_32_bits(offset));
 	intel_ring_emit(ring, upper_32_bits(offset));
 	intel_ring_advance(ring);
@@ -1465,7 +1465,7 @@ gen6_ring_sync_to(struct drm_i915_gem_request *req,
 	 * seqno is >= the last seqno executed. However for hardware the
 	 * comparison is strictly greater than.
 	 */
-	intel_ring_emit(ring, signal->fence.seqno - 1);
+	intel_ring_emit(ring, signal->global_seqno - 1);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
...
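
A worked note on the "- 1" in the last hunk: the driver's i915_seqno_passed()
treats "passed" inclusively, whereas the gen6 semaphore hardware releases the
waiter only on strictly-greater-than, hence the operand is biased by one.
Sketch (i915_seqno_passed() shown as defined in the driver; the subtraction
is wrap-safe):

	/* "seq1 has passed seq2": inclusive, safe across u32 wraparound */
	static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
	{
		return (s32)(seq1 - seq2) >= 0;
	}

	/* Hardware resumes when mbox > operand (strict). Emitting
	 * global_seqno - 1 makes it resume once mbox >= global_seqno,
	 * e.g. global_seqno = 100 -> operand 99 -> resume at mbox 100,
	 * matching i915_seqno_passed(mbox, 100).
	 */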