Commit f71e01a7 authored by Chris Wilson

drm/i915: Extend execution fence to support a callback

In the next patch, we will want to configure the slave request
depending on which physical engine the master request is executed on.
For this, we introduce a callback from the execute fence to convey this
information.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190521211134.16117-8-chris@chris-wilson.co.uk
parent 78e41ddd
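
The sketch below is illustrative only and is not part of this commit: it shows roughly how a caller (such as the next patch in the series) might use the new i915_request_await_execution() hook to react once the "master" request is known to be executing, assuming the in-tree i915 build context. The names bond_execute() and await_master_execution() are hypothetical placeholders; only i915_request_await_execution(), to_request(), and the i915_request fields they touch come from this commit or the existing driver.

#include "i915_request.h"

/* Hypothetical hook: runs when the master request starts executing. */
static void bond_execute(struct i915_request *rq, struct dma_fence *signal)
{
	/*
	 * At this point the master's physical engine is known, so the
	 * slave @rq could be restricted accordingly. This sketch only
	 * notes which engine the master landed on.
	 */
	struct i915_request *master = to_request(signal);

	pr_debug("master executing on %s\n", master->engine->name);
}

/* Hypothetical caller: hook the slave onto the master's execute fence. */
static int await_master_execution(struct i915_request *slave,
				  struct i915_request *master)
{
	/* bond_execute() is invoked once the master's execute fence fires. */
	return i915_request_await_execution(slave, &master->fence,
					    bond_execute);
}
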
drivers/gpu/drm/i915/i915_request.c:

@@ -38,6 +38,8 @@ struct execute_cb {
 	struct list_head link;
 	struct irq_work work;
 	struct i915_sw_fence *fence;
+	void (*hook)(struct i915_request *rq, struct dma_fence *signal);
+	struct i915_request *signal;
 };

 static struct i915_global_request {
@@ -329,6 +331,17 @@ static void irq_execute_cb(struct irq_work *wrk)
 	kmem_cache_free(global.slab_execute_cbs, cb);
 }

+static void irq_execute_cb_hook(struct irq_work *wrk)
+{
+	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+	cb->hook(container_of(cb->fence, struct i915_request, submit),
+		 &cb->signal->fence);
+	i915_request_put(cb->signal);
+
+	irq_execute_cb(wrk);
+}
+
 static void __notify_execute_cb(struct i915_request *rq)
 {
 	struct execute_cb *cb;
@@ -355,14 +368,19 @@ static void __notify_execute_cb(struct i915_request *rq)
 }

 static int
-i915_request_await_execution(struct i915_request *rq,
-			     struct i915_request *signal,
-			     gfp_t gfp)
+__i915_request_await_execution(struct i915_request *rq,
+			       struct i915_request *signal,
+			       void (*hook)(struct i915_request *rq,
+					    struct dma_fence *signal),
+			       gfp_t gfp)
 {
 	struct execute_cb *cb;

-	if (i915_request_is_active(signal))
+	if (i915_request_is_active(signal)) {
+		if (hook)
+			hook(rq, &signal->fence);
 		return 0;
+	}

 	cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
 	if (!cb)
@@ -372,8 +390,18 @@ i915_request_await_execution(struct i915_request *rq,
 	i915_sw_fence_await(cb->fence);
 	init_irq_work(&cb->work, irq_execute_cb);

+	if (hook) {
+		cb->hook = hook;
+		cb->signal = i915_request_get(signal);
+		cb->work.func = irq_execute_cb_hook;
+	}
+
 	spin_lock_irq(&signal->lock);
 	if (i915_request_is_active(signal)) {
+		if (hook) {
+			hook(rq, &signal->fence);
+			i915_request_put(signal);
+		}
 		i915_sw_fence_complete(cb->fence);
 		kmem_cache_free(global.slab_execute_cbs, cb);
 	} else {
@@ -834,7 +862,7 @@ emit_semaphore_wait(struct i915_request *to,
 		return err;

 	/* Only submit our spinner after the signaler is running! */
-	err = i915_request_await_execution(to, from, gfp);
+	err = __i915_request_await_execution(to, from, NULL, gfp);
 	if (err)
 		return err;
@@ -970,6 +998,52 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 	return 0;
 }

+int
+i915_request_await_execution(struct i915_request *rq,
+			     struct dma_fence *fence,
+			     void (*hook)(struct i915_request *rq,
+					  struct dma_fence *signal))
+{
+	struct dma_fence **child = &fence;
+	unsigned int nchild = 1;
+	int ret;
+
+	if (dma_fence_is_array(fence)) {
+		struct dma_fence_array *array = to_dma_fence_array(fence);
+
+		/* XXX Error for signal-on-any fence arrays */
+
+		child = array->fences;
+		nchild = array->num_fences;
+		GEM_BUG_ON(!nchild);
+	}
+
+	do {
+		fence = *child++;
+		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+			continue;
+
+		/*
+		 * We don't squash repeated fence dependencies here as we
+		 * want to run our callback in all cases.
+		 */
+
+		if (dma_fence_is_i915(fence))
+			ret = __i915_request_await_execution(rq,
+							     to_request(fence),
+							     hook,
+							     I915_FENCE_GFP);
+		else
+			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
+							    I915_FENCE_TIMEOUT,
+							    GFP_KERNEL);
+		if (ret < 0)
+			return ret;
+	} while (--nchild);
+
+	return 0;
+}
+
 /**
  * i915_request_await_object - set this request to (async) wait upon a bo
  * @to: request we are wishing to use
 ...
drivers/gpu/drm/i915/i915_request.h:

@@ -283,6 +283,10 @@ int i915_request_await_object(struct i915_request *to,
 			      bool write);
 int i915_request_await_dma_fence(struct i915_request *rq,
 				 struct dma_fence *fence);
+int i915_request_await_execution(struct i915_request *rq,
+				 struct dma_fence *fence,
+				 void (*hook)(struct i915_request *rq,
+					      struct dma_fence *signal));
 void i915_request_add(struct i915_request *rq);
 ...