Commit fe49789f authored by Chris Wilson

drm/i915: Deconstruct execute fence

On reflection, we are only using the execute fence as a waitqueue on the
global_seqno; we never use it for dependency tracking between fences
(unlike the submit and dma fences). By treating it purely as a waitqueue,
we can handle it like the other waitqueues during submit, making the code
simpler.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170223074422.4125-8-chris@chris-wilson.co.uk
parent 541ca6ed
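
The shape of the change: before this patch a waiter blocked on a dedicated
i915_sw_fence (req->execute); afterwards it blocks on a plain
wait_queue_head_t and rechecks req->global_seqno itself. A minimal sketch of
the new pattern in kernel-style C, using a hypothetical example_request in
place of the real drm_i915_gem_request (the struct and function names here
are illustrative, not the driver's API):

#include <linux/types.h>
#include <linux/wait.h>

/* Illustrative stand-in for drm_i915_gem_request, reduced to the two
 * fields this patch is about.
 */
struct example_request {
	u32 global_seqno;		/* 0 until placed on the global timeline */
	wait_queue_head_t execute;	/* woken once global_seqno is assigned */
};

static void example_request_init(struct example_request *rq)
{
	rq->global_seqno = 0;
	init_waitqueue_head(&rq->execute);	/* was i915_sw_fence_init() */
}

/* Submit path: publish the seqno, then wake every waiter; this mirrors
 * the wake_up_all(&request->execute) added to __i915_gem_request_submit().
 */
static void example_request_submit(struct example_request *rq, u32 seqno)
{
	WRITE_ONCE(rq->global_seqno, seqno);
	wake_up_all(&rq->execute);
}

/* Wait path: sleep until the request has been assigned a global seqno. */
static int example_request_wait(struct example_request *rq)
{
	return wait_event_interruptible(rq->execute,
					READ_ONCE(rq->global_seqno));
}

Because a wait_queue_head_t has no lifetime or callbacks of its own, the
request no longer needs the extra reference, the execute_notify() callback,
or the execq wait entry that the sw_fence required; all three are deleted
below.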
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -69,7 +69,6 @@ static void i915_fence_release(struct dma_fence *fence)
 	 * caught trying to reuse dead objects.
 	 */
 	i915_sw_fence_fini(&req->submit);
-	i915_sw_fence_fini(&req->execute);
 
 	kmem_cache_free(req->i915->requests, req);
 }
@@ -294,7 +293,6 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
-	GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
 	GEM_BUG_ON(!i915_gem_request_completed(request));
 	GEM_BUG_ON(!request->i915->gt.active_requests);
 
@@ -402,6 +400,8 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
 	struct intel_timeline *timeline;
 	u32 seqno;
 
+	trace_i915_gem_request_execute(request);
+
 	/* Transfer from per-context onto the global per-engine timeline */
 	timeline = engine->timeline;
 	GEM_BUG_ON(timeline == request->timeline);
@@ -426,8 +426,7 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
 	list_move_tail(&request->link, &timeline->requests);
 	spin_unlock(&request->timeline->lock);
 
-	i915_sw_fence_commit(&request->execute);
-	trace_i915_gem_request_execute(request);
+	wake_up_all(&request->execute);
 }
 
 void i915_gem_request_submit(struct drm_i915_gem_request *request)
@@ -463,24 +462,6 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	return NOTIFY_DONE;
 }
 
-static int __i915_sw_fence_call
-execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
-{
-	struct drm_i915_gem_request *request =
-		container_of(fence, typeof(*request), execute);
-
-	switch (state) {
-	case FENCE_COMPLETE:
-		break;
-
-	case FENCE_FREE:
-		i915_gem_request_put(request);
-		break;
-	}
-
-	return NOTIFY_DONE;
-}
-
 /**
  * i915_gem_request_alloc - allocate a request structure
  *
@@ -573,13 +554,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 
 	/* We bump the ref for the fence chain */
 	i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
-	i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify);
-
-	/* Ensure that the execute fence completes after the submit fence -
-	 * as we complete the execute fence from within the submit fence
-	 * callback, its completion would otherwise be visible first.
-	 */
-	i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);
+	init_waitqueue_head(&req->execute);
 
 	i915_priotree_init(&req->priotree);
 
@@ -1031,6 +1006,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue;
 	DEFINE_WAIT(reset);
+	DEFINE_WAIT(exec);
 	struct intel_wait wait;
 
 	might_sleep();
@@ -1052,12 +1028,11 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 	if (flags & I915_WAIT_LOCKED)
 		add_wait_queue(errq, &reset);
 
-	if (!i915_sw_fence_done(&req->execute)) {
-		DEFINE_WAIT(exec);
-
+	reset_wait_queue(&req->execute, &exec);
+	if (!req->global_seqno) {
 		do {
-			prepare_to_wait(&req->execute.wait, &exec, state);
-			if (i915_sw_fence_done(&req->execute))
+			set_current_state(state);
+			if (req->global_seqno)
 				break;
 
 			if (flags & I915_WAIT_LOCKED &&
@@ -1080,15 +1055,14 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 			timeout = io_schedule_timeout(timeout);
 		} while (1);
-		finish_wait(&req->execute.wait, &exec);
+		finish_wait(&req->execute, &exec);
 
 		if (timeout < 0)
 			goto complete;
 
-		GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
+		GEM_BUG_ON(!req->global_seqno);
 	}
 
-	GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
-	GEM_BUG_ON(!req->global_seqno);
+	GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
 
 	/* Optimistic short spin before touching IRQs */
 	if (i915_spin_request(req, state, 5))
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -119,18 +119,10 @@ struct drm_i915_gem_request {
 	 * The submit fence is used to await upon all of the request's
 	 * dependencies. When it is signaled, the request is ready to run.
 	 * It is used by the driver to then queue the request for execution.
-	 *
-	 * The execute fence is used to signal when the request has been
-	 * sent to hardware.
-	 *
-	 * It is illegal for the submit fence of one request to wait upon the
-	 * execute fence of an earlier request. It should be sufficient to
-	 * wait upon the submit fence of the earlier request.
	 */
 	struct i915_sw_fence submit;
-	struct i915_sw_fence execute;
 	wait_queue_t submitq;
-	wait_queue_t execq;
+	wait_queue_head_t execute;
 
 	/* A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
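
For reference, the open-coded loop that i915_wait_request() ends up with
above is the classic prepare_to_wait()/finish_wait() idiom
(reset_wait_queue() is an i915-local helper, not a core kernel API; the
driver also folds in GPU-reset handling that is omitted here). A simplified
sketch of the same idiom, reusing the hypothetical example_request from the
earlier snippet and with pared-down timeout and signal handling:

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Wait until rq->global_seqno is set, bailing out on a signal or when
 * the timeout expires; returns the remaining jiffies (0 on timeout).
 * Assumes the example_request type from the sketch above.
 */
static long example_wait_for_execute(struct example_request *rq, long timeout)
{
	DEFINE_WAIT(exec);

	do {
		/* Queue ourselves and mark the task sleeping before the
		 * condition check, so a concurrent wake_up_all() cannot
		 * be missed between the check and the schedule.
		 */
		prepare_to_wait(&rq->execute, &exec, TASK_INTERRUPTIBLE);
		if (READ_ONCE(rq->global_seqno))
			break;

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		timeout = io_schedule_timeout(timeout);
	} while (timeout > 0);
	finish_wait(&rq->execute, &exec);

	return timeout;
}

The manual loop, rather than a plain wait_event_timeout(), is what lets the
real code interleave the seqno check with the GPU-reset waitqueue and the
optimistic spin that follow it.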