Commit de220cc2 authored by Chris Wilson

drm/i915: Consolidate the timeline->barrier

The timeline is strictly ordered, so by inserting the timeline->barrier
request into the timeline->last_request it naturally provides the same
barrier. Consolidate the pair of barriers into one as they serve the
same purpose.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190408091728.20207-4-chris@chris-wilson.co.uk
parent da23379f
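
The reasoning behind the consolidation is easiest to see in a small model: every request added to a timeline is implicitly ordered after timeline->last_request, so any request stored there already acts as a barrier for everything submitted afterwards. The user-space C sketch below models only that ordering; it is not i915 code, and the names (struct timeline, timeline_add_request, timeline_set_barrier) are hypothetical stand-ins for the kernel structures touched by this diff.

/*
 * Illustrative user-space model (not i915 code): a strictly ordered
 * "timeline" whose last_request pointer already orders every later
 * submission after every earlier one. "Setting a barrier" is therefore
 * just "make this request the last_request"; a separate barrier slot
 * adds nothing.
 */
#include <stdio.h>
#include <stdlib.h>

struct request {
	unsigned int seqno;      /* position in the timeline's total order */
	struct request *await;   /* request that must complete first */
};

struct timeline {
	unsigned int next_seqno;
	struct request *last_request;   /* doubles as the barrier */
};

/* Every new request is ordered after whatever was submitted last. */
static struct request *timeline_add_request(struct timeline *tl)
{
	struct request *rq = calloc(1, sizeof(*rq));

	rq->seqno = tl->next_seqno++;
	rq->await = tl->last_request;   /* implicit ordering dependency */
	tl->last_request = rq;
	return rq;
}

/*
 * The consolidated "barrier": because the timeline is strictly ordered,
 * recording rq as the last_request guarantees all later requests run
 * after it, which is exactly what a dedicated barrier slot provided.
 */
static void timeline_set_barrier(struct timeline *tl, struct request *rq)
{
	tl->last_request = rq;
}

int main(void)
{
	struct timeline tl = { .next_seqno = 1 };

	struct request *a = timeline_add_request(&tl);
	struct request *barrier = timeline_add_request(&tl);

	timeline_set_barrier(&tl, barrier);

	struct request *b = timeline_add_request(&tl);

	/* b awaits the barrier purely through the timeline's ordering */
	printf("request %u awaits %u\n", b->seqno,
	       b->await ? b->await->seqno : 0);

	free(a);
	free(barrier);
	free(b);
	return 0;
}

In the real driver, i915_active_request_set() on timeline->last_request plays the role of timeline_set_barrier() above, which is why the dedicated timeline->barrier slot and its helpers can be removed in the hunks below.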
@@ -1167,7 +1167,7 @@ static int
 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 {
 	struct drm_i915_private *i915 = ce->engine->i915;
-	struct i915_request *rq, *prev;
+	struct i915_request *rq;
 	intel_wakeref_t wakeref;
 	int ret;
@@ -1192,16 +1192,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 	}
 
 	/* Queue this switch after all other activity by this context. */
-	prev = i915_active_request_raw(&ce->ring->timeline->last_request,
-				       &i915->drm.struct_mutex);
-	if (prev && !i915_request_completed(prev)) {
-		ret = i915_request_await_dma_fence(rq, &prev->fence);
-		if (ret < 0)
-			goto out_add;
-	}
-
-	/* Order all following requests to be after. */
-	ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
+	ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
 	if (ret)
 		goto out_add;
@@ -584,11 +584,6 @@ i915_request_alloc_slow(struct intel_context *ce)
 	return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
 }
 
-static int add_timeline_barrier(struct i915_request *rq)
-{
-	return i915_request_await_active_request(rq, &rq->timeline->barrier);
-}
-
 /**
  * i915_request_alloc - allocate a request structure
  *
@@ -738,10 +733,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 */
 	rq->head = rq->ring->emit;
 
-	ret = add_timeline_barrier(rq);
-	if (ret)
-		goto err_unwind;
-
 	ret = engine->request_alloc(rq);
 	if (ret)
 		goto err_unwind;
@@ -253,7 +253,6 @@ int i915_timeline_init(struct drm_i915_private *i915,
 	spin_lock_init(&timeline->lock);
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->barrier);
 	INIT_ACTIVE_REQUEST(&timeline->last_request);
 	INIT_LIST_HEAD(&timeline->requests);
@@ -326,7 +325,6 @@ void i915_timeline_fini(struct i915_timeline *timeline)
 {
 	GEM_BUG_ON(timeline->pin_count);
 	GEM_BUG_ON(!list_empty(&timeline->requests));
-	GEM_BUG_ON(i915_active_request_isset(&timeline->barrier));
 
 	i915_syncmap_free(&timeline->sync);
@@ -110,19 +110,4 @@ void i915_timelines_init(struct drm_i915_private *i915);
 void i915_timelines_park(struct drm_i915_private *i915);
 void i915_timelines_fini(struct drm_i915_private *i915);
 
-/**
- * i915_timeline_set_barrier - orders submission between different timelines
- * @timeline: timeline to set the barrier on
- * @rq: request after which new submissions can proceed
- *
- * Sets the passed in request as the serialization point for all subsequent
- * submissions on @timeline. Subsequent requests will not be submitted to GPU
- * until the barrier has been completed.
- */
-static inline int
-i915_timeline_set_barrier(struct i915_timeline *tl, struct i915_request *rq)
-{
-	return i915_active_request_set(&tl->barrier, rq);
-}
-
 #endif
@@ -61,16 +61,6 @@ struct i915_timeline {
 	 */
 	struct i915_syncmap *sync;
 
-	/**
-	 * Barrier provides the ability to serialize ordering between different
-	 * timelines.
-	 *
-	 * Users can call i915_timeline_set_barrier which will make all
-	 * subsequent submissions to this timeline be executed only after the
-	 * barrier has been completed.
-	 */
-	struct i915_active_request barrier;
-
 	struct list_head link;
 	struct drm_i915_private *i915;
@@ -16,7 +16,6 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 	spin_lock_init(&timeline->lock);
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->barrier);
 	INIT_ACTIVE_REQUEST(&timeline->last_request);
 	INIT_LIST_HEAD(&timeline->requests);