Commit 12d3173b authored by Chris Wilson

drm/i915: Move reserve_seqno() next to unreserve_seqno()

Move the companion functions next to each other.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170223074422.4125-4-chris@chris-wilson.co.uk
parent 9b6586ae
@@ -198,6 +198,83 @@ i915_priotree_init(struct i915_priotree *pt)
 	pt->priority = INT_MIN;
 }
 
+static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
+{
+	struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int ret;
+
+	/* Carefully retire all requests without writing to the rings */
+	ret = i915_gem_wait_for_idle(i915,
+				     I915_WAIT_INTERRUPTIBLE |
+				     I915_WAIT_LOCKED);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests(i915);
+	GEM_BUG_ON(i915->gt.active_requests > 1);
+
+	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
+	for_each_engine(engine, i915, id) {
+		struct intel_timeline *tl = &timeline->engine[id];
+
+		if (!i915_seqno_passed(seqno, tl->seqno)) {
+			/* spin until threads are complete */
+			while (intel_breadcrumbs_busy(engine))
+				cond_resched();
+		}
+
+		/* Finally reset hw state */
+		tl->seqno = seqno;
+		intel_engine_init_global_seqno(engine, seqno);
+	}
+
+	list_for_each_entry(timeline, &i915->gt.timelines, link) {
+		for_each_engine(engine, i915, id) {
+			struct intel_timeline *tl = &timeline->engine[id];
+
+			memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
+		}
+	}
+
+	return 0;
+}
+
+int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+	if (seqno == 0)
+		return -EINVAL;
+
+	/* HWS page needs to be set less than what we
+	 * will inject to ring
+	 */
+	return reset_all_global_seqno(dev_priv, seqno - 1);
+}
+
+static int reserve_seqno(struct intel_engine_cs *engine)
+{
+	u32 active = ++engine->timeline->inflight_seqnos;
+	u32 seqno = engine->timeline->seqno;
+	int ret;
+
+	/* Reservation is fine until we need to wrap around */
+	if (likely(!add_overflows(seqno, active)))
+		return 0;
+
+	/* Even though we are tracking inflight seqno individually on each
+	 * engine, other engines may be observing us using hw semaphores and
+	 * so we need to idle all engines before wrapping around this engine.
+	 * As all engines are then idle, we can reset the seqno on all, so
+	 * we don't stall in quick succession if each engine is being
+	 * similarly utilized.
+	 */
+	ret = reset_all_global_seqno(engine->i915, 0);
+	if (ret) {
+		engine->timeline->inflight_seqnos--;
+		return ret;
+	}
+
+	return 0;
+}
+
 static void unreserve_seqno(struct intel_engine_cs *engine)
 {
 	GEM_BUG_ON(!engine->timeline->inflight_seqnos);
@@ -314,90 +391,6 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
 	} while (tmp != req);
 }
 
-static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
-{
-	struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int ret;
-
-	/* Carefully retire all requests without writing to the rings */
-	ret = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_INTERRUPTIBLE |
-				     I915_WAIT_LOCKED);
-	if (ret)
-		return ret;
-
-	i915_gem_retire_requests(i915);
-	GEM_BUG_ON(i915->gt.active_requests > 1);
-
-	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
-	for_each_engine(engine, i915, id) {
-		struct intel_timeline *tl = &timeline->engine[id];
-
-		if (!i915_seqno_passed(seqno, tl->seqno)) {
-			/* spin until threads are complete */
-			while (intel_breadcrumbs_busy(engine))
-				cond_resched();
-		}
-
-		/* Finally reset hw state */
-		tl->seqno = seqno;
-		intel_engine_init_global_seqno(engine, seqno);
-	}
-
-	list_for_each_entry(timeline, &i915->gt.timelines, link) {
-		for_each_engine(engine, i915, id) {
-			struct intel_timeline *tl = &timeline->engine[id];
-
-			memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
-		}
-	}
-
-	return 0;
-}
-
-int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-	if (seqno == 0)
-		return -EINVAL;
-
-	/* HWS page needs to be set less than what we
-	 * will inject to ring
-	 */
-	return reset_all_global_seqno(dev_priv, seqno - 1);
-}
-
-static int reserve_seqno(struct intel_engine_cs *engine)
-{
-	u32 active = ++engine->timeline->inflight_seqnos;
-	u32 seqno = engine->timeline->seqno;
-	int ret;
-
-	/* Reservation is fine until we need to wrap around */
-	if (likely(!add_overflows(seqno, active)))
-		return 0;
-
-	/* Even though we are tracking inflight seqno individually on each
-	 * engine, other engines may be observing us using hw semaphores and
-	 * so we need to idle all engines before wrapping around this engine.
-	 * As all engines are then idle, we can reset the seqno on all, so
-	 * we don't stall in quick succession if each engine is being
-	 * similarly utilized.
-	 */
-	ret = reset_all_global_seqno(engine->i915, 0);
-	if (ret) {
-		engine->timeline->inflight_seqnos--;
-		return ret;
-	}
-
-	return 0;
-}
-
 static u32 timeline_get_seqno(struct intel_timeline *tl)
 {
 	return ++tl->seqno;
...
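
For context, the moved code leans on two u32 helpers: add_overflows(), which reserve_seqno() uses to detect that handing out the in-flight reservations would wrap the seqno space, and i915_seqno_passed(), which reset_all_global_seqno() uses to decide whether breadcrumb waiters must be flushed. Below is a minimal standalone userspace sketch of their semantics; the bodies and test values are illustrative assumptions matching the usual i915 behaviour, not the kernel's definitions (which live in the i915 headers).

/* Sketch of the u32 wraparound checks (assumed semantics, not kernel code). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* add_overflows(): unsigned addition wrapped iff the sum is smaller
 * than an operand.
 */
static bool add_overflows(uint32_t a, uint32_t b)
{
	return a + b < a;
}

/* i915_seqno_passed()-style test: "seq1 is at or after seq2", robust
 * across wraparound because the difference is taken modulo 2^32 and
 * then read as a signed offset.
 */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	/* Reserving 0x20 seqnos just below UINT32_MAX wraps... */
	assert(add_overflows(0xfffffff0u, 0x20u));
	/* ...while the same reservation lower down does not. */
	assert(!add_overflows(0x1000u, 0x20u));

	/* Plain ordering below the wrap point. */
	assert(seqno_passed(0x200u, 0x100u));
	assert(!seqno_passed(0x100u, 0x200u));

	/* Across the wrap, 0x10 still counts as "after" 0xfffffff0;
	 * reset_all_global_seqno() only spins on the breadcrumb threads
	 * when the new seqno has NOT passed the timeline's current one.
	 */
	assert(seqno_passed(0x10u, 0xfffffff0u));

	return 0;
}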