Commit ad7ad48e authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2019-03-12' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- HDCP state handling in ddi_update_pipe
- Protect i915_active iterators from the shrinker
- Reacquire priolist cache after dropping the engine lock
- (Selftest) Always free spinner on __sseu_prepare error
- Acquire breadcrumb ref before canceling
- Fix atomic state leak on HDMI link reset
- Relax mmap VMA check
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190312205551.GA7701@intel.com
parents 74cd45fa ca22f32a
@@ -163,17 +163,25 @@ int i915_active_ref(struct i915_active *ref,
                     struct i915_request *rq)
 {
         struct i915_active_request *active;
+        int err = 0;
+
+        /* Prevent reaping in case we malloc/wait while building the tree */
+        i915_active_acquire(ref);
 
         active = active_instance(ref, timeline);
-        if (IS_ERR(active))
-                return PTR_ERR(active);
+        if (IS_ERR(active)) {
+                err = PTR_ERR(active);
+                goto out;
+        }
 
         if (!i915_active_request_isset(active))
                 ref->count++;
         __i915_active_request_set(active, rq);
 
         GEM_BUG_ON(!ref->count);
-        return 0;
+out:
+        i915_active_release(ref);
+        return err;
 }
 
 bool i915_active_acquire(struct i915_active *ref)
@@ -223,19 +231,25 @@ int i915_request_await_active_request(struct i915_request *rq,
 int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
 {
         struct active_node *it, *n;
-        int ret;
+        int err = 0;
 
-        ret = i915_request_await_active_request(rq, &ref->last);
-        if (ret)
-                return ret;
+        /* await allocates and so we need to avoid hitting the shrinker */
+        if (i915_active_acquire(ref))
+                goto out; /* was idle */
+
+        err = i915_request_await_active_request(rq, &ref->last);
+        if (err)
+                goto out;
 
         rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-                ret = i915_request_await_active_request(rq, &it->base);
-                if (ret)
-                        return ret;
+                err = i915_request_await_active_request(rq, &it->base);
+                if (err)
+                        goto out;
         }
 
-        return 0;
+out:
+        i915_active_release(ref);
+        return err;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
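The two i915_active hunks above apply one pattern: any operation that may allocate (and so may recurse into the shrinker) while the active tree is being walked or grown is bracketed by i915_active_acquire()/i915_active_release(), so the retire/reap path cannot tear the nodes down halfway through. A minimal standalone sketch of that bracket follows; the names (tracker, tracker_add, tracker_retire) are hypothetical and only illustrate the shape, not the i915 API.

#include <stdbool.h>
#include <stdlib.h>

struct tracker {
        int active;     /* pin count held by in-progress updates */
        int count;      /* live references being tracked */
        void *nodes;    /* stand-in for the rbtree of nodes */
};

static void tracker_retire(struct tracker *t)
{
        free(t->nodes); /* the "reap" the bracket protects against */
        t->nodes = NULL;
}

static bool tracker_acquire(struct tracker *t)
{
        return t->active++ == 0;        /* true on first acquisition (was idle) */
}

static void tracker_release(struct tracker *t)
{
        if (--t->active == 0 && t->count == 0)
                tracker_retire(t);
}

/* Mirrors the fixed i915_active_ref(): pin first, then do the allocating work. */
static int tracker_add(struct tracker *t)
{
        int err = 0;

        tracker_acquire(t);             /* prevent reaping while we allocate */
        if (!t->nodes && !(t->nodes = malloc(64)))
                err = -1;               /* ENOMEM stand-in */
        else
                t->count++;
        tracker_release(t);             /* retires again only if still idle */
        return err;
}

int main(void)
{
        struct tracker t = { 0 };

        return tracker_add(&t);
}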
@@ -1688,7 +1688,8 @@ __vma_matches(struct vm_area_struct *vma, struct file *filp,
         if (vma->vm_file != filp)
                 return false;
 
-        return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
+        return vma->vm_start == addr &&
+               (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
 }
 
 /**
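For context on the relaxed VMA check: mmap() lengths are rounded up to whole pages by the kernel, so a mapping requested with an unaligned size produces a VMA spanning PAGE_ALIGN(size) bytes, and the old exact comparison against size could never match for such requests. A small userspace illustration, assuming 4 KiB pages and a simplified PAGE_ALIGN macro:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long vm_start = 0x7f0000000000UL;
        unsigned long size = 4000;                              /* unaligned mmap length */
        unsigned long vm_end = vm_start + PAGE_ALIGN(size);     /* span the kernel creates */

        printf("span == size?             %d\n", (vm_end - vm_start) == size);              /* 0 */
        printf("span == PAGE_ALIGN(size)? %d\n", (vm_end - vm_start) == PAGE_ALIGN(size));  /* 1 */
        return 0;
}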
@@ -223,8 +223,14 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
         return &p->requests[idx];
 }
 
+struct sched_cache {
+        struct list_head *priolist;
+};
+
 static struct intel_engine_cs *
-sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
+sched_lock_engine(const struct i915_sched_node *node,
+                  struct intel_engine_cs *locked,
+                  struct sched_cache *cache)
 {
         struct intel_engine_cs *engine = node_to_request(node)->engine;
 
@@ -232,6 +238,7 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
         if (engine != locked) {
                 spin_unlock(&locked->timeline.lock);
+                memset(cache, 0, sizeof(*cache));
                 spin_lock(&engine->timeline.lock);
         }
 
@@ -253,11 +260,11 @@ static bool inflight(const struct i915_request *rq,
 static void __i915_schedule(struct i915_request *rq,
                             const struct i915_sched_attr *attr)
 {
-        struct list_head *uninitialized_var(pl);
-        struct intel_engine_cs *engine, *last;
+        struct intel_engine_cs *engine;
         struct i915_dependency *dep, *p;
         struct i915_dependency stack;
         const int prio = attr->priority;
+        struct sched_cache cache;
         LIST_HEAD(dfs);
 
         /* Needed in order to use the temporary link inside i915_dependency */
@@ -328,7 +335,7 @@ static void __i915_schedule(struct i915_request *rq,
                 __list_del_entry(&stack.dfs_link);
         }
 
-        last = NULL;
+        memset(&cache, 0, sizeof(cache));
         engine = rq->engine;
         spin_lock_irq(&engine->timeline.lock);
 
@@ -338,7 +345,7 @@ static void __i915_schedule(struct i915_request *rq,
                 INIT_LIST_HEAD(&dep->dfs_link);
 
-                engine = sched_lock_engine(node, engine);
+                engine = sched_lock_engine(node, engine, &cache);
                 lockdep_assert_held(&engine->timeline.lock);
 
                 /* Recheck after acquiring the engine->timeline.lock */
@@ -347,11 +354,11 @@ static void __i915_schedule(struct i915_request *rq,
                 node->attr.priority = prio;
                 if (!list_empty(&node->link)) {
-                        if (last != engine) {
-                                pl = i915_sched_lookup_priolist(engine, prio);
-                                last = engine;
-                        }
-                        list_move_tail(&node->link, pl);
+                        if (!cache.priolist)
+                                cache.priolist =
+                                        i915_sched_lookup_priolist(engine,
+                                                                   prio);
+                        list_move_tail(&node->link, cache.priolist);
                 } else {
                         /*
                          * If the request is not in the priolist queue because
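The scheduler hunks replace the ad-hoc "last engine" test with a small lock-scoped cache: the priolist is looked up at most once per engine while that engine's timeline lock is held, and sched_lock_engine() wipes the cache whenever it hops to a different engine, because a pointer obtained under the previous lock is no longer trustworthy. A toy sketch of the same caching pattern, with hypothetical names that are not the i915 API:

#include <string.h>

struct engine { int id; };

struct sched_cache { void *priolist; };

static void *expensive_priolist_lookup(struct engine *e, int prio)
{
        static char slot[8][16];

        (void)prio;
        return slot[e->id];     /* stand-in for the rbtree walk done under the lock */
}

static struct engine *lock_engine(struct engine *next, struct engine *locked,
                                  struct sched_cache *cache)
{
        if (next != locked)
                memset(cache, 0, sizeof(*cache));       /* cache is only valid under one lock */
        return next;    /* pretend we dropped the old lock and took the new one */
}

static void move_to_priolist(struct engine *e, int prio, struct sched_cache *cache)
{
        if (!cache->priolist)   /* lazily fill, then reuse for every later node */
                cache->priolist = expensive_priolist_lookup(e, prio);
        /* list_move_tail(&node->link, cache->priolist) would go here */
}

int main(void)
{
        struct engine e0 = { 0 }, e1 = { 1 };
        struct engine *locked = &e0;
        struct sched_cache cache = { 0 };

        move_to_priolist(locked, 0, &cache);            /* first node: one lookup */
        move_to_priolist(locked, 0, &cache);            /* same engine: cached */
        locked = lock_engine(&e1, locked, &cache);      /* engine change: cache wiped */
        move_to_priolist(locked, 0, &cache);            /* fresh lookup under the new lock */
        return 0;
}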
@@ -106,16 +106,6 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
                         GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
                                              &rq->fence.flags));
-                        clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
-                        /*
-                         * We may race with direct invocation of
-                         * dma_fence_signal(), e.g. i915_request_retire(),
-                         * in which case we can skip processing it ourselves.
-                         */
-                        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-                                     &rq->fence.flags))
-                                continue;
 
                         /*
                          * Queue for execution after dropping the signaling
@@ -123,6 +113,14 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
                          * more signalers to the same context or engine.
                          */
                         i915_request_get(rq);
+
+                        /*
+                         * We may race with direct invocation of
+                         * dma_fence_signal(), e.g. i915_request_retire(),
+                         * so we need to acquire our reference to the request
+                         * before we cancel the breadcrumb.
+                         */
+                        clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
                         list_add_tail(&rq->signal_link, &signal);
                 }
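The breadcrumb change is purely an ordering fix: the irq worker must take its own reference on the request before it clears the SIGNAL bit, because once the bit is cleared a concurrent dma_fence_signal()/i915_request_retire() may drop the remaining references and free the request under the worker. A reduced sketch of that ordering, using C11 atomics as a stand-in for kref; the names below are hypothetical:

#include <stdatomic.h>
#include <stdlib.h>

struct request {
        atomic_int refcount;            /* kref stand-in */
        atomic_int signal_armed;        /* stand-in for I915_FENCE_FLAG_SIGNAL */
};

static void request_put(struct request *rq)
{
        if (atomic_fetch_sub(&rq->refcount, 1) == 1)
                free(rq);
}

/*
 * irq worker: the reference comes first. Once signal_armed is cleared, a
 * concurrent retire may drop the submitter's reference at any moment, and
 * only our own reference keeps rq alive while we queue and signal it.
 */
static void collect_for_signaling(struct request *rq)
{
        atomic_fetch_add(&rq->refcount, 1);     /* 1. our reference first ... */
        atomic_store(&rq->signal_armed, 0);     /* 2. ... then cancel the breadcrumb */
        /* queue rq on the local signal list, signal it, then request_put(rq) */
}

int main(void)
{
        struct request *rq = calloc(1, sizeof(*rq));

        if (!rq)
                return 1;
        atomic_store(&rq->refcount, 1);         /* submitter's reference */
        atomic_store(&rq->signal_armed, 1);

        collect_for_signaling(rq);      /* worker pins rq before disarming it */
        request_put(rq);                /* submitter/retire drops its reference: rq survives */
        request_put(rq);                /* worker is done signaling: final put frees rq */
        return 0;
}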
@@ -3568,6 +3568,13 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
 {
         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                 intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+
+        if (conn_state->content_protection ==
+            DRM_MODE_CONTENT_PROTECTION_DESIRED)
+                intel_hdcp_enable(to_intel_connector(conn_state->connector));
+        else if (conn_state->content_protection ==
+                 DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+                intel_hdcp_disable(to_intel_connector(conn_state->connector));
 }
 
 static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
@@ -3962,12 +3969,7 @@ static int modeset_pipe(struct drm_crtc *crtc,
                 goto out;
 
         ret = drm_atomic_commit(state);
-        if (ret)
-                goto out;
-
-        return 0;
-
-out:
+out:
         drm_atomic_state_put(state);
         return ret;
@@ -710,47 +710,45 @@ __sseu_prepare(struct drm_i915_private *i915,
                unsigned int flags,
                struct i915_gem_context *ctx,
                struct intel_engine_cs *engine,
-               struct igt_spinner **spin_out)
+               struct igt_spinner **spin)
 {
-        int ret = 0;
-
-        if (flags & (TEST_BUSY | TEST_RESET)) {
-                struct igt_spinner *spin;
-                struct i915_request *rq;
+        struct i915_request *rq;
+        int ret;
 
-                spin = kzalloc(sizeof(*spin), GFP_KERNEL);
-                if (!spin) {
-                        ret = -ENOMEM;
-                        goto out;
-                }
+        *spin = NULL;
+        if (!(flags & (TEST_BUSY | TEST_RESET)))
+                return 0;
 
-                ret = igt_spinner_init(spin, i915);
-                if (ret)
-                        return ret;
+        *spin = kzalloc(sizeof(**spin), GFP_KERNEL);
+        if (!*spin)
+                return -ENOMEM;
 
-                rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
-                if (IS_ERR(rq)) {
-                        ret = PTR_ERR(rq);
-                        igt_spinner_fini(spin);
-                        kfree(spin);
-                        goto out;
-                }
+        ret = igt_spinner_init(*spin, i915);
+        if (ret)
+                goto err_free;
 
-                i915_request_add(rq);
+        rq = igt_spinner_create_request(*spin, ctx, engine, MI_NOOP);
+        if (IS_ERR(rq)) {
+                ret = PTR_ERR(rq);
+                goto err_fini;
+        }
 
-                if (!igt_wait_for_spinner(spin, rq)) {
-                        pr_err("%s: Spinner failed to start!\n", name);
-                        igt_spinner_end(spin);
-                        igt_spinner_fini(spin);
-                        kfree(spin);
-                        ret = -ETIMEDOUT;
-                        goto out;
-                }
+        i915_request_add(rq);
 
-                *spin_out = spin;
+        if (!igt_wait_for_spinner(*spin, rq)) {
+                pr_err("%s: Spinner failed to start!\n", name);
+                ret = -ETIMEDOUT;
+                goto err_end;
         }
 
-out:
+        return 0;
+
+err_end:
+        igt_spinner_end(*spin);
+err_fini:
+        igt_spinner_fini(*spin);
+err_free:
+        kfree(fetch_and_zero(spin));
+
         return ret;
 }
@@ -897,22 +895,23 @@ __sseu_test(struct drm_i915_private *i915,
 
         ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin);
         if (ret)
-                goto out;
+                goto out_context;
 
         ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
         if (ret)
-                goto out;
+                goto out_spin;
 
         ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj,
                             hweight32(sseu.slice_mask), spin);
 
-out:
+out_spin:
         if (spin) {
                 igt_spinner_end(spin);
                 igt_spinner_fini(spin);
                 kfree(spin);
         }
 
+out_context:
         kernel_context_close(kctx);
 
         return ret;
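Both selftest hunks convert open-coded cleanup into the usual kernel unwind-label style: every failure jumps to the label that releases exactly what has been set up so far, so each resource is torn down once and nothing leaks on any path. A generic userspace sketch of that structure, with hypothetical resources rather than the selftest's spinner:

#include <stdio.h>
#include <stdlib.h>

static int prepare(char **buf_out)
{
        FILE *f;
        char *buf;
        int ret;

        buf = malloc(64);
        if (!buf)
                return -1;              /* nothing to unwind yet */

        f = fopen("/dev/null", "w");
        if (!f) {
                ret = -2;
                goto err_free;          /* only the buffer exists */
        }

        if (fputs("spin up\n", f) < 0) {
                ret = -3;
                goto err_close;         /* buffer and file both exist */
        }

        fclose(f);
        *buf_out = buf;                 /* success: ownership passes to the caller */
        return 0;

err_close:
        fclose(f);
err_free:
        free(buf);
        return ret;
}

int main(void)
{
        char *buf;

        if (prepare(&buf))
                return 1;
        free(buf);
        return 0;
}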