Commit 2e49520e authored by Dave Airlie

Merge tag 'drm-intel-next-fixes-2020-10-02' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Propagated from drm-intel-next-queued:
- Fix CRTC state checker (Ville)

Propagated from drm-intel-gt-next:
- Avoid implicit vmap for highmem on 32b (Chris)
- Prevent PAT attributes for writecombine if CPU doesn't support PAT (Chris)
- Clear the buffer pool age before use (Chris)
- Fix error code (Dan)
- Break up error capture compression loops (Chris)
- Fix uninitialized variable in context_create_request (Maarten)
- Check for errors on i915_vm_alloc_pt_stash to avoid NULL dereference (Matt)
- Serialize debugfs i915_gem_objects with ctx->mutex (Chris)
- Fix a rebase mistake caused during drm-intel-gt-next creation (Chris)
- Hold request reference for canceling an active context (Chris)
- Heartbeats fixes (Chris)
- Use unsigned during batch copies (Chris)
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201002182610.GA2204465@intel.com
parents 083320eb c60b93cd
@@ -14304,7 +14304,6 @@ verify_crtc_state(struct intel_crtc *crtc,
 	struct intel_encoder *encoder;
 	struct intel_crtc_state *pipe_config = old_crtc_state;
 	struct drm_atomic_state *state = old_crtc_state->uapi.state;
-	bool active;
 	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
 	intel_crtc_free_hw_state(old_crtc_state);
@@ -14314,16 +14313,19 @@ verify_crtc_state(struct intel_crtc *crtc,
 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
 		    crtc->base.name);
-	active = dev_priv->display.get_pipe_config(crtc, pipe_config);
+	pipe_config->hw.enable = new_crtc_state->hw.enable;
+	pipe_config->hw.active =
+		dev_priv->display.get_pipe_config(crtc, pipe_config);
 	/* we keep both pipes enabled on 830 */
-	if (IS_I830(dev_priv))
-		active = new_crtc_state->hw.active;
+	if (IS_I830(dev_priv) && pipe_config->hw.active)
+		pipe_config->hw.active = new_crtc_state->hw.active;
-	I915_STATE_WARN(new_crtc_state->hw.active != active,
+	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
 			"crtc active state doesn't match with hw state "
 			"(expected %i, found %i)\n",
-			new_crtc_state->hw.active, active);
+			new_crtc_state->hw.active, pipe_config->hw.active);
 	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
 			"transitional active state does not match atomic hw state "
@@ -14332,6 +14334,7 @@ verify_crtc_state(struct intel_crtc *crtc,
 	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
 		enum pipe pipe;
+		bool active;
 		active = encoder->get_hw_state(encoder, &pipe);
 		I915_STATE_WARN(active != new_crtc_state->hw.active,
...
@@ -390,24 +390,6 @@ __context_engines_static(const struct i915_gem_context *ctx)
 	return rcu_dereference_protected(ctx->engines, true);
 }
-static bool __reset_engine(struct intel_engine_cs *engine)
-{
-	struct intel_gt *gt = engine->gt;
-	bool success = false;
-	if (!intel_has_reset_engine(gt))
-		return false;
-	if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
-			      &gt->reset.flags)) {
-		success = intel_engine_reset(engine, NULL) == 0;
-		clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
-				      &gt->reset.flags);
-	}
-	return success;
-}
 static void __reset_context(struct i915_gem_context *ctx,
 			    struct intel_engine_cs *engine)
 {
@@ -431,12 +413,7 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
 	 * kill the banned context, we fallback to doing a local reset
 	 * instead.
 	 */
-	if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
-	    !intel_engine_pulse(engine))
-		return true;
-	/* If we are unable to send a pulse, try resetting this engine. */
-	return __reset_engine(engine);
+	return intel_engine_pulse(engine) == 0;
 }
 static bool
@@ -460,8 +437,8 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active)
 		spin_lock(&locked->active.lock);
 	}
-	if (!i915_request_completed(rq)) {
-		if (i915_request_is_active(rq) && rq->fence.error != -EIO)
+	if (i915_request_is_active(rq)) {
+		if (!i915_request_completed(rq))
 			*active = locked;
 		ret = true;
 	}
@@ -479,13 +456,26 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
 	if (!ce->timeline)
 		return NULL;
+	/*
+	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
+	 * to the request to prevent it being transferred to a new timeline
+	 * (and onto a new timeline->requests list).
+	 */
 	rcu_read_lock();
-	list_for_each_entry_rcu(rq, &ce->timeline->requests, link) {
-		if (i915_request_is_active(rq) && i915_request_completed(rq))
-			continue;
+	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
+		bool found;
+		/* timeline is already completed upto this point? */
+		if (!i915_request_get_rcu(rq))
+			break;
 		/* Check with the backend if the request is inflight */
-		if (__active_engine(rq, &engine))
+		found = true;
+		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
+			found = __active_engine(rq, &engine);
+		i915_request_put(rq);
+		if (found)
 			break;
 	}
 	rcu_read_unlock();
@@ -493,7 +483,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
 	return engine;
 }
-static void kill_engines(struct i915_gem_engines *engines)
+static void kill_engines(struct i915_gem_engines *engines, bool ban)
 {
 	struct i915_gem_engines_iter it;
 	struct intel_context *ce;
@@ -508,7 +498,7 @@ static void kill_engines(struct i915_gem_engines *engines)
 	for_each_gem_engine(ce, engines, it) {
 		struct intel_engine_cs *engine;
-		if (intel_context_set_banned(ce))
+		if (ban && intel_context_set_banned(ce))
 			continue;
 		/*
@@ -521,7 +511,7 @@ static void kill_engines(struct i915_gem_engines *engines)
 		engine = active_engine(ce);
 		/* First attempt to gracefully cancel the context */
-		if (engine && !__cancel_engine(engine))
+		if (engine && !__cancel_engine(engine) && ban)
 			/*
 			 * If we are unable to send a preemptive pulse to bump
 			 * the context from the GPU, we have to resort to a full
@@ -531,8 +521,10 @@ static void kill_engines(struct i915_gem_engines *engines)
 	}
 }
-static void kill_stale_engines(struct i915_gem_context *ctx)
+static void kill_context(struct i915_gem_context *ctx)
 {
+	bool ban = (!i915_gem_context_is_persistent(ctx) ||
+		    !ctx->i915->params.enable_hangcheck);
 	struct i915_gem_engines *pos, *next;
 	spin_lock_irq(&ctx->stale.lock);
@@ -545,7 +537,7 @@ static void kill_stale_engines(struct i915_gem_context *ctx)
 		spin_unlock_irq(&ctx->stale.lock);
-		kill_engines(pos);
+		kill_engines(pos, ban);
 		spin_lock_irq(&ctx->stale.lock);
 		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
@@ -557,11 +549,6 @@ static void kill_stale_engines(struct i915_gem_context *ctx)
 	spin_unlock_irq(&ctx->stale.lock);
 }
-static void kill_context(struct i915_gem_context *ctx)
-{
-	kill_stale_engines(ctx);
-}
 static void engines_idle_release(struct i915_gem_context *ctx,
 				 struct i915_gem_engines *engines)
 {
@@ -596,7 +583,7 @@ static void engines_idle_release(struct i915_gem_context *ctx,
 kill:
 	if (list_empty(&engines->link)) /* raced, already closed */
-		kill_engines(engines);
+		kill_engines(engines, true);
 	i915_sw_fence_commit(&engines->fence);
 }
@@ -654,9 +641,7 @@ static void context_close(struct i915_gem_context *ctx)
 	 * case we opt to forcibly kill off all remaining requests on
 	 * context close.
 	 */
-	if (!i915_gem_context_is_persistent(ctx) ||
-	    !ctx->i915->params.enable_hangcheck)
-		kill_context(ctx);
+	kill_context(ctx);
 	i915_gem_context_put(ctx);
 }
...
@@ -2267,8 +2267,8 @@ struct eb_parse_work {
 	struct i915_vma *batch;
 	struct i915_vma *shadow;
 	struct i915_vma *trampoline;
-	unsigned int batch_offset;
-	unsigned int batch_length;
+	unsigned long batch_offset;
+	unsigned long batch_length;
 };
 static int __eb_parse(struct dma_fence_work *work)
@@ -2338,6 +2338,9 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 	struct eb_parse_work *pw;
 	int err;
+	GEM_BUG_ON(overflows_type(eb->batch_start_offset, pw->batch_offset));
+	GEM_BUG_ON(overflows_type(eb->batch_len, pw->batch_length));
 	pw = kzalloc(sizeof(*pw), GFP_KERNEL);
 	if (!pw)
 		return -ENOMEM;
...
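The eb_parse_work hunk above widens batch_offset/batch_length from unsigned int to unsigned long and adds GEM_BUG_ON(overflows_type(...)) guards, so a user-supplied offset or length can no longer be silently truncated when it is copied into the work item. The following is a minimal userspace sketch of that failure mode and of a width check in the same spirit; fits_in_type() here is a made-up stand-in for illustration, not the kernel's overflows_type() macro:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical "does x fit in unsigned type T?" check (illustration only). */
    #define fits_in_type(x, T) ((uintmax_t)(x) <= (uintmax_t)(T)~(T)0)

    int main(void)
    {
            uint64_t batch_len = 5ULL << 30;        /* a 5 GiB batch length */
            uint32_t narrow = (uint32_t)batch_len;  /* silently truncated */
            uint64_t wide = batch_len;              /* preserved */

            printf("narrow=%u wide=%llu\n", narrow, (unsigned long long)wide);
            printf("fits in u32? %d, fits in u64? %d\n",
                   fits_in_type(batch_len, uint32_t),
                   fits_in_type(batch_len, uint64_t));
            return 0;
    }

With the wider fields, the check passes today; the assertions are there to catch the ioctl-facing types ever outgrowing the work-item types again.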
@@ -364,7 +364,7 @@ int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
 	vma[1] = i915_vma_instance(dst, vm, NULL);
 	if (IS_ERR(vma[1]))
-		return PTR_ERR(vma);
+		return PTR_ERR(vma[1]);
 	i915_gem_ww_ctx_init(&ww, true);
 	intel_engine_pm_get(ce->engine);
...
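The one-line fix above ("Fix error code") only makes sense with the kernel's error-pointer idiom in mind: a failed lookup returns an errno encoded in the pointer itself, and PTR_ERR() must be applied to that same pointer. Passing the local array vma instead of the element vma[1] decodes a stack address rather than the stored error. A rough userspace sketch, with simplified stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() helpers:

    #include <errno.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel helpers of the same names. */
    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-4095;
    }

    int main(void)
    {
            void *vma[2];

            vma[0] = "src";                 /* pretend this lookup succeeded */
            vma[1] = ERR_PTR(-ENOMEM);      /* pretend this one failed */

            if (IS_ERR(vma[1])) {
                    printf("PTR_ERR(vma)    = %ld (bogus: decodes the array address)\n",
                           PTR_ERR(vma));
                    printf("PTR_ERR(vma[1]) = %ld (the real error, -ENOMEM)\n",
                           PTR_ERR(vma[1]));
            }
            return 0;
    }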
@@ -254,9 +254,35 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
 	if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
 		return NULL;
+	if (GEM_WARN_ON(type == I915_MAP_WC &&
+			!static_cpu_has(X86_FEATURE_PAT)))
+		return NULL;
 	/* A single page can always be kmapped */
-	if (n_pte == 1 && type == I915_MAP_WB)
-		return kmap(sg_page(sgt->sgl));
+	if (n_pte == 1 && type == I915_MAP_WB) {
+		struct page *page = sg_page(sgt->sgl);
+		/*
+		 * On 32b, highmem using a finite set of indirect PTE (i.e.
+		 * vmap) to provide virtual mappings of the high pages.
+		 * As these are finite, map_new_virtual() must wait for some
+		 * other kmap() to finish when it runs out. If we map a large
+		 * number of objects, there is no method for it to tell us
+		 * to release the mappings, and we deadlock.
+		 *
+		 * However, if we make an explicit vmap of the page, that
+		 * uses a larger vmalloc arena, and also has the ability
+		 * to tell us to release unwanted mappings. Most importantly,
+		 * it will fail and propagate an error instead of waiting
+		 * forever.
+		 *
+		 * So if the page is beyond the 32b boundary, make an explicit
+		 * vmap. On 64b, this check will be optimised away as we can
+		 * directly kmap any page on the system.
+		 */
+		if (!PageHighMem(page))
+			return kmap(page);
+	}
 	mem = stack;
 	if (n_pte > ARRAY_SIZE(stack)) {
...
@@ -472,6 +472,7 @@ struct i915_request *intel_context_create_request(struct intel_context *ce)
 		err = i915_gem_ww_ctx_backoff(&ww);
 		if (!err)
 			goto retry;
+		rq = ERR_PTR(err);
 	} else {
 		rq = ERR_PTR(err);
 	}
...
@@ -337,4 +337,13 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
 	return intel_engine_has_preemption(engine);
 }
+static inline bool
+intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
+{
+	if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+		return false;
+	return READ_ONCE(engine->props.heartbeat_interval_ms);
+}
 #endif /* _INTEL_RINGBUFFER_H_ */
@@ -177,36 +177,82 @@ void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
 	INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
 }
+static int __intel_engine_pulse(struct intel_engine_cs *engine)
+{
+	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
+	struct intel_context *ce = engine->kernel_context;
+	struct i915_request *rq;
+	lockdep_assert_held(&ce->timeline->mutex);
+	GEM_BUG_ON(!intel_engine_has_preemption(engine));
+	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+	intel_context_enter(ce);
+	rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+	intel_context_exit(ce);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
+	idle_pulse(engine, rq);
+	__i915_request_commit(rq);
+	__i915_request_queue(rq, &attr);
+	GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
+	return 0;
+}
+static unsigned long set_heartbeat(struct intel_engine_cs *engine,
+				   unsigned long delay)
+{
+	unsigned long old;
+	old = xchg(&engine->props.heartbeat_interval_ms, delay);
+	if (delay)
+		intel_engine_unpark_heartbeat(engine);
+	else
+		intel_engine_park_heartbeat(engine);
+	return old;
+}
 int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
 			       unsigned long delay)
 {
-	int err;
+	struct intel_context *ce = engine->kernel_context;
+	int err = 0;
-	/* Send one last pulse before to cleanup persistent hogs */
-	if (!delay && IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) {
-		err = intel_engine_pulse(engine);
-		if (err)
-			return err;
-	}
+	if (!delay && !intel_engine_has_preempt_reset(engine))
+		return -ENODEV;
+	intel_engine_pm_get(engine);
+	err = mutex_lock_interruptible(&ce->timeline->mutex);
+	if (err)
+		goto out_rpm;
-	WRITE_ONCE(engine->props.heartbeat_interval_ms, delay);
+	if (delay != engine->props.heartbeat_interval_ms) {
+		unsigned long saved = set_heartbeat(engine, delay);
-	if (intel_engine_pm_get_if_awake(engine)) {
-		if (delay)
-			intel_engine_unpark_heartbeat(engine);
-		else
-			intel_engine_park_heartbeat(engine);
-		intel_engine_pm_put(engine);
+		/* recheck current execution */
+		if (intel_engine_has_preemption(engine)) {
+			err = __intel_engine_pulse(engine);
+			if (err)
+				set_heartbeat(engine, saved);
+		}
 	}
-	return 0;
+	mutex_unlock(&ce->timeline->mutex);
+out_rpm:
+	intel_engine_pm_put(engine);
+	return err;
 }
 int intel_engine_pulse(struct intel_engine_cs *engine)
 {
-	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
 	struct intel_context *ce = engine->kernel_context;
-	struct i915_request *rq;
 	int err;
 	if (!intel_engine_has_preemption(engine))
@@ -215,30 +261,12 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
 	if (!intel_engine_pm_get_if_awake(engine))
 		return 0;
-	if (mutex_lock_interruptible(&ce->timeline->mutex)) {
-		err = -EINTR;
-		goto out_rpm;
-	}
-	intel_context_enter(ce);
-	rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
-	intel_context_exit(ce);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto out_unlock;
-	}
-	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
-	idle_pulse(engine, rq);
-	__i915_request_commit(rq);
-	__i915_request_queue(rq, &attr);
-	GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
-	err = 0;
-out_unlock:
-	mutex_unlock(&ce->timeline->mutex);
-out_rpm:
+	err = -EINTR;
+	if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
+		err = __intel_engine_pulse(engine);
+		mutex_unlock(&ce->timeline->mutex);
+	}
 	intel_engine_pm_put(engine);
 	return err;
 }
...
@@ -134,6 +134,7 @@ static void pool_retire(struct i915_active *ref)
 	/* Return this object to the shrinker pool */
 	i915_gem_object_make_purgeable(node->obj);
+	GEM_BUG_ON(node->age);
 	spin_lock_irqsave(&pool->lock, flags);
 	list_add_rcu(&node->link, list);
 	WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
@@ -155,6 +156,7 @@ node_create(struct intel_gt_buffer_pool *pool, size_t sz)
 	if (!node)
 		return ERR_PTR(-ENOMEM);
+	node->age = 0;
 	node->pool = pool;
 	i915_active_init(&node->active, pool_active, pool_retire);
...
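Both hunks above lean on the convention spelled out in the WRITE_ONCE() comment: node->age == 0 means "node is in use", so node_create() now starts from 0 and pool_retire() asserts the age was cleared before the node is published back to the free list. The jiffies ?: 1 expression keeps a timestamp that happens to be 0 from colliding with that sentinel. A small sketch of the idiom, with a made-up now value standing in for the kernel's jiffies counter:

    #include <stdio.h>

    int main(void)
    {
            unsigned long now = 0; /* pretend the tick counter just wrapped to 0 */
            unsigned long age;

            /* 0 is reserved for "active", so never publish a raw 0 timestamp. */
            age = now ? now : 1;   /* same effect as the `?:` shorthand in the diff */
            printf("published age = %lu\n", age);

            age = 0;               /* node handed out again: mark it active */
            printf("active? %s\n", age == 0 ? "yes" : "no");
            return 0;
    }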
@@ -1136,7 +1136,7 @@ find_reg(const struct intel_engine_cs *engine, u32 addr)
 /* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
 static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 		       struct drm_i915_gem_object *src_obj,
-		       u32 offset, u32 length)
+		       unsigned long offset, unsigned long length)
 {
 	bool needs_clflush;
 	void *dst, *src;
@@ -1166,8 +1166,8 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 		}
 	}
 	if (IS_ERR(src)) {
+		unsigned long x, n;
 		void *ptr;
-		int x, n;
 		/*
 		 * We can avoid clflushing partial cachelines before the write
@@ -1184,7 +1184,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 		ptr = dst;
 		x = offset_in_page(offset);
 		for (n = offset >> PAGE_SHIFT; length; n++) {
-			int len = min_t(int, length, PAGE_SIZE - x);
+			int len = min(length, PAGE_SIZE - x);
 			src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
 			if (needs_clflush)
@@ -1414,8 +1414,8 @@ static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
 */
 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 			    struct i915_vma *batch,
-			    u32 batch_offset,
-			    u32 batch_length,
+			    unsigned long batch_offset,
+			    unsigned long batch_length,
 			    struct i915_vma *shadow,
 			    bool trampoline)
 {
...
@@ -326,6 +326,7 @@ static void print_context_stats(struct seq_file *m,
 		}
 		i915_gem_context_unlock_engines(ctx);
+		mutex_lock(&ctx->mutex);
 		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
 			struct file_stats stats = {
 				.vm = rcu_access_pointer(ctx->vm),
@@ -346,6 +347,7 @@ static void print_context_stats(struct seq_file *m,
 			print_file_stats(m, name, stats);
 		}
+		mutex_unlock(&ctx->mutex);
 		spin_lock(&i915->gem.contexts.lock);
 		list_safe_reset_next(ctx, cn, link);
...
@@ -1949,8 +1949,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 			    struct i915_vma *batch,
-			    u32 batch_offset,
-			    u32 batch_length,
+			    unsigned long batch_offset,
+			    unsigned long batch_length,
 			    struct i915_vma *shadow,
 			    bool trampoline);
 #define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
...
@@ -311,6 +311,8 @@ static int compress_page(struct i915_vma_compress *c,
 		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
 			return -EIO;
+		cond_resched();
 	} while (zstream->avail_in);
 	/* Fallback to uncompressed if we increase size? */
@@ -397,6 +399,7 @@ static int compress_page(struct i915_vma_compress *c,
 	if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
 		memcpy(ptr, src, PAGE_SIZE);
 	dst->pages[dst->page_count++] = ptr;
+	cond_resched();
 	return 0;
 }
...
@@ -542,8 +542,13 @@ bool __i915_request_submit(struct i915_request *request)
 	if (i915_request_completed(request))
 		goto xfer;
+	if (unlikely(intel_context_is_closed(request->context) &&
+		     !intel_engine_has_heartbeat(engine)))
+		intel_context_set_banned(request->context);
 	if (unlikely(intel_context_is_banned(request->context)))
 		i915_request_set_error_once(request, -EIO);
 	if (unlikely(fatal_error(request->fence.error)))
 		__i915_request_skip(request);
@@ -593,16 +598,8 @@ bool __i915_request_submit(struct i915_request *request)
 	__notify_execute_cb_irq(request);
 	/* We may be recursing from the signal callback of another i915 fence */
-	if (!i915_request_signaled(request)) {
-		spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
-		if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
-			     &request->fence.flags) &&
-		    !i915_request_enable_breadcrumb(request))
-			intel_engine_signal_breadcrumbs(engine);
-		spin_unlock(&request->lock);
-	}
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+		i915_request_enable_breadcrumb(request);
 	return result;
 }
...
@@ -892,9 +892,11 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
 	/* Allocate enough page directories to used PTE */
 	if (vma->vm->allocate_va_range) {
-		i915_vm_alloc_pt_stash(vma->vm,
-				       &work->stash,
-				       vma->size);
+		err = i915_vm_alloc_pt_stash(vma->vm,
+					     &work->stash,
+					     vma->size);
+		if (err)
+			goto err_fence;
 		err = i915_vm_pin_pt_stash(vma->vm,
 					   &work->stash);
...