Commit 614654ab authored by Chris Wilson

drm/i915: Check current i915_vma.pin_count status first on unbind

Do an early rejection of an i915_vma_unbind() attempt if the i915_vma is
currently pinned, without waiting to see whether the in-flight operations
may unpin it. We see this problem with the shrinker trying to unbind the
active vma from inside its bind worker:

<6> [472.618968] Workqueue: events_unbound fence_work [i915]
<4> [472.618970] Call Trace:
<4> [472.618974]  ? __schedule+0x2e5/0x810
<4> [472.618978]  schedule+0x37/0xe0
<4> [472.618982]  schedule_preempt_disabled+0xf/0x20
<4> [472.618984]  __mutex_lock+0x281/0x9c0
<4> [472.618987]  ? mark_held_locks+0x49/0x70
<4> [472.618989]  ? _raw_spin_unlock_irqrestore+0x47/0x60
<4> [472.619038]  ? i915_vma_unbind+0xae/0x110 [i915]
<4> [472.619084]  ? i915_vma_unbind+0xae/0x110 [i915]
<4> [472.619122]  i915_vma_unbind+0xae/0x110 [i915]
<4> [472.619165]  i915_gem_object_unbind+0x1dc/0x400 [i915]
<4> [472.619208]  i915_gem_shrink+0x328/0x660 [i915]
<4> [472.619250]  ? i915_gem_shrink_all+0x38/0x60 [i915]
<4> [472.619282]  i915_gem_shrink_all+0x38/0x60 [i915]
<4> [472.619325]  vm_alloc_page.constprop.25+0x1aa/0x240 [i915]
<4> [472.619330]  ? rcu_read_lock_sched_held+0x4d/0x80
<4> [472.619363]  ? __alloc_pd+0xb/0x30 [i915]
<4> [472.619366]  ? module_assert_mutex_or_preempt+0xf/0x30
<4> [472.619368]  ? __module_address+0x23/0xe0
<4> [472.619371]  ? is_module_address+0x26/0x40
<4> [472.619374]  ? static_obj+0x34/0x50
<4> [472.619376]  ? lockdep_init_map+0x4d/0x1e0
<4> [472.619407]  setup_page_dma+0xd/0x90 [i915]
<4> [472.619437]  alloc_pd+0x29/0x50 [i915]
<4> [472.619470]  __gen8_ppgtt_alloc+0x443/0x6b0 [i915]
<4> [472.619503]  gen8_ppgtt_alloc+0xd7/0x300 [i915]
<4> [472.619535]  ppgtt_bind_vma+0x2a/0xe0 [i915]
<4> [472.619577]  __vma_bind+0x26/0x40 [i915]
<4> [472.619611]  fence_work+0x1c/0x90 [i915]
<4> [472.619617]  process_one_work+0x26a/0x620

Fixes: 2850748e ("drm/i915: Pull i915_vma_pin under the vm->mutex")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200403120150.17091-1-chris@chris-wilson.co.uk
parent 6352219c
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1225,18 +1225,6 @@ int __i915_vma_unbind(struct i915_vma *vma)
 
 	lockdep_assert_held(&vma->vm->mutex);
 
-	/*
-	 * First wait upon any activity as retiring the request may
-	 * have side-effects such as unpinning or even unbinding this vma.
-	 *
-	 * XXX Actually waiting under the vm->mutex is a hinderance and
-	 * should be pipelined wherever possible. In cases where that is
-	 * unavoidable, we should lift the wait to before the mutex.
-	 */
-	ret = i915_vma_sync(vma);
-	if (ret)
-		return ret;
-
 	if (i915_vma_is_pinned(vma)) {
 		vma_print_allocator(vma, "is pinned");
 		return -EAGAIN;
@@ -1308,15 +1296,20 @@ int i915_vma_unbind(struct i915_vma *vma)
 	if (!drm_mm_node_allocated(&vma->node))
 		return 0;
 
-	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
-		/* XXX not always required: nop_clear_range */
-		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
-
 	/* Optimistic wait before taking the mutex */
 	err = i915_vma_sync(vma);
 	if (err)
 		goto out_rpm;
 
+	if (i915_vma_is_pinned(vma)) {
+		vma_print_allocator(vma, "is pinned");
+		return -EAGAIN;
+	}
+
+	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+		/* XXX not always required: nop_clear_range */
+		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
 	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
 	if (err)
 		goto out_rpm;
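
As a rough illustration of the pattern this patch applies (not the i915 code itself), the userspace C model below checks a cheap pin count before doing any blocking wait or taking a mutex, returning -EAGAIN so a caller that must not block, such as a shrinker, can back off immediately. The toy_vma struct, toy_unbind() function, and the pthread/stdatomic modelling are assumptions made for this sketch.

	/*
	 * Toy userspace model of "check pin_count before blocking".
	 * Build with: cc -pthread toy_unbind.c -o toy_unbind
	 */
	#include <errno.h>
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct toy_vma {
		atomic_int pin_count;     /* models the vma's pin count */
		pthread_mutex_t vm_mutex; /* models vm->mutex */
	};

	static int toy_unbind(struct toy_vma *vma)
	{
		/*
		 * Early rejection: if the vma is currently pinned, report
		 * -EAGAIN immediately instead of waiting for in-flight work
		 * (which may never unpin it from this context) and instead
		 * of contending on the mutex.
		 */
		if (atomic_load(&vma->pin_count))
			return -EAGAIN;

		pthread_mutex_lock(&vma->vm_mutex);
		/* ... wait for activity and tear down the binding here ... */
		pthread_mutex_unlock(&vma->vm_mutex);
		return 0;
	}

	int main(void)
	{
		struct toy_vma vma = {
			.pin_count = 1,
			.vm_mutex = PTHREAD_MUTEX_INITIALIZER,
		};

		printf("unbind while pinned: %d (expect %d)\n",
		       toy_unbind(&vma), -EAGAIN);

		atomic_store(&vma.pin_count, 0);
		printf("unbind after unpin:  %d (expect 0)\n",
		       toy_unbind(&vma));
		return 0;
	}

In the kernel patch the same reordering happens inside i915_vma_unbind(): the pin check now runs after the optimistic i915_vma_sync() but before the runtime-pm wakeref and vm->mutex are taken.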