Commit 8c992370 authored by Chris Wilson, committed by Jani Nikula

drm/i915: Remove the vma from the drm_mm if binding fails

As we track whether a vma has been inserted into the drm_mm using
vma->flags, if we fail to bind the vma into the GTT we do not update
those bits, and future passes will attempt to reinsert the vma into
the drm_mm. To prevent that, we want to unwind i915_vma_insert() if
we fail in our attempt to bind.

Fixes: 59bfa124 ("drm/i915: Start passing around i915_vma from execbuffer")
Testcase: igt/drv_selftest/live_gtt
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: <stable@vger.kernel.org> # v4.9+
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170227122654.27651-3-chris@chris-wilson.co.uk
(cherry picked from commit 31c7effa)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent b717a039
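
The failure mode is easy to model outside the kernel. Below is a minimal
standalone C sketch of the pattern the fix applies; struct vma, insert(),
bind(), remove_node() and do_pin() are hypothetical stand-ins for the i915
functions, and pin counting is omitted, so treat it as an illustration of
the unwind, not as the kernel code:

#include <stdio.h>

#define BIND_MASK 0x3 /* stands in for I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND */

struct vma {
	unsigned int flags;  /* bind bits, as in vma->flags */
	int node_allocated;  /* stands in for drm_mm_node_allocated() */
};

static int insert(struct vma *v)
{
	v->node_allocated = 1; /* models i915_vma_insert() claiming a drm_mm node */
	return 0;
}

static void remove_node(struct vma *v)
{
	v->node_allocated = 0; /* models the new i915_vma_remove() helper */
}

static int bind(struct vma *v, int fail)
{
	if (fail)
		return -1; /* models the GTT binding step failing */
	v->flags |= BIND_MASK; /* only a successful bind sets the bits */
	return 0;
}

static int do_pin(struct vma *v, int fail_bind)
{
	const unsigned int bound = v->flags; /* snapshot on entry, as in the patch */
	int ret;

	/* insert only when the bind bits say the vma is not in the drm_mm */
	if ((bound & BIND_MASK) == 0) {
		ret = insert(v);
		if (ret)
			goto err_unpin;
	}

	ret = bind(v, fail_bind);
	if (ret)
		goto err_remove; /* the fix: unwind the insert on bind failure */

	return 0;

err_remove:
	/* only unwind an insert this call performed; an already-bound vma
	 * keeps its node
	 */
	if ((bound & BIND_MASK) == 0)
		remove_node(v);
err_unpin:
	return ret;
}

int main(void)
{
	struct vma v = { 0, 0 };

	do_pin(&v, 1); /* first pin attempt: the bind step fails */

	/* Without the err_remove unwind, node_allocated would still be 1 here
	 * while the flags claim "unbound", so the retry below would call
	 * insert() on a node that is already in the drm_mm.
	 */
	printf("node allocated after failed pin: %d\n", v.node_allocated);

	return do_pin(&v, 0) ? 1 : 0; /* retry starts from a clean slate */
}

If the err_remove branch is dropped, node_allocated stays set after the
failed pin while the flags still read as unbound, so the retry calls
insert() on a node that is already claimed; that is the reinsertion the
commit message describes. Note the unwind is gated on the saved bound
snapshot so that a vma which was already bound on entry keeps its node.
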
@@ -512,10 +512,36 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	return ret;
 }
 
+static void
+i915_vma_remove(struct i915_vma *vma)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+
+	drm_mm_remove_node(&vma->node);
+	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+	/* Since the unbound list is global, only move to that list if
+	 * no more VMAs exist.
+	 */
+	if (--obj->bind_count == 0)
+		list_move_tail(&obj->global_link,
+			       &to_i915(obj->base.dev)->mm.unbound_list);
+
+	/* And finally now the object is completely decoupled from this vma,
+	 * we can drop its hold on the backing storage and allow it to be
+	 * reaped by the shrinker.
+	 */
+	i915_gem_object_unpin_pages(obj);
+	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
 int __i915_vma_do_pin(struct i915_vma *vma,
 		      u64 size, u64 alignment, u64 flags)
 {
-	unsigned int bound = vma->flags;
+	const unsigned int bound = vma->flags;
 	int ret;
 
 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 
 	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
 		ret = -EBUSY;
-		goto err;
+		goto err_unpin;
 	}
 
 	if ((bound & I915_VMA_BIND_MASK) == 0) {
 		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
-			goto err;
+			goto err_unpin;
 	}
 
 	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
 	if (ret)
-		goto err;
+		goto err_remove;
 
 	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
 		__i915_vma_set_map_and_fenceable(vma);
@@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
 	return 0;
 
-err:
+err_remove:
+	if ((bound & I915_VMA_BIND_MASK) == 0) {
+		GEM_BUG_ON(vma->pages);
+		i915_vma_remove(vma);
+	}
+err_unpin:
 	__i915_vma_unpin(vma);
 	return ret;
 }
@@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
-	drm_mm_remove_node(&vma->node);
-	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
 	if (vma->pages != obj->mm.pages) {
 		GEM_BUG_ON(!vma->pages);
 		sg_free_table(vma->pages);
@@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 	}
 	vma->pages = NULL;
 
-	/* Since the unbound list is global, only move to that list if
-	 * no more VMAs exist. */
-	if (--obj->bind_count == 0)
-		list_move_tail(&obj->global_link,
-			       &to_i915(obj->base.dev)->mm.unbound_list);
-
-	/* And finally now the object is completely decoupled from this vma,
-	 * we can drop its hold on the backing storage and allow it to be
-	 * reaped by the shrinker.
-	 */
-	i915_gem_object_unpin_pages(obj);
-	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
-
+	i915_vma_remove(vma);
+
 destroy:
 	if (unlikely(i915_vma_is_closed(vma)))