Commit aa653a68 authored by Chris Wilson

drm/i915: Be more careful when unbinding vma

When we call i915_vma_unbind(), we will wait upon outstanding rendering.
This will also trigger a retirement phase, which may update the object
lists. If we extend request tracking to the VMA itself (rather than
keeping it on the encompassing object), then there is a potential for
the obj->vma_list to be modified for other elements upon
i915_vma_unbind(). As a result, if we walk over the object list and call
i915_vma_unbind(), we need to be prepared for that list to change.
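
The diff below handles this by replacing the open-coded list_for_each_entry_safe() walks with a helper that always re-reads the head of obj->vma_list, parks each vma on a private still_in_list for forward progress, and splices the survivors back afterwards. As a rough illustration of why that shape is robust, here is a minimal, self-contained user-space C sketch; the list helpers, fake_vma_unbind() and object_unbind() below are simplified stand-ins written for this example, not the kernel's <linux/list.h> or the actual i915 code.

/* A minimal user-space sketch (not driver code): simplified stand-ins for the
 * <linux/list.h> helpers, a fake i915_vma_unbind() whose "retirement" side
 * effect unlinks another element from the object list, and a drain loop
 * shaped like the patch's i915_gem_object_unbind().
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	list_init(n);
}

static void list_move_tail(struct list_head *n, struct list_head *h)
{
	list_del(n);
	list_add_tail(n, h);
}

/* Splice @from onto the head of @to, same shape as the kernel's list_splice(). */
static void list_splice(struct list_head *from, struct list_head *to)
{
	if (!list_empty(from)) {
		struct list_head *first = from->next, *last = from->prev;
		struct list_head *at = to->next;

		first->prev = to; to->next = first;
		last->next = at; at->prev = last;
	}
}

struct vma { struct list_head obj_link; };

/* Fake unbind: waiting for rendering retires requests, which here also
 * unlinks one *other* vma from the object list, exactly the mutation that
 * invalidates the next pointer cached by list_for_each_entry_safe(). */
static int fake_vma_unbind(struct vma *vma, struct list_head *obj_list)
{
	if (!list_empty(obj_list))
		list_del(obj_list->next);	/* side effect on another element */
	list_del(&vma->obj_link);		/* the unbound vma drops off its list */
	return 0;
}

/* Drain loop shaped like i915_gem_object_unbind(): always re-read the list
 * head, park each vma on a private list for forward progress, splice back. */
static int object_unbind(struct list_head *obj_list)
{
	struct list_head still_in_list;
	int ret = 0;

	list_init(&still_in_list);
	while (!list_empty(obj_list)) {
		struct vma *vma = container_of(obj_list->next, struct vma, obj_link);

		list_move_tail(&vma->obj_link, &still_in_list);
		ret = fake_vma_unbind(vma, obj_list);
		if (ret)
			break;
	}
	list_splice(&still_in_list, obj_list);
	return ret;
}

int main(void)
{
	struct list_head obj_list;
	struct vma v[4];
	int i;

	list_init(&obj_list);
	for (i = 0; i < 4; i++)
		list_add_tail(&v[i].obj_link, &obj_list);

	object_unbind(&obj_list);
	printf("object list empty after unbind: %s\n",
	       list_empty(&obj_list) ? "yes" : "no");
	return 0;
}

With a plain list_for_each_entry_safe() walk, the cached next pointer can name exactly the element that fake_vma_unbind() just unlinked; re-reading the list head each iteration sidesteps that, and the private still_in_list guarantees the loop terminates even if an unbind leaves the vma on the list.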
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-8-git-send-email-chris@chris-wilson.co.uk
parent 15717de2
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3052,6 +3052,8 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
  * _guarantee_ VMA in question is _not in use_ anywhere.
  */
 int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -283,17 +283,37 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
 	.release = i915_gem_object_release_phys,
 };
 
+int
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	LIST_HEAD(still_in_list);
+	int ret;
+
+	/* The vma will only be freed if it is marked as closed, and if we wait
+	 * upon rendering to the vma, we may unbind anything in the list.
+	 */
+	while ((vma = list_first_entry_or_null(&obj->vma_list,
+					       struct i915_vma,
+					       obj_link))) {
+		list_move_tail(&vma->obj_link, &still_in_list);
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			break;
+	}
+	list_splice(&still_in_list, &obj->vma_list);
+
+	return ret;
+}
+
 static int
 drop_pages(struct drm_i915_gem_object *obj)
 {
-	struct i915_vma *vma, *next;
 	int ret;
 
 	i915_gem_object_get(obj);
-	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
-		if (i915_vma_unbind(vma))
-			break;
-
-	ret = i915_gem_object_put_pages(obj);
+	ret = i915_gem_object_unbind(obj);
+	if (ret == 0)
+		ret = i915_gem_object_put_pages(obj);
 	i915_gem_object_put(obj);
 
@@ -3450,8 +3470,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct i915_vma *vma, *next;
+	struct i915_vma *vma;
 	int ret = 0;
 
 	if (obj->cache_level == cache_level)
@@ -3462,7 +3481,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	 * catch the issue of the CS prefetch crossing page boundaries and
 	 * reading an invalid PTE on older architectures.
 	 */
-	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
+restart:
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
@@ -3471,11 +3491,18 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			return -EBUSY;
 		}
 
-		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
-			ret = i915_vma_unbind(vma);
-			if (ret)
-				return ret;
-		}
+		if (i915_gem_valid_gtt_space(vma, cache_level))
+			continue;
+
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			return ret;
+
+		/* As unbinding may affect other elements in the
+		 * obj->vma_list (due to side-effects from retiring
+		 * an active vma), play safe and restart the iterator.
+		 */
+		goto restart;
 	}
 
 	/* We can reuse the existing drm_mm nodes but need to change the
@@ -3494,7 +3521,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		if (ret)
 			return ret;
 
-	if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
+	if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
 		/* Access to snoopable pages through the GTT is
 		 * incoherent and on some machines causes a hard
 		 * lockup. Relinquish the CPU mmaping to force
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -172,8 +172,6 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 		       (obj = list_first_entry_or_null(phase->list,
 						       typeof(*obj),
 						       global_list))) {
-			struct i915_vma *vma, *v;
-
 			list_move_tail(&obj->global_list, &still_in_list);
 
 			if (flags & I915_SHRINK_PURGEABLE &&
@@ -193,11 +191,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			i915_gem_object_get(obj);
 
 			/* For the unbound phase, this should be a no-op! */
-			list_for_each_entry_safe(vma, v,
-						 &obj->vma_list, obj_link)
-				if (i915_vma_unbind(vma))
-					break;
-
+			i915_gem_object_unbind(obj);
 			if (i915_gem_object_put_pages(obj) == 0)
 				count += obj->base.size >> PAGE_SHIFT;
 
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -104,7 +104,6 @@ static void cancel_userptr(struct work_struct *work)
 
 	if (obj->pages != NULL) {
 		struct drm_i915_private *dev_priv = to_i915(dev);
-		struct i915_vma *vma, *tmp;
 		bool was_interruptible;
 
 		wait_rendering(obj);
@@ -112,8 +111,7 @@ static void cancel_userptr(struct work_struct *work)
 		was_interruptible = dev_priv->mm.interruptible;
 		dev_priv->mm.interruptible = false;
 
-		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
-			WARN_ON(i915_vma_unbind(vma));
+		WARN_ON(i915_gem_object_unbind(obj));
 		WARN_ON(i915_gem_object_put_pages(obj));
 
 		dev_priv->mm.interruptible = was_interruptible;