Commit 5e053450 authored by Chris Wilson

drm/i915: Only track bound elements of the GTT

The premise here is simply to avoid having to acquire the vm->mutex
inside vma create/destroy to update the vm->unbound_list, which avoids
some nasty lock recursions later.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-2-chris@chris-wilson.co.uk
parent b290a78b
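
To illustrate the pattern (a minimal userspace sketch, not the i915 code:
all types and helpers below are hypothetical stand-ins), the patch boils
down to tracking a vma on a per-vm list only while it is bound, so that
create and destroy never need the lock:

/*
 * Sketch of "track only bound elements": the vm keeps a single
 * bound_list, and a vma joins/leaves it only across bind/unbind,
 * under vm->mutex. Illustrative names, not the i915 implementation.
 */
#include <pthread.h>
#include <stdlib.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

struct vm {
	pthread_mutex_t mutex;
	struct list_head bound_list;	/* the only list the vm keeps */
};

struct vma {
	struct vm *vm;
	struct list_head vm_link;	/* linked only while bound */
};

static void vm_init(struct vm *vm)
{
	pthread_mutex_init(&vm->mutex, NULL);
	list_init(&vm->bound_list);
}

/* Creation never touches vm->mutex: an unbound vma is on no list. */
static struct vma *vma_create(struct vm *vm)
{
	struct vma *vma = calloc(1, sizeof(*vma));

	if (vma)
		vma->vm = vm;
	return vma;
}

static void vma_bind(struct vma *vma)
{
	pthread_mutex_lock(&vma->vm->mutex);
	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
	pthread_mutex_unlock(&vma->vm->mutex);
}

static void vma_unbind(struct vma *vma)
{
	pthread_mutex_lock(&vma->vm->mutex);
	list_del(&vma->vm_link);
	pthread_mutex_unlock(&vma->vm->mutex);
}

/* Destruction is likewise lock-free; callers must unbind first. */
static void vma_destroy(struct vma *vma)
{
	free(vma);
}

Before the change, vma_create()/vma_destroy() also had to take vm->mutex
just to add the fresh vma to (or unlink it from) an unbound_list; dropping
that bookkeeping is what removes the lock-recursion hazard the message
refers to.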
@@ -692,7 +692,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	__i915_vma_set_map_and_fenceable(vma);
 
 	mutex_lock(&ggtt->vm.mutex);
-	list_move_tail(&vma->vm_link, &ggtt->vm.bound_list);
+	list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
 	mutex_unlock(&ggtt->vm.mutex);
 
 	GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
@@ -505,19 +505,12 @@ static void i915_address_space_fini(struct i915_address_space *vm)
 
 static void ppgtt_destroy_vma(struct i915_address_space *vm)
 {
-	struct list_head *phases[] = {
-		&vm->bound_list,
-		&vm->unbound_list,
-		NULL,
-	}, **phase;
+	struct i915_vma *vma, *vn;
 
 	mutex_lock(&vm->i915->drm.struct_mutex);
-	for (phase = phases; *phase; phase++) {
-		struct i915_vma *vma, *vn;
-
-		list_for_each_entry_safe(vma, vn, *phase, vm_link)
-			i915_vma_destroy(vma);
-	}
+	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link)
+		i915_vma_destroy(vma);
+	GEM_BUG_ON(!list_empty(&vm->bound_list));
 	mutex_unlock(&vm->i915->drm.struct_mutex);
 }
@@ -528,9 +521,6 @@ static void __i915_vm_release(struct work_struct *work)
 
 	ppgtt_destroy_vma(vm);
 
-	GEM_BUG_ON(!list_empty(&vm->bound_list));
-	GEM_BUG_ON(!list_empty(&vm->unbound_list));
-
 	vm->cleanup(vm);
 	i915_address_space_fini(vm);
@@ -569,7 +559,6 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
 	stash_init(&vm->free_pages);
 
-	INIT_LIST_HEAD(&vm->unbound_list);
 	INIT_LIST_HEAD(&vm->bound_list);
 }
@@ -1887,10 +1876,6 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 	INIT_LIST_HEAD(&vma->obj_link);
 	INIT_LIST_HEAD(&vma->closed_link);
 
-	mutex_lock(&vma->vm->mutex);
-	list_add(&vma->vm_link, &vma->vm->unbound_list);
-	mutex_unlock(&vma->vm->mutex);
-
 	return vma;
 }
@@ -320,11 +320,6 @@ struct i915_address_space {
 	 */
 	struct list_head bound_list;
 
-	/**
-	 * List of vma that are not unbound.
-	 */
-	struct list_head unbound_list;
-
 	struct pagestash free_pages;
 
 	/* Global GTT */
@@ -218,10 +218,6 @@ vma_create(struct drm_i915_gem_object *obj,
 	spin_unlock(&obj->vma.lock);
 
-	mutex_lock(&vm->mutex);
-	list_add(&vma->vm_link, &vm->unbound_list);
-	mutex_unlock(&vm->mutex);
-
 	return vma;
 
 err_vma:
@@ -657,7 +653,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
 
 	mutex_lock(&vma->vm->mutex);
-	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
 	mutex_unlock(&vma->vm->mutex);
 
 	if (vma->obj) {
@@ -685,7 +681,7 @@ i915_vma_remove(struct i915_vma *vma)
 
 	mutex_lock(&vma->vm->mutex);
 	drm_mm_remove_node(&vma->node);
-	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+	list_del(&vma->vm_link);
 	mutex_unlock(&vma->vm->mutex);
 
 	/*
@@ -798,10 +794,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
 
 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(vma->fence);
 
-	mutex_lock(&vma->vm->mutex);
-	list_del(&vma->vm_link);
-	mutex_unlock(&vma->vm->mutex);
-
 	if (vma->obj) {
 		struct drm_i915_gem_object *obj = vma->obj;
@@ -1242,7 +1242,7 @@ static void track_vma_bind(struct i915_vma *vma)
 	vma->pages = obj->mm.pages;
 
 	mutex_lock(&vma->vm->mutex);
-	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
 	mutex_unlock(&vma->vm->mutex);
 }