Commit b1f788c6 authored by Chris Wilson

drm/i915: Release vma when the handle is closed

In order to prevent a leak of the vma on shared objects, we need to
hook into the object_close callback to destroy the vma on the object for
this file. However, destroying that vma immediately could cause
unexpected application stalls as we try to unbind a busy vma - hence we
defer the unbind until the vma is retired.

v2: Keep the vma allocated until closed. This is useful for a later
optimisation, but it is required now in order to handle the potential
recursion of i915_vma_unbind() into itself via retirement.
v3: Comments are important.

Testcase: igt/gem_ppgtt/flink-and-close-vma-leak
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-26-git-send-email-chris@chris-wilson.co.uk
parent b0decaf7
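For orientation before the diff: a minimal userspace C model of the lifecycle this patch introduces. All names here (mock_vma, mock_vma_close, mock_vma_retire) are illustrative stand-ins, not driver API; the only point is that closing the handle marks the vma, and the actual teardown is deferred until the GPU retires it.

```c
/* Illustrative model of the deferred-destroy lifecycle (not driver code). */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mock_vma {
	bool closed;    /* handle gone; destroy once idle */
	int active;     /* outstanding GPU work */
	int pin_count;  /* temporary no-unbind guard */
};

static void mock_vma_destroy(struct mock_vma *vma)
{
	assert(vma->closed && !vma->active);
	printf("vma destroyed\n");
	free(vma);
}

/* Called when the GPU finishes with the vma. */
static void mock_vma_retire(struct mock_vma *vma)
{
	vma->active--;
	if (vma->active == 0 && vma->closed && !vma->pin_count)
		mock_vma_destroy(vma); /* deferred teardown happens here */
}

/* Called when the handle is closed: defer the teardown if busy. */
static void mock_vma_close(struct mock_vma *vma)
{
	vma->closed = true;
	if (!vma->active && !vma->pin_count)
		mock_vma_destroy(vma); /* idle: destroy immediately */
	/* else: mock_vma_retire() destroys it later, without stalling */
}

int main(void)
{
	struct mock_vma *vma = calloc(1, sizeof(*vma));

	vma->active = 1;      /* GPU still using the vma */
	mock_vma_close(vma);  /* no stall: destroy is deferred */
	mock_vma_retire(vma); /* GPU done: vma destroyed here */
	return 0;
}
```

Closing while busy takes the deferred path and the final retire performs the teardown, mirroring the i915_vma_close()/i915_vma_retire() pairing added in the diff below.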
@@ -2578,6 +2578,7 @@ static struct drm_driver driver = {
 	.postclose = i915_driver_postclose,
 	.set_busid = drm_pci_set_busid,
+	.gem_close_object = i915_gem_close_object,
 	.gem_free_object = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,
...
@@ -3014,8 +3014,8 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
 						  size_t size);
 struct drm_i915_gem_object *i915_gem_object_create_from_data(
 		struct drm_device *dev, const void *data, size_t size);
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
-void i915_gem_vma_destroy(struct i915_vma *vma);

 /* Flags used by pin/bind&friends. */
 #define PIN_MAPPABLE	(1<<0)
@@ -3048,6 +3048,8 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
  * _guarantee_ VMA in question is _not in use_ anywhere.
  */
 int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+void i915_vma_close(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
 int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
...
@@ -2596,6 +2596,19 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	}
 }

+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(gem);
+	struct drm_i915_file_private *fpriv = file->driver_priv;
+	struct i915_vma *vma, *vn;
+
+	mutex_lock(&obj->base.dev->struct_mutex);
+	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
+		if (vma->vm->file == fpriv)
+			i915_vma_close(vma);
+	mutex_unlock(&obj->base.dev->struct_mutex);
+}
+
 /**
  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
  * @dev: drm device pointer
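A side note on the iterator choice above: i915_vma_close() unlinks the vma from obj->vma_list, so the loop must use list_for_each_entry_safe(), which caches the next element before the body runs. A standalone sketch of the same pattern with a hand-rolled list (names invented for illustration):

```c
/* Why the _safe iterator matters when the loop body unlinks the node. */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL;

	/* Build a three-element list: 2 -> 1 -> 0. */
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = head;
		head = n;
	}

	/* "Safe" traversal: grab next before freeing the current node,
	 * mirroring list_for_each_entry_safe(vma, vn, ...).
	 */
	for (struct node *n = head, *vn; n; n = vn) {
		vn = n->next; /* cached before n is unlinked */
		printf("closing %d\n", n->id);
		free(n);      /* stands in for i915_vma_close() */
	}
	return 0;
}
```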
@@ -2803,26 +2816,32 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 	if (active && wait) {
 		int idx;

+		/* When a closed VMA is retired, it is unbound - eek.
+		 * In order to prevent it from being recursively closed,
+		 * take a pin on the vma so that the second unbind is
+		 * aborted.
+		 */
+		vma->pin_count++;
 		for_each_active(active, idx) {
 			ret = i915_gem_active_retire(&vma->last_read[idx],
 						     &vma->vm->dev->struct_mutex);
 			if (ret)
-				return ret;
+				break;
 		}
+		vma->pin_count--;
+		if (ret)
+			return ret;

 		GEM_BUG_ON(i915_vma_is_active(vma));
 	}

 	if (vma->pin_count)
 		return -EBUSY;

-	if (list_empty(&vma->obj_link))
-		return 0;
-
-	if (!drm_mm_node_allocated(&vma->node)) {
-		i915_gem_vma_destroy(vma);
-		return 0;
-	}
+	if (!drm_mm_node_allocated(&vma->node))
+		goto destroy;

 	GEM_BUG_ON(obj->bind_count == 0);
 	GEM_BUG_ON(!obj->pages);
@@ -2855,7 +2874,6 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 	}

 	drm_mm_remove_node(&vma->node);
-	i915_gem_vma_destroy(vma);

 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist. */
@@ -2869,6 +2887,10 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 	 */
 	i915_gem_object_unpin_pages(obj);

+destroy:
+	if (unlikely(vma->closed))
+		i915_vma_destroy(vma);
+
 	return 0;
 }
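The new comment explains the pin taken around the retire loop. A toy, self-contained model of that guard, with invented names, showing how the elevated pin_count makes the re-entrant unbind bail out instead of recursing:

```c
/* Toy model (not driver code) of the pin guard in __i915_vma_unbind():
 * retiring can re-enter unbind on a closed vma; the elevated pin_count
 * makes the nested call bail out with -EBUSY instead of recursing.
 */
#include <errno.h>
#include <stdio.h>

struct mock_vma {
	int pin_count;
	int active;
	int closed;
};

static int mock_unbind(struct mock_vma *vma);

/* Retiring a closed vma tries to unbind it - the re-entry hazard. */
static void mock_retire(struct mock_vma *vma)
{
	vma->active = 0;
	if (vma->closed)
		mock_unbind(vma); /* nested call */
}

static int mock_unbind(struct mock_vma *vma)
{
	if (vma->pin_count) {
		printf("nested unbind aborted (-EBUSY)\n");
		return -EBUSY;
	}

	if (vma->active) {
		vma->pin_count++; /* guard against re-entry */
		mock_retire(vma); /* may recurse into mock_unbind() */
		vma->pin_count--;
	}

	printf("unbound once\n");
	return 0;
}

int main(void)
{
	struct mock_vma vma = { .active = 1, .closed = 1 };
	return mock_unbind(&vma);
}
```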
@@ -3043,7 +3065,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 		if (offset & (alignment - 1) || offset + size > end) {
 			ret = -EINVAL;
-			goto err_free_vma;
+			goto err_vma;
 		}
 		vma->node.start = offset;
 		vma->node.size = size;
@@ -3055,7 +3077,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			ret = drm_mm_reserve_node(&vm->mm, &vma->node);
 		}
 		if (ret)
-			goto err_free_vma;
+			goto err_vma;
 	} else {
 		if (flags & PIN_HIGH) {
 			search_flag = DRM_MM_SEARCH_BELOW;
@@ -3080,7 +3102,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			if (ret == 0)
 				goto search_free;
-			goto err_free_vma;
+			goto err_vma;
 		}
 	}
 	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
@@ -3101,8 +3123,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 err_remove_node:
 	drm_mm_remove_node(&vma->node);
-err_free_vma:
-	i915_gem_vma_destroy(vma);
+err_vma:
 	vma = ERR_PTR(ret);
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
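The error paths above now unwind to err_vma without freeing the vma, since the vma lives on until closed; only the label rename records that. A generic sketch of this goto-unwind style (illustrative names only, not the driver's code):

```c
/* Goto-based unwinding: each label undoes exactly one earlier step. */
#include <stdio.h>
#include <stdlib.h>

static int bind_resource(void)
{
	char *node, *pages;
	int ret = 0;

	pages = malloc(64);     /* step 1: pin backing pages */
	if (!pages)
		return -1;

	node = malloc(32);      /* step 2: reserve an address node */
	if (!node) {
		ret = -1;
		goto err_unpin; /* unwind step 1 only */
	}

	printf("bound\n");
	free(node);
err_unpin:
	free(pages);
	return ret;
}

int main(void)
{
	return bind_resource() ? 1 : 0;
}
```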
@@ -4051,21 +4072,18 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	trace_i915_gem_object_destroy(obj);

+	/* All file-owned VMA should have been released by this point through
+	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
+	 * However, the object may also be bound into the global GTT (e.g.
+	 * older GPUs without per-process support, or for direct access through
+	 * the GTT either for the user or for scanout). Those VMA still need to
+	 * be unbound now.
+	 */
 	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
-		int ret;
+		GEM_BUG_ON(!vma->is_ggtt);
+		GEM_BUG_ON(i915_vma_is_active(vma));
 		vma->pin_count = 0;
-		ret = __i915_vma_unbind_no_wait(vma);
-		if (WARN_ON(ret == -ERESTARTSYS)) {
-			bool was_interruptible;
-
-			was_interruptible = dev_priv->mm.interruptible;
-			dev_priv->mm.interruptible = false;
-
-			WARN_ON(i915_vma_unbind(vma));
-
-			dev_priv->mm.interruptible = was_interruptible;
-		}
+		i915_vma_close(vma);
 	}
 	GEM_BUG_ON(obj->bind_count);
@@ -4129,22 +4147,6 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 	return NULL;
 }

-void i915_gem_vma_destroy(struct i915_vma *vma)
-{
-	WARN_ON(vma->node.allocated);
-
-	/* Keep the vma as a placeholder in the execbuffer reservation lists */
-	if (!list_empty(&vma->exec_list))
-		return;
-
-	if (!vma->is_ggtt)
-		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
-	list_del(&vma->obj_link);
-
-	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
-}
-
 static void
 i915_gem_stop_engines(struct drm_device *dev)
 {
...
@@ -182,8 +182,8 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 					       struct i915_vma,
 					       exec_list);
 		if (drm_mm_scan_remove_block(&vma->node)) {
-			vma->pin_count++;
 			list_move(&vma->exec_list, &eviction_list);
+			i915_gem_object_get(vma->obj);
 			continue;
 		}
 		list_del_init(&vma->exec_list);
@@ -191,18 +191,14 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	/* Unbinding will emit any required flushes */
 	while (!list_empty(&eviction_list)) {
+		struct drm_i915_gem_object *obj;
 		vma = list_first_entry(&eviction_list,
 				       struct i915_vma,
 				       exec_list);
+
+		obj = vma->obj;
 		list_del_init(&vma->exec_list);
-		vma->pin_count--;
 		if (ret == 0)
 			ret = i915_vma_unbind(vma);
+
+		i915_gem_object_put(obj);
 	}

 	return ret;
...
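One plausible reading of the eviction change above: since unbinding can now destroy a closed vma, the loop no longer parks a vma pin across the pass; instead it holds a reference on the refcounted object, which is safe to keep and drop regardless of what happens to the vma. A minimal refcount sketch (mock names, not driver code):

```c
/* Why a refcounted object reference is the safe thing to hold:
 * it keeps the object alive across the unbind even if the last
 * userspace handle goes away mid-loop.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct mock_obj {
	int refcount;
};

static struct mock_obj *mock_obj_get(struct mock_obj *obj)
{
	obj->refcount++;
	return obj;
}

static void mock_obj_put(struct mock_obj *obj)
{
	if (--obj->refcount == 0) {
		printf("object freed\n");
		free(obj);
	}
}

int main(void)
{
	struct mock_obj *obj = calloc(1, sizeof(*obj));

	obj->refcount = 1;          /* handle reference */
	mock_obj_get(obj);          /* eviction-list reference */

	mock_obj_put(obj);          /* handle closed mid-eviction... */
	assert(obj->refcount == 1); /* ...object survives for unbind */

	mock_obj_put(obj);          /* eviction done: object freed */
	return 0;
}
```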
@@ -3342,6 +3342,31 @@ i915_vma_retire(struct i915_gem_active *active,
 		return;

 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+	if (unlikely(vma->closed && !vma->pin_count))
+		WARN_ON(i915_vma_unbind(vma));
+}
+
+void i915_vma_destroy(struct i915_vma *vma)
+{
+	GEM_BUG_ON(vma->node.allocated);
+	GEM_BUG_ON(i915_vma_is_active(vma));
+	GEM_BUG_ON(!vma->closed);
+
+	list_del(&vma->vm_link);
+	if (!vma->is_ggtt)
+		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
+
+	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+}
+
+void i915_vma_close(struct i915_vma *vma)
+{
+	GEM_BUG_ON(vma->closed);
+	vma->closed = true;
+
+	list_del_init(&vma->obj_link);
+	if (!i915_vma_is_active(vma) && !vma->pin_count)
+		WARN_ON(__i915_vma_unbind_no_wait(vma));
 }

 static struct i915_vma *
...
@@ -189,6 +189,7 @@ struct i915_vma {
 #define LOCAL_BIND	(1<<1)
 	unsigned int bound : 4;
 	bool is_ggtt : 1;
+	bool closed : 1;

 	/**
 	 * Support different GGTT views into the same object.
...
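The new flag slots into the existing bitfield block. A quick standalone check (struct name invented for illustration; exact sizes are ABI-dependent) that a 1-bit flag packs alongside the existing bits rather than growing the struct:

```c
/* Bitfield packing check with illustrative names. */
#include <stdbool.h>
#include <stdio.h>

struct vma_flags {
	unsigned int bound : 4;
	bool is_ggtt : 1;
	bool closed : 1;
};

int main(void)
{
	struct vma_flags f = { .bound = 3, .is_ggtt = true, .closed = true };

	printf("sizeof(struct vma_flags) = %zu\n", sizeof(f));
	printf("bound=%u is_ggtt=%d closed=%d\n", f.bound, f.is_ggtt, f.closed);
	return 0;
}
```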