Commit 1488fc08 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Remove the deferred-free list

The use of the mm_list by the deferred-free list breaks the following
patches, which extend the range of objects tracked. We can simplify
things if we just make the unbind during free uninterruptible.

Note that unbinding should never fail, because we hold an additional
reference on every active object. Only the ilk vt-d workaround breaks
this, but it already takes care of not failing by waiting
non-interruptibly for the gpu to quiesce. The existence of the
deferred-free list cast some doubt on this theory, though, hence WARN if
the unbind fails and only then retry non-interruptibly.

We can kill this additional code after a release, once the theory has
proven right and no one has hit that WARN.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 1b50247a
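
The heart of the change is the new fallback in i915_gem_free_object()
(see the i915_gem.c hunk below). As a minimal sketch, condensed from
that hunk rather than quoted verbatim: try the normal interruptible
unbind, and only if a signal interrupts it, WARN, flip the global
dev_priv->mm.interruptible flag off so the GPU waits run to completion,
retry, and restore the flag:

	obj->pin_count = 0;
	if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
		bool was_interruptible;

		/* A signal broke the first unbind: disable interruptible
		 * waits for the retry, which then can no longer return
		 * -ERESTARTSYS, and restore the caller's setting after. */
		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		WARN_ON(i915_gem_object_unbind(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

Saving and restoring was_interruptible keeps the non-interruptible mode
scoped to this one retry instead of leaking into later waits.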
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -47,7 +47,6 @@ enum {
 	FLUSHING_LIST,
 	INACTIVE_LIST,
 	PINNED_LIST,
-	DEFERRED_FREE_LIST,
 };
 
 static const char *yesno(int v)
@@ -182,10 +181,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 		seq_printf(m, "Flushing:\n");
 		head = &dev_priv->mm.flushing_list;
 		break;
-	case DEFERRED_FREE_LIST:
-		seq_printf(m, "Deferred free:\n");
-		head = &dev_priv->mm.deferred_free_list;
-		break;
 	default:
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
@@ -252,11 +247,6 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
-	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
-	seq_printf(m, "  %u [%u] freed objects, %zu [%zu] bytes\n",
-		   count, mappable_count, size, mappable_size);
-
 	size = count = mappable_size = mappable_count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 		if (obj->fault_mappable) {
@@ -1840,7 +1830,6 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
-	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -692,14 +692,6 @@ typedef struct drm_i915_private {
 		/** LRU list of objects with fence regs on them. */
 		struct list_head fence_list;
 
-		/**
-		 * List of objects currently pending being freed.
-		 *
-		 * These objects are no longer in use, but due to a signal
-		 * we were prevented from freeing them at the appointed time.
-		 */
-		struct list_head deferred_free_list;
-
 		/**
 		 * We leave the user IRQ off as much as possible,
 		 * but this means that requests will finish and never
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,7 +46,6 @@ static int i915_gem_phys_pwrite(struct drm_device *dev,
 				struct drm_i915_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file);
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj);
@@ -1782,20 +1781,6 @@ i915_gem_retire_requests(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
 
-	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
-		struct drm_i915_gem_object *obj, *next;
-
-		/* We must be careful that during unbind() we do not
-		 * accidentally infinitely recurse into retire requests.
-		 * Currently:
-		 *     retire -> free -> unbind -> wait -> retire_ring
-		 */
-		list_for_each_entry_safe(obj, next,
-					 &dev_priv->mm.deferred_free_list,
-					 mm_list)
-			i915_gem_free_object_tail(obj);
-	}
-
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		i915_gem_retire_requests_ring(&dev_priv->ring[i]);
 }
@@ -2067,7 +2052,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	}
 
 	ret = i915_gem_object_finish_gpu(obj);
-	if (ret == -ERESTARTSYS)
+	if (ret)
 		return ret;
 	/* Continue on if we fail due to EIO, the GPU is hung so we
 	 * should be safe and we need to cleanup or else we might
@@ -2094,7 +2079,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	/* release the fence reg _after_ flushing */
 	ret = i915_gem_object_put_fence(obj);
-	if (ret == -ERESTARTSYS)
+	if (ret)
 		return ret;
 
 	trace_i915_gem_object_unbind(obj);
@@ -3377,21 +3362,29 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	return 0;
 }
 
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
-
-	ret = i915_gem_object_unbind(obj);
-	if (ret == -ERESTARTSYS) {
-		list_move(&obj->mm_list,
-			  &dev_priv->mm.deferred_free_list);
-		return;
-	}
 
 	trace_i915_gem_object_destroy(obj);
 
+	if (obj->phys_obj)
+		i915_gem_detach_phys_object(dev, obj);
+
+	obj->pin_count = 0;
+	if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
+		bool was_interruptible;
+
+		was_interruptible = dev_priv->mm.interruptible;
+		dev_priv->mm.interruptible = false;
+
+		WARN_ON(i915_gem_object_unbind(obj));
+
+		dev_priv->mm.interruptible = was_interruptible;
+	}
+
 	if (obj->base.map_list.map)
 		drm_gem_free_mmap_offset(&obj->base);
@@ -3402,18 +3395,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
 	kfree(obj);
 }
 
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
-	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-	struct drm_device *dev = obj->base.dev;
-
-	if (obj->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
-	obj->pin_count = 0;
-	i915_gem_free_object_tail(obj);
-}
-
 int
 i915_gem_idle(struct drm_device *dev)
 {
@@ -3679,7 +3660,6 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		init_ring_lists(&dev_priv->ring[i]);