Commit 7788a765 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Avoid unbinding due to an interrupted pin_and_fence during execbuffer

If we need to stall in order to complete the pin_and_fence operation
during execbuffer reservation, there is a high likelihood that the
operation will be interrupted by a signal (thanks X!). In order to
simplify the cleanup along that error path, the object was
unconditionally unbound and the error propagated. However, being
interrupted here is far more common than I would like and so we can
strive to avoid the extra work by eliminating the forced unbind.

v2: In discussion over the indecent colour of the new functions and
unwind path, we realised that we can use the new unreserve function to
clean up the code even further.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 050ee91f
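
Editor's note: the heart of the change is that the new reserve helper records what it actually acquired in per-object flags (__EXEC_OBJECT_HAS_PIN, __EXEC_OBJECT_HAS_FENCE), and the matching unreserve helper releases exactly those resources without touching the GTT binding, so an interrupted reservation can simply be retried. The standalone C sketch below illustrates that flag-based reserve/unreserve pattern outside the kernel under simplified assumptions; the fake_obj structure and the fake_* helpers are invented for illustration only and are not the i915 code.

/*
 * Minimal userspace sketch of the reserve/unreserve pattern this patch
 * introduces; NOT the i915 implementation.  struct fake_obj, fake_pin(),
 * fake_get_fence(), fake_unpin() and fake_unpin_fence() are hypothetical.
 * The point shown: each step records what it acquired in per-object flags,
 * so an interrupted reservation (-EINTR) is unwound by releasing exactly
 * those resources, without unbinding the object.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define HAS_PIN   (1u << 31)
#define HAS_FENCE (1u << 30)

struct fake_obj {
        const char   *name;
        unsigned int  flags;    /* what reserve managed to acquire */
        bool          bound;    /* analogue of obj->gtt_space */
};

/* Pretend pin/fence operations; fake_get_fence() "is interrupted". */
static int fake_pin(struct fake_obj *obj)        { obj->bound = true; return 0; }
static int fake_get_fence(struct fake_obj *obj)  { (void)obj; return -EINTR; }
static void fake_unpin(struct fake_obj *obj)     { printf("unpin %s\n", obj->name); }
static void fake_unpin_fence(struct fake_obj *obj) { printf("unpin fence %s\n", obj->name); }

static int reserve_object(struct fake_obj *obj, bool needs_fence)
{
        int ret = fake_pin(obj);
        if (ret)
                return ret;
        obj->flags |= HAS_PIN;

        if (needs_fence) {
                ret = fake_get_fence(obj);
                if (ret)
                        return ret;     /* no forced unbind; unreserve cleans up */
                obj->flags |= HAS_FENCE;
        }
        return 0;
}

static void unreserve_object(struct fake_obj *obj)
{
        if (!obj->bound)
                return;
        if (obj->flags & HAS_FENCE)
                fake_unpin_fence(obj);
        if (obj->flags & HAS_PIN)
                fake_unpin(obj);
        obj->flags &= ~(HAS_FENCE | HAS_PIN);
}

int main(void)
{
        struct fake_obj objs[2] = { { .name = "batch" }, { .name = "scanout" } };
        int i, ret = 0;

        for (i = 0; i < 2; i++) {
                ret = reserve_object(&objs[i], /*needs_fence=*/i == 1);
                if (ret)
                        break;
        }

        /* Single unwind path: undo whatever each object actually acquired. */
        for (i = 0; i < 2; i++)
                unreserve_object(&objs[i]);

        /* The objects stay bound; on -EINTR the caller would simply retry. */
        return ret == -EINTR ? 0 : ret;
}

Because objects that were never pinned have no flags set and are skipped, the single cleanup loop can walk every object unconditionally, which is what lets the patch collapse the old err: unwind and the "decrement pin count" loop into one path.
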
@@ -331,7 +331,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 	return ret;
 }
 
-#define __EXEC_OBJECT_HAS_FENCE (1<<31)
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
 need_reloc_mappable(struct drm_i915_gem_object *obj)
@@ -341,9 +342,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
 }
 
 static int
-pin_and_fence_object(struct drm_i915_gem_object *obj,
-		     struct intel_ring_buffer *ring)
+i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
+				   struct intel_ring_buffer *ring)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
@@ -359,11 +361,13 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
 	if (has_fenced_gpu_access) {
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			ret = i915_gem_object_get_fence(obj);
 			if (ret)
-				goto err_unpin;
+				return ret;
 
 			if (i915_gem_object_pin_fence(obj))
 				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -372,12 +376,35 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
 		}
 	}
 
+	/* Ensure ppgtt mapping exists if needed */
+	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+				       obj, obj->cache_level);
+
+		obj->has_aliasing_ppgtt_mapping = 1;
+	}
+
 	entry->offset = obj->gtt_offset;
 	return 0;
+}
 
-err_unpin:
-	i915_gem_object_unpin(obj);
-	return ret;
+static void
+i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+
+	if (!obj->gtt_space)
+		return;
+
+	entry = obj->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
 
 static int
@@ -385,11 +412,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct drm_file *file,
 			    struct list_head *objects)
 {
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	int ret, retry;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	struct list_head ordered_objects;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	int retry;
 
 	INIT_LIST_HEAD(&ordered_objects);
 	while (!list_empty(objects)) {
@@ -427,12 +453,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 	 * 2. Bind new objects.
 	 * 3. Decrement pin count.
 	 *
-	 * This avoid unnecessary unbinding of later objects in order to makr
+	 * This avoid unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
 	 */
 	retry = 0;
 	do {
-		ret = 0;
+		int ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
 		list_for_each_entry(obj, objects, exec_list) {
@@ -452,7 +478,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    (need_mappable && !obj->map_and_fenceable))
 				ret = i915_gem_object_unbind(obj);
 			else
-				ret = pin_and_fence_object(obj, ring);
+				ret = i915_gem_execbuffer_reserve_object(obj, ring);
 			if (ret)
 				goto err;
 		}
@@ -462,46 +488,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			if (obj->gtt_space)
 				continue;
 
-			ret = pin_and_fence_object(obj, ring);
-			if (ret) {
-				int ret_ignore;
-
-				/* This can potentially raise a harmless
-				 * -EINVAL if we failed to bind in the above
-				 * call. It cannot raise -EINTR since we know
-				 * that the bo is freshly bound and so will
-				 * not need to be flushed or waited upon.
-				 */
-				ret_ignore = i915_gem_object_unbind(obj);
-				(void)ret_ignore;
-				WARN_ON(obj->gtt_space);
-				break;
-			}
-		}
-
-		/* Decrement pin count for bound objects */
-		list_for_each_entry(obj, objects, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry;
-
-			if (!obj->gtt_space)
-				continue;
-
-			entry = obj->exec_entry;
-			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				i915_gem_object_unpin_fence(obj);
-				entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
-			}
-
-			i915_gem_object_unpin(obj);
-
-			/* ... and ensure ppgtt mapping exist if needed. */
-			if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-				i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-						       obj, obj->cache_level);
-
-				obj->has_aliasing_ppgtt_mapping = 1;
-			}
-		}
+			ret = i915_gem_execbuffer_reserve_object(obj, ring);
+			if (ret)
+				goto err;
+		}
+
+err:		/* Decrement pin count for bound objects */
+		list_for_each_entry(obj, objects, exec_list)
+			i915_gem_execbuffer_unreserve_object(obj);
 
 		if (ret != -ENOSPC || retry++)
 			return ret;
@@ -510,24 +504,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		if (ret)
 			return ret;
 	} while (1);
-
-err:
-	list_for_each_entry_continue_reverse(obj, objects, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry;
-
-		if (!obj->gtt_space)
-			continue;
-
-		entry = obj->exec_entry;
-		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-			i915_gem_object_unpin_fence(obj);
-			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
-		}
-
-		i915_gem_object_unpin(obj);
-	}
-
-	return ret;
 }
 
 static int