Commit cd2a4eaf authored by Chris Wilson

drm/i915: Report resv_obj allocation failure

Since commit 64d6c500 ("drm/i915: Generalise GPU activity
tracking"), we have been prepared for i915_vma_move_to_active() to fail.
We can take advantage of this to report the failure to allocate the
shared-fence slot in the reservation_object.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190730205805.3733-1-chris@chris-wilson.co.uk
parent e4661f14
@@ -886,23 +886,6 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
 		list_del(&vma->obj->userfault_link);
 }
 
-static void export_fence(struct i915_vma *vma,
-			 struct i915_request *rq,
-			 unsigned int flags)
-{
-	struct reservation_object *resv = vma->resv;
-
-	/*
-	 * Ignore errors from failing to allocate the new fence, we can't
-	 * handle an error right now. Worst case should be missed
-	 * synchronisation leading to rendering corruption.
-	 */
-	if (flags & EXEC_OBJECT_WRITE)
-		reservation_object_add_excl_fence(resv, &rq->fence);
-	else if (reservation_object_reserve_shared(resv, 1) == 0)
-		reservation_object_add_shared_fence(resv, &rq->fence);
-}
-
 int i915_vma_move_to_active(struct i915_vma *vma,
 			    struct i915_request *rq,
 			    unsigned int flags)
@@ -926,14 +909,20 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	if (unlikely(err))
 		return err;
 
-	obj->write_domain = 0;
 	if (flags & EXEC_OBJECT_WRITE) {
-		obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
 		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
 			__i915_active_request_set(&obj->frontbuffer_write, rq);
 
+		reservation_object_add_excl_fence(vma->resv, &rq->fence);
+		obj->write_domain = I915_GEM_DOMAIN_RENDER;
 		obj->read_domains = 0;
+	} else {
+		err = reservation_object_reserve_shared(vma->resv, 1);
+		if (unlikely(err))
+			return err;
+
+		reservation_object_add_shared_fence(vma->resv, &rq->fence);
+		obj->write_domain = 0;
 	}
 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
 	obj->mm.dirty = true;
@@ -941,8 +930,6 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	if (flags & EXEC_OBJECT_NEEDS_FENCE)
 		__i915_active_request_set(&vma->last_fence, rq);
 
-	export_fence(vma, rq, flags);
-
 	GEM_BUG_ON(!i915_vma_is_active(vma));
 	return 0;
 }
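With the reserve_shared() failure now surfaced, callers of i915_vma_move_to_active() must propagate the error rather than assume success. A minimal caller-side sketch follows; mark_vmas_active() is a hypothetical helper for illustration, not part of this patch:

/*
 * Hypothetical sketch: after this patch, a failure to allocate the
 * shared-fence slot (-ENOMEM from reservation_object_reserve_shared())
 * is returned by i915_vma_move_to_active() and must be propagated.
 */
static int mark_vmas_active(struct i915_vma **vma, unsigned int count,
			    struct i915_request *rq, unsigned int flags)
{
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		err = i915_vma_move_to_active(vma[i], rq, flags);
		if (unlikely(err))
			return err; /* e.g. -ENOMEM; abort request setup */
	}

	return 0;
}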