Commit 2cbb8d4d authored by Christian König

drm/i915: use new iterator in i915_gem_object_wait_reservation

Simplifying the code a bit.
Signed-off-by: Christian König <christian.koenig@amd.com>
[mlankhorst: Handle timeout = 0 correctly, use new i915_request_wait_timeout.]
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20211116102431.198905-7-christian.koenig@amd.com
parent 7e2e69ed
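
For orientation before the diff, here is a minimal sketch of the unlocked dma_resv iterator pattern the patch switches to. It is illustrative only: the helper name and the plain dma_fence_wait_timeout() call are assumptions for the example, not part of the patch, which keeps using i915_gem_object_wait_fence().

```c
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/*
 * Hypothetical helper showing the iterator pattern: dma_resv_iter_begin(),
 * dma_resv_for_each_fence_unlocked() and dma_resv_iter_end() replace the
 * manual dma_resv_get_fences() snapshot and its dma_fence_put()/kfree()
 * bookkeeping; the cursor restarts by itself if the reservation object is
 * modified while we iterate.
 */
static long example_wait_all_fences(struct dma_resv *resv, long timeout)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	long ret = timeout ?: 1;	/* an object with no fences counts as signaled */

	/* true: walk all fences, not just the exclusive one */
	dma_resv_iter_begin(&cursor, resv, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		ret = dma_fence_wait_timeout(fence, true, timeout);
		if (ret <= 0)
			break;

		/* carry the remaining time into the wait for the next fence */
		if (timeout)
			timeout = ret;
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
```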
@@ -25,7 +25,7 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 		return timeout;
 
 	if (dma_fence_is_i915(fence))
-		return i915_request_wait(to_request(fence), flags, timeout);
+		return i915_request_wait_timeout(to_request(fence), flags, timeout);
 
 	return dma_fence_wait_timeout(fence,
 				      flags & I915_WAIT_INTERRUPTIBLE,
@@ -37,58 +37,29 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,
 				 unsigned int flags,
 				 long timeout)
 {
-	struct dma_fence *excl;
-	bool prune_fences = false;
-
-	if (flags & I915_WAIT_ALL) {
-		struct dma_fence **shared;
-		unsigned int count, i;
-		int ret;
-
-		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
-		if (ret)
-			return ret;
-
-		for (i = 0; i < count; i++) {
-			timeout = i915_gem_object_wait_fence(shared[i],
-							     flags, timeout);
-			if (timeout < 0)
-				break;
-
-			dma_fence_put(shared[i]);
-		}
-
-		for (; i < count; i++)
-			dma_fence_put(shared[i]);
-
-		kfree(shared);
-
-		/*
-		 * If both shared fences and an exclusive fence exist,
-		 * then by construction the shared fences must be later
-		 * than the exclusive fence. If we successfully wait for
-		 * all the shared fences, we know that the exclusive fence
-		 * must all be signaled. If all the shared fences are
-		 * signaled, we can prune the array and recover the
-		 * floating references on the fences/requests.
-		 */
-		prune_fences = count && timeout >= 0;
-	} else {
-		excl = dma_resv_get_excl_unlocked(resv);
-	}
-
-	if (excl && timeout >= 0)
-		timeout = i915_gem_object_wait_fence(excl, flags, timeout);
-
-	dma_fence_put(excl);
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
+	long ret = timeout ?: 1;
+
+	dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+		ret = i915_gem_object_wait_fence(fence, flags, timeout);
+		if (ret <= 0)
+			break;
+
+		if (timeout)
+			timeout = ret;
+	}
+	dma_resv_iter_end(&cursor);
 
 	/*
 	 * Opportunistically prune the fences iff we know they have *all* been
 	 * signaled.
 	 */
-	if (prune_fences)
+	if (timeout > 0)
 		dma_resv_prune(resv);
 
-	return timeout;
+	return ret;
 }
 
 static void fence_set_priority(struct dma_fence *fence,
@@ -177,7 +148,11 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
 
 	timeout = i915_gem_object_wait_reservation(obj->base.resv,
 						   flags, timeout);
-	return timeout < 0 ? timeout : 0;
+
+	if (timeout < 0)
+		return timeout;
+
+	return !timeout ? -ETIME : 0;
 }
 
 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
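
And a hedged caller-side fragment for the last hunk (illustrative, not from this patch; the helper name and the 10 ms timeout are made up): with the reworked return handling, i915_gem_object_wait() reports an expired wait as -ETIME instead of 0, passes other negative errors through, and still returns 0 once all requested fences have signaled.

```c
/*
 * Hypothetical caller inside the i915 driver; the declarations for
 * i915_gem_object_wait() and the I915_WAIT_* flags are assumed to be
 * in scope via the driver's own headers.
 */
static int example_wait_for_object(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
				   msecs_to_jiffies(10));
	if (err == -ETIME)
		return err;	/* fences were still busy when the timeout expired */
	if (err < 0)
		return err;	/* interrupted or another error from the wait path */

	return 0;		/* all requested fences have signaled */
}
```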