Commit af5c6fcf authored by Chris Wilson's avatar Chris Wilson Committed by Joonas Lahtinen

drm/i915: Provide a fastpath for waiting on vma bindings

Before we can execute a request, we must wait for all of its vma to be
bound. This is a frequent operation for which we can optimise away a
few atomic operations (notably a cmpxchg) in lieu of the RCU protection.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Thomas Hellström <thomas.hellstrom@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200731085015.32368-7-chris@chris-wilson.co.uk
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: default avatarJoonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 9ff33bbc
...@@ -231,4 +231,19 @@ struct i915_active *i915_active_create(void); ...@@ -231,4 +231,19 @@ struct i915_active *i915_active_create(void);
struct i915_active *i915_active_get(struct i915_active *ref); struct i915_active *i915_active_get(struct i915_active *ref);
void i915_active_put(struct i915_active *ref); void i915_active_put(struct i915_active *ref);
/*
 * Make @rq wait on the exclusive fence tracked by @active, if any.
 *
 * Takes a reference on the exclusive fence so it cannot be freed while
 * the await is being set up, and drops that reference before returning.
 * Returns 0 when there is no exclusive fence (nothing to wait on) or the
 * await was queued; otherwise returns the error from
 * i915_request_await_dma_fence().
 */
static inline int __i915_request_await_exclusive(struct i915_request *rq,
						 struct i915_active *active)
{
	struct dma_fence *excl;
	int ret;

	excl = i915_active_fence_get(&active->excl);
	if (!excl)
		return 0;

	ret = i915_request_await_dma_fence(rq, excl);
	dma_fence_put(excl);

	return ret;
}
#endif /* _I915_ACTIVE_H_ */ #endif /* _I915_ACTIVE_H_ */
...@@ -1167,6 +1167,12 @@ void i915_vma_revoke_mmap(struct i915_vma *vma) ...@@ -1167,6 +1167,12 @@ void i915_vma_revoke_mmap(struct i915_vma *vma)
list_del(&vma->obj->userfault_link); list_del(&vma->obj->userfault_link);
} }
/*
 * Wait for @vma's binding to complete before @rq executes: forwards to
 * __i915_request_await_exclusive() on the vma's activity tracker, so the
 * request awaits the exclusive (binding) fence in &vma->active, if any.
 * Returns 0 on success or a negative error code.
 */
static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
	return __i915_request_await_exclusive(rq, &vma->active);
}
int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{ {
int err; int err;
...@@ -1174,8 +1180,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) ...@@ -1174,8 +1180,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
GEM_BUG_ON(!i915_vma_is_pinned(vma)); GEM_BUG_ON(!i915_vma_is_pinned(vma));
/* Wait for the vma to be bound before we start! */ /* Wait for the vma to be bound before we start! */
err = i915_request_await_active(rq, &vma->active, err = __i915_request_await_bind(rq, vma);
I915_ACTIVE_AWAIT_EXCL);
if (err) if (err)
return err; return err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment