Commit 5652df82 authored by Thomas Hellström

drm/i915/ttm: Update i915_gem_obj_copy_ttm() to be asynchronous

Update the copy function i915_gem_obj_copy_ttm() to be asynchronous for
future users, and update its only current user to sync the objects as
needed after calling it.
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211122214554.371864-7-thomas.hellstrom@linux.intel.com
parent 6385eb7a
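
With this change, i915_gem_obj_copy_ttm() only schedules the copy and publishes the copy fence on the objects' reservation objects; callers that need the result must sync explicitly, as the backup/restore hunks below do. A minimal caller-side sketch (illustrative only, not part of the patch; assumes dst and src are held and uses i915_gem_to_ttm() to reach the TTM object):

	struct ttm_operation_ctx ctx = { .interruptible = false };
	struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
	int err;

	assert_object_held(dst);
	assert_object_held(src);

	/* Schedules the copy; the copy fence lands on dst's resv. */
	err = i915_gem_obj_copy_ttm(dst, src, allow_accel, false);
	if (err)
		return err;

	/* Wait for the async copy before relying on dst's contents. */
	err = ttm_bo_wait_ctx(dst_bo, &ctx);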
@@ -826,33 +826,49 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
 		.interruptible = intr,
 	};
 	struct i915_refct_sgt *dst_rsgt;
-	struct dma_fence *copy_fence;
-	int ret;
+	struct dma_fence *copy_fence, *dep_fence;
+	struct i915_deps deps;
+	int ret, shared_err;
 
 	assert_object_held(dst);
 	assert_object_held(src);
+	i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 
 	/*
-	 * Sync for now. This will change with async moves.
+	 * We plan to add a shared fence only for the source. If that
+	 * fails, we await all source fences before commencing
+	 * the copy instead of only the exclusive.
 	 */
-	ret = ttm_bo_wait_ctx(dst_bo, &ctx);
+	shared_err = dma_resv_reserve_shared(src_bo->base.resv, 1);
+	ret = i915_deps_add_resv(&deps, dst_bo->base.resv, true, false, &ctx);
 	if (!ret)
-		ret = ttm_bo_wait_ctx(src_bo, &ctx);
+		ret = i915_deps_add_resv(&deps, src_bo->base.resv,
+					 !!shared_err, false, &ctx);
 	if (ret)
 		return ret;
 
+	dep_fence = i915_deps_to_fence(&deps, &ctx);
+	if (IS_ERR(dep_fence))
+		return PTR_ERR(dep_fence);
+
 	dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource);
 	copy_fence = __i915_ttm_move(src_bo, false, dst_bo->resource,
-				     dst_bo->ttm, dst_rsgt, allow_accel, NULL);
+				     dst_bo->ttm, dst_rsgt, allow_accel,
+				     dep_fence);
 
 	i915_refct_sgt_put(dst_rsgt);
-	if (IS_ERR(copy_fence))
-		return PTR_ERR(copy_fence);
+	if (IS_ERR_OR_NULL(copy_fence))
+		return PTR_ERR_OR_ZERO(copy_fence);
 
-	if (copy_fence) {
-		dma_fence_wait(copy_fence, false);
-		dma_fence_put(copy_fence);
-	}
+	dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence);
+
+	/* If we failed to reserve a shared slot, add an exclusive fence */
+	if (shared_err)
+		dma_resv_add_excl_fence(src_bo->base.resv, copy_fence);
+	else
+		dma_resv_add_shared_fence(src_bo->base.resv, copy_fence);
+
+	dma_fence_put(copy_fence);
 
 	return 0;
 }
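
The dependency gathering in this hunk follows a small reusable pattern built on the i915_deps helpers this series relies on; a minimal sketch (illustrative, with bo and ctx as assumed locals):

	struct i915_deps deps;
	struct dma_fence *fence;
	int ret;

	/* Collect every fence the copy must wait for. */
	i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
	ret = i915_deps_add_resv(&deps, bo->base.resv, true, false, &ctx);
	if (ret)
		return ret;

	/* Collapse the collected fences into one dependency fence. */
	fence = i915_deps_to_fence(&deps, &ctx);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

Note the ordering in the hunk: the shared slot on the source is reserved before the copy is scheduled, so a reservation failure can be handled by falling back to an exclusive fence rather than by failing the copy after the fact.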
@@ -80,6 +80,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
 
 	err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
 	GEM_WARN_ON(err);
+	ttm_bo_wait_ctx(backup_bo, &ctx);
 
 	obj->ttm.backup = backup;
 	return 0;
@@ -170,6 +171,7 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
 	err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
 				    false);
 	GEM_WARN_ON(err);
+	ttm_bo_wait_ctx(backup_bo, &ctx);
 
 	obj->ttm.backup = NULL;
 	err = 0;
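
Since the copy no longer waits internally, both paths above sync with ttm_bo_wait_ctx() on the backup object before publishing it. A caller that needs to stay interruptible could wait with an interruptible context instead (sketch; fields per struct ttm_operation_ctx):

	struct ttm_operation_ctx ctx = {
		.interruptible = true,	/* a signal may abort the wait */
		.no_wait_gpu = false,
	};
	int err = ttm_bo_wait_ctx(backup_bo, &ctx);

	if (err)	/* typically -ERESTARTSYS when interrupted */
		return err;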