Commit 3690a01b authored by Thomas Hellström, committed by Rodrigo Vivi

drm/xe: Support copying of data between system memory bos

Modify the xe_migrate_copy() function to explicitly allow copying of
data between two buffer objects, including system memory buffer
objects. Update the migrate test accordingly.

v2:
- Check that buffer object sizes match when copying (Matthew Auld)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 9922bb40
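
For context when reading the diff below, here is a minimal caller-side sketch of the new two-BO form. The helper name and surrounding setup are hypothetical and not part of this patch; only xe_migrate_copy(), the ttm.resource fields and the dma_fence handling come from the code being changed.

#include <linux/dma-fence.h>
#include <linux/err.h>

#include "xe_bo.h"
#include "xe_migrate.h"

/*
 * Hypothetical helper (illustration only): copy the contents of one buffer
 * object into another of the same size, e.g. a system memory BO into a
 * VRAM BO, using the updated xe_migrate_copy() signature.
 */
static int copy_bo_to_bo(struct xe_migrate *m,
			 struct xe_bo *src_bo, struct xe_bo *dst_bo)
{
	struct dma_fence *fence;
	int err;

	/*
	 * Both BOs are now passed explicitly. With two distinct BOs the
	 * function checks that the sizes match and (for now) rejects copies
	 * that would also require a flat-CCS metadata copy.
	 */
	fence = xe_migrate_copy(m, src_bo, dst_bo,
				src_bo->ttm.resource, dst_bo->ttm.resource);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* Wait uninterruptibly for the last copy batch to complete. */
	err = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return err;
}
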
@@ -150,7 +150,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
 	xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size);
 
 	expected = 0xc0c0c0c0c0c0c0c0;
-	fence = xe_migrate_copy(m, sysmem, sysmem->ttm.resource,
+	fence = xe_migrate_copy(m, sysmem, bo, sysmem->ttm.resource,
 				bo->ttm.resource);
 	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo sysmem -> vram" :
 				 "Copying small bo sysmem -> vram", test)) {
@@ -167,7 +167,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
 	xe_map_memset(xe, &sysmem->vmap, 0, 0xd0, sysmem->size);
 	xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size);
 
-	fence = xe_migrate_copy(m, sysmem, bo->ttm.resource,
+	fence = xe_migrate_copy(m, bo, sysmem, bo->ttm.resource,
 				sysmem->ttm.resource);
 	if (!sanity_fence_failed(xe, fence, big ? "Copying big bo vram -> sysmem" :
 				 "Copying small bo vram -> sysmem", test)) {
@@ -347,10 +347,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 	retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 4, u32);
 	check(retval, expected, "Command clear small last value", test);
 
-	if (IS_DGFX(xe)) {
-		kunit_info(test, "Copying small buffer object to system\n");
-		test_copy(m, tiny, test);
-	}
+	kunit_info(test, "Copying small buffer object to system\n");
+	test_copy(m, tiny, test);
 
 	/* Clear a big bo */
 	kunit_info(test, "Clearing big buffer object\n");
@@ -366,10 +364,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 	retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32);
 	check(retval, expected, "Command clear big last value", test);
 
-	if (IS_DGFX(xe)) {
-		kunit_info(test, "Copying big buffer object to system\n");
-		test_copy(m, big, test);
-	}
+	kunit_info(test, "Copying big buffer object to system\n");
+	test_copy(m, big, test);
 
 	kunit_info(test, "Testing page table update using CPU if GPU idle.\n");
 	test_pt_update(m, pt, test, false);
...
@@ -679,7 +679,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	if (move_lacks_source)
 		fence = xe_migrate_clear(gt->migrate, bo, new_mem);
 	else
-		fence = xe_migrate_copy(gt->migrate, bo, old_mem, new_mem);
+		fence = xe_migrate_copy(gt->migrate, bo, bo, old_mem, new_mem);
 	if (IS_ERR(fence)) {
 		ret = PTR_ERR(fence);
 		xe_device_mem_access_put(xe);
...
@@ -582,30 +582,31 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
 /**
  * xe_migrate_copy() - Copy content of TTM resources.
  * @m: The migration context.
- * @bo: The buffer object @src is currently bound to.
+ * @src_bo: The buffer object @src is currently bound to.
+ * @dst_bo: If copying between resources created for the same bo, set this to
+ * the same value as @src_bo. If copying between buffer objects, set it to
+ * the buffer object @dst is currently bound to.
  * @src: The source TTM resource.
  * @dst: The dst TTM resource.
  *
  * Copies the contents of @src to @dst: On flat CCS devices,
  * the CCS metadata is copied as well if needed, or if not present,
  * the CCS metadata of @dst is cleared for security reasons.
- * It's currently not possible to copy between two system resources,
- * since that would require two TTM page-vectors.
- * TODO: Eliminate the @bo argument and supply two TTM page-vectors.
  *
  * Return: Pointer to a dma_fence representing the last copy batch, or
  * an error pointer on failure. If there is a failure, any copy operation
  * started by the function call has been synced.
  */
 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
-				  struct xe_bo *bo,
+				  struct xe_bo *src_bo,
+				  struct xe_bo *dst_bo,
 				  struct ttm_resource *src,
 				  struct ttm_resource *dst)
 {
 	struct xe_gt *gt = m->gt;
 	struct xe_device *xe = gt_to_xe(gt);
 	struct dma_fence *fence = NULL;
-	u64 size = bo->size;
+	u64 size = src_bo->size;
 	struct xe_res_cursor src_it, dst_it, ccs_it;
 	u64 src_L0_ofs, dst_L0_ofs;
 	u32 src_L0_pt, dst_L0_pt;
@@ -614,20 +615,28 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 	int err;
 	bool src_is_vram = mem_type_is_vram(src->mem_type);
 	bool dst_is_vram = mem_type_is_vram(dst->mem_type);
-	bool copy_ccs = xe_device_has_flat_ccs(xe) && xe_bo_needs_ccs_pages(bo);
+	bool copy_ccs = xe_device_has_flat_ccs(xe) &&
+		xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
 	bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
 
+	/* Copying CCS between two different BOs is not supported yet. */
+	if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
+		return ERR_PTR(-EINVAL);
+
+	if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size))
+		return ERR_PTR(-EINVAL);
+
 	if (!src_is_vram)
-		xe_res_first_sg(xe_bo_get_sg(bo), 0, size, &src_it);
+		xe_res_first_sg(xe_bo_get_sg(src_bo), 0, size, &src_it);
 	else
 		xe_res_first(src, 0, size, &src_it);
 	if (!dst_is_vram)
-		xe_res_first_sg(xe_bo_get_sg(bo), 0, size, &dst_it);
+		xe_res_first_sg(xe_bo_get_sg(dst_bo), 0, size, &dst_it);
 	else
 		xe_res_first(dst, 0, size, &dst_it);
 
 	if (copy_system_ccs)
-		xe_res_first_sg(xe_bo_get_sg(bo), xe_bo_ccs_pages_start(bo),
+		xe_res_first_sg(xe_bo_get_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
 				PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
 				&ccs_it);
 
@@ -681,18 +690,18 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 
 		if (!src_is_vram)
 			emit_pte(m, bb, src_L0_pt, src_is_vram, &src_it, src_L0,
-				 bo);
+				 src_bo);
 		else
 			xe_res_next(&src_it, src_L0);
 
 		if (!dst_is_vram)
 			emit_pte(m, bb, dst_L0_pt, dst_is_vram, &dst_it, src_L0,
-				 bo);
+				 dst_bo);
 		else
 			xe_res_next(&dst_it, src_L0);
 
 		if (copy_system_ccs)
-			emit_pte(m, bb, ccs_pt, false, &ccs_it, ccs_size, bo);
+			emit_pte(m, bb, ccs_pt, false, &ccs_it, ccs_size, src_bo);
 
 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
 		update_idx = bb->len;
@@ -714,8 +723,11 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 		xe_sched_job_add_migrate_flush(job, flush_flags);
 
 		if (!fence) {
-			err = job_add_deps(job, bo->ttm.base.resv,
+			err = job_add_deps(job, src_bo->ttm.base.resv,
 					   DMA_RESV_USAGE_BOOKKEEP);
+			if (!err && src_bo != dst_bo)
+				err = job_add_deps(job, dst_bo->ttm.base.resv,
+						   DMA_RESV_USAGE_BOOKKEEP);
 			if (err)
 				goto err_job;
 		}
...
@@ -73,7 +73,8 @@ struct xe_migrate_pt_update {
 struct xe_migrate *xe_migrate_init(struct xe_gt *gt);
 
 struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
-				  struct xe_bo *bo,
+				  struct xe_bo *src_bo,
+				  struct xe_bo *dst_bo,
 				  struct ttm_resource *src,
 				  struct ttm_resource *dst);
...
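
As a closing usage note (not part of the commit): the updated kerneldoc distinguishes two call patterns. Moving a single BO between its own resources keeps passing the same BO twice, exactly as the xe_bo_move() hunk does, while a copy between two BOs passes both and is subject to the new size check. The fragment below reuses the variable names from the hunks above for illustration only.

	/* Same-BO move, as in xe_bo_move(): src_bo == dst_bo. */
	fence = xe_migrate_copy(gt->migrate, bo, bo, old_mem, new_mem);

	/*
	 * BO-to-BO copy, as in the migrate kunit test: sizes must match, and
	 * a flat-CCS metadata copy between different BOs is rejected for now.
	 */
	fence = xe_migrate_copy(m, sysmem, bo,
				sysmem->ttm.resource, bo->ttm.resource);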