Commit a21fe5ee authored by Thomas Hellström, committed by Rodrigo Vivi

drm/xe/bo: Rename xe_bo_get_sg() to xe_bo_sg()

Using "get" typically refers to obtaining a refcount, which we don't do
here, so rename to xe_bo_sg().
Suggested-by: Ohad Sharabi <osharabi@habana.ai>
Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/946
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Ohad Sharabi <osharabi@habana.ai>
Link: https://patchwork.freedesktop.org/patch/msgid/20231122110359.4087-3-thomas.hellstrom@linux.intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 8c54ee8a
...@@ -328,7 +328,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) ...@@ -328,7 +328,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
if (xe_bo_is_vram(pt)) if (xe_bo_is_vram(pt))
xe_res_first(pt->ttm.resource, 0, pt->size, &src_it); xe_res_first(pt->ttm.resource, 0, pt->size, &src_it);
else else
xe_res_first_sg(xe_bo_get_sg(pt), 0, pt->size, &src_it); xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);
emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt),
&src_it, XE_PAGE_SIZE, pt); &src_it, XE_PAGE_SIZE, pt);
......
...@@ -317,7 +317,7 @@ static int xe_tt_map_sg(struct ttm_tt *tt) ...@@ -317,7 +317,7 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
return 0; return 0;
} }
struct sg_table *xe_bo_get_sg(struct xe_bo *bo) struct sg_table *xe_bo_sg(struct xe_bo *bo)
{ {
struct ttm_tt *tt = bo->ttm.ttm; struct ttm_tt *tt = bo->ttm.ttm;
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
...@@ -1735,7 +1735,7 @@ dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size) ...@@ -1735,7 +1735,7 @@ dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) { if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
xe_assert(xe, bo->ttm.ttm); xe_assert(xe, bo->ttm.ttm);
xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT, xe_res_first_sg(xe_bo_sg(bo), page << PAGE_SHIFT,
page_size, &cur); page_size, &cur);
return xe_res_dma(&cur) + offset; return xe_res_dma(&cur) + offset;
} else { } else {
......
...@@ -299,7 +299,7 @@ xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred) ...@@ -299,7 +299,7 @@ xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred)
void xe_bo_put_commit(struct llist_head *deferred); void xe_bo_put_commit(struct llist_head *deferred);
struct sg_table *xe_bo_get_sg(struct xe_bo *bo); struct sg_table *xe_bo_sg(struct xe_bo *bo);
/* /*
* xe_sg_segment_size() - Provides upper limit for sg segment size. * xe_sg_segment_size() - Provides upper limit for sg segment size.
......
...@@ -682,16 +682,16 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, ...@@ -682,16 +682,16 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
if (!src_is_vram) if (!src_is_vram)
xe_res_first_sg(xe_bo_get_sg(src_bo), 0, size, &src_it); xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
else else
xe_res_first(src, 0, size, &src_it); xe_res_first(src, 0, size, &src_it);
if (!dst_is_vram) if (!dst_is_vram)
xe_res_first_sg(xe_bo_get_sg(dst_bo), 0, size, &dst_it); xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
else else
xe_res_first(dst, 0, size, &dst_it); xe_res_first(dst, 0, size, &dst_it);
if (copy_system_ccs) if (copy_system_ccs)
xe_res_first_sg(xe_bo_get_sg(src_bo), xe_bo_ccs_pages_start(src_bo), xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
PAGE_ALIGN(xe_device_ccs_bytes(xe, size)), PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
&ccs_it); &ccs_it);
...@@ -941,7 +941,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, ...@@ -941,7 +941,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
int pass = 0; int pass = 0;
if (!clear_vram) if (!clear_vram)
xe_res_first_sg(xe_bo_get_sg(bo), 0, bo->size, &src_it); xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it);
else else
xe_res_first(src, 0, bo->size, &src_it); xe_res_first(src, 0, bo->size, &src_it);
......
...@@ -677,7 +677,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, ...@@ -677,7 +677,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma), xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
xe_vma_size(vma), &curs); xe_vma_size(vma), &curs);
else else
xe_res_first_sg(xe_bo_get_sg(bo), xe_vma_bo_offset(vma), xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
xe_vma_size(vma), &curs); xe_vma_size(vma), &curs);
} else { } else {
curs.size = xe_vma_size(vma); curs.size = xe_vma_size(vma);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment