Commit fb31517c authored by Michael J. Ruhl, committed by Rodrigo Vivi

drm/xe: Rename GPU offset helper to reflect true usage

The _io_offset helper function returns an offset into the GPU
address space, so naming it after the CPU address space (the io_
prefix) is not correct.

Rename the helper to reflect its usage.
Update it to use GPU offset information.
Update the PT dma_offset code to use the helper.
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 2d830096
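
For readers outside the driver, here is a minimal standalone sketch of the distinction the commit message draws. The struct is a stripped-down stand-in for the xe device/GT VRAM bookkeeping, and all field values are invented for illustration; only the io_start and base field names are taken from the diff below.

#include <stdint.h>
#include <stdio.h>

/* Toy model: only the two kinds of VRAM addresses matter here. */
struct vram_region {
	uint64_t io_start; /* where VRAM appears to the CPU (PCI BAR space) */
	uint64_t base;     /* where VRAM sits in the GPU's own address space */
};

int main(void)
{
	/* Invented example values for a device and one of its GTs. */
	struct vram_region xe = { .io_start = 0x4000000000ull, .base = 0x0ull };
	struct vram_region gt = { .io_start = 0x4100000000ull, .base = 0x100000000ull };

	/* Old-style computation: a delta between CPU-visible BAR addresses. */
	uint64_t io_offset = gt.io_start - xe.io_start;

	/* New-style computation: a position in the GPU address space. */
	uint64_t gpu_offset = xe.base + gt.base;

	/* The two only coincide when the BAR and GPU layouts happen to match. */
	printf("io-derived: %#llx, gpu: %#llx\n",
	       (unsigned long long)io_offset, (unsigned long long)gpu_offset);
	return 0;
}
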
@@ -1341,7 +1341,7 @@ struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_gt *gt,
  * XXX: This is in the VM bind data path, likely should calculate this once and
  * store, with a recalculation if the BO is moved.
  */
-uint64_t vram_region_io_offset(struct ttm_resource *res)
+uint64_t vram_region_gpu_offset(struct ttm_resource *res)
 {
 	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
 	struct xe_gt *gt = mem_type_to_gt(xe, res->mem_type);
@@ -1349,7 +1349,7 @@ uint64_t vram_region_io_offset(struct ttm_resource *res)
 	if (res->mem_type == XE_PL_STOLEN)
 		return xe_ttm_stolen_gpu_offset(xe);
 
-	return gt->mem.vram.io_start - xe->mem.vram.io_start;
+	return xe->mem.vram.base + gt->mem.vram.base;
 }
 
 /**
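
Reassembling the two hunks above, the renamed helper reads as follows after this patch (the inline comments are editorial annotations, not part of the commit):

uint64_t vram_region_gpu_offset(struct ttm_resource *res)
{
	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
	struct xe_gt *gt = mem_type_to_gt(xe, res->mem_type);

	/* Stolen memory keeps its own GPU-offset accounting. */
	if (res->mem_type == XE_PL_STOLEN)
		return xe_ttm_stolen_gpu_offset(xe);

	/*
	 * Device VRAM base plus this GT's region base, both expressed in
	 * the GPU address space. The old code subtracted CPU-visible
	 * io_start addresses, which live in PCI BAR space instead.
	 */
	return xe->mem.vram.base + gt->mem.vram.base;
}
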
@@ -1433,7 +1433,7 @@ int xe_bo_pin(struct xe_bo *bo)
 		XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
 
 		place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &vram) -
-			       vram_region_io_offset(bo->ttm.resource)) >> PAGE_SHIFT;
+			       vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
 		place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
 
 		spin_lock(&xe->pinned.lock);
@@ -1580,7 +1580,7 @@ dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset,
 		xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
 			     page_size, &cur);
 
-		return cur.start + offset + vram_region_io_offset(bo->ttm.resource);
+		return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
 	}
 }
......
@@ -227,7 +227,7 @@ void xe_bo_vunmap(struct xe_bo *bo);
 bool mem_type_is_vram(u32 mem_type);
 bool xe_bo_is_vram(struct xe_bo *bo);
 bool xe_bo_is_stolen(struct xe_bo *bo);
-uint64_t vram_region_io_offset(struct ttm_resource *res);
+uint64_t vram_region_gpu_offset(struct ttm_resource *res);
 
 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
......
@@ -419,7 +419,7 @@ static u32 pte_update_size(struct xe_migrate *m,
 	} else {
 		/* Offset into identity map. */
 		*L0_ofs = xe_migrate_vram_ofs(cur->start +
-					      vram_region_io_offset(res));
+					      vram_region_gpu_offset(res));
 		cmds += cmd_size;
 	}
@@ -469,7 +469,7 @@ static void emit_pte(struct xe_migrate *m,
 					addr |= XE_PTE_PS64;
 				}
 
-				addr += vram_region_io_offset(bo->ttm.resource);
+				addr += vram_region_gpu_offset(bo->ttm.resource);
 				addr |= XE_PPGTT_PTE_LM;
 			}
 			addr |= PPAT_CACHED | XE_PAGE_PRESENT | XE_PAGE_RW;
......
@@ -759,13 +759,10 @@ xe_pt_stage_bind(struct xe_gt *gt, struct xe_vma *vma,
 	int ret;
 
 	if (is_vram) {
-		struct xe_gt *bo_gt = xe_bo_to_gt(bo);
-
 		xe_walk.default_pte = XE_PPGTT_PTE_LM;
 		if (vma && vma->use_atomic_access_pte_bit)
 			xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
-		xe_walk.dma_offset = bo_gt->mem.vram.io_start -
-			gt_to_xe(gt)->mem.vram.io_start;
+		xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource);
 		xe_walk.cache = XE_CACHE_WB;
 	} else {
 		if (!xe_vma_is_userptr(vma) && bo->flags & XE_BO_SCANOUT_BIT)
......
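
For reference, the VRAM branch of xe_pt_stage_bind after this patch, reassembled from the hunk above (the inline comment is editorial, not from the source):

	if (is_vram) {
		xe_walk.default_pte = XE_PPGTT_PTE_LM;
		if (vma && vma->use_atomic_access_pte_bit)
			xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
		/*
		 * One helper call replaces the open-coded io_start
		 * subtraction, drops the bo_gt local, and also covers
		 * the stolen-memory placement via the helper's
		 * XE_PL_STOLEN branch.
		 */
		xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource);
		xe_walk.cache = XE_CACHE_WB;
	}
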