Commit 876611c2 authored by Matt Roper, committed by Rodrigo Vivi

drm/xe: Memory allocations are tile-based, not GT-based

Since memory and address spaces are a tile concept rather than a GT
concept, we need to plumb tile-based handling through lots of
memory-related code.

Note one remaining shortcoming that will need to be addressed before
media GT support can be re-enabled: although the address space is
shared between a tile's GTs, each GT caches PTEs independently in its
own TLB, so TLB invalidation still needs to be handled at the GT
level.
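
For illustration only (not part of this commit), a minimal sketch of
the new calling convention; the function name and size are
hypothetical, while the helpers all appear in the diff below:

	/* Hypothetical caller: allocations now key off the tile, not the GT. */
	static struct xe_bo *example_alloc(struct xe_device *xe, struct xe_gt *gt,
					   struct xe_vm *vm)
	{
		struct xe_tile *tile = gt_to_tile(gt);	/* map a GT to its parent tile */

		/* Previously: xe_bo_create_pin_map(xe, gt, vm, SZ_4K, ...) */
		return xe_bo_create_pin_map(xe, tile, vm, SZ_4K,
					    ttm_bo_type_kernel,
					    XE_BO_CREATE_VRAM_IF_DGFX(tile) |
					    XE_BO_CREATE_GGTT_BIT);
	}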

v2:
 - Fix kunit test build.
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://lore.kernel.org/r/20230601215244.678611-13-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent ebd288cb
......@@ -173,7 +173,7 @@ static int evict_test_run_gt(struct xe_device *xe, struct xe_gt *gt, struct kuni
{
struct xe_bo *bo, *external;
unsigned int bo_flags = XE_BO_CREATE_USER_BIT |
XE_BO_CREATE_VRAM_IF_DGFX(gt);
XE_BO_CREATE_VRAM_IF_DGFX(gt_to_tile(gt));
struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->primary_gt.migrate);
struct ww_acquire_ctx ww;
int err, i;
......
......@@ -63,7 +63,7 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
static void
sanity_populate_cb(struct xe_migrate_pt_update *pt_update,
struct xe_gt *gt, struct iosys_map *map, void *dst,
struct xe_tile *tile, struct iosys_map *map, void *dst,
u32 qword_ofs, u32 num_qwords,
const struct xe_vm_pgtable_update *update)
{
......@@ -76,7 +76,7 @@ sanity_populate_cb(struct xe_migrate_pt_update *pt_update,
for (i = 0; i < num_qwords; i++) {
value = (qword_ofs + i - update->ofs) * 0x1111111111111111ULL;
if (map)
xe_map_wr(gt_to_xe(gt), map, (qword_ofs + i) *
xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
sizeof(u64), u64, value);
else
ptr[i] = value;
......@@ -108,7 +108,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
const char *str = big ? "Copying big bo" : "Copying small bo";
int err;
struct xe_bo *sysmem = xe_bo_create_locked(xe, m->gt, NULL,
struct xe_bo *sysmem = xe_bo_create_locked(xe, gt_to_tile(m->gt), NULL,
bo->size,
ttm_bo_type_kernel,
XE_BO_CREATE_SYSTEM_BIT);
......@@ -240,6 +240,7 @@ static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt,
static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
{
struct xe_gt *gt = m->gt;
struct xe_tile *tile = gt_to_tile(m->gt);
struct xe_device *xe = gt_to_xe(gt);
struct xe_bo *pt, *bo = m->pt_bo, *big, *tiny;
struct xe_res_cursor src_it;
......@@ -256,18 +257,18 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
return;
}
big = xe_bo_create_pin_map(xe, m->gt, m->eng->vm, SZ_4M,
big = xe_bo_create_pin_map(xe, tile, m->eng->vm, SZ_4M,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(m->gt) |
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_PINNED_BIT);
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
}
pt = xe_bo_create_pin_map(xe, m->gt, m->eng->vm, XE_PAGE_SIZE,
pt = xe_bo_create_pin_map(xe, tile, m->eng->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(m->gt) |
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_PINNED_BIT);
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
......@@ -275,10 +276,10 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
goto free_big;
}
tiny = xe_bo_create_pin_map(xe, m->gt, m->eng->vm,
tiny = xe_bo_create_pin_map(xe, tile, m->eng->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(m->gt) |
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_PINNED_BIT);
if (IS_ERR(tiny)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
......@@ -286,7 +287,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
goto free_pt;
}
bb = xe_bb_new(m->gt, 32, xe->info.supports_usm);
bb = xe_bb_new(gt, 32, xe->info.supports_usm);
if (IS_ERR(bb)) {
KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n",
PTR_ERR(bb));
......
......@@ -30,6 +30,7 @@ static int bb_prefetch(struct xe_gt *gt)
struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
{
struct xe_tile *tile = gt_to_tile(gt);
struct xe_bb *bb = kmalloc(sizeof(*bb), GFP_KERNEL);
int err;
......@@ -42,7 +43,7 @@ struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
* space to accommodate the platform-specific hardware prefetch
* requirements.
*/
bb->bo = xe_sa_bo_new(!usm ? gt->kernel_bb_pool : gt->usm.bb_pool,
bb->bo = xe_sa_bo_new(!usm ? tile->mem.kernel_bb_pool : gt->usm.bb_pool,
4 * (dwords + 1) + bb_prefetch(gt));
if (IS_ERR(bb->bo)) {
err = PTR_ERR(bb->bo);
......
......@@ -458,7 +458,7 @@ static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
}
xe_vm_assert_held(vm);
if (list_empty(&vma->rebind_link) && vma->gt_present)
if (list_empty(&vma->rebind_link) && vma->tile_present)
list_add_tail(&vma->rebind_link, &vm->rebind_list);
if (vm_resv_locked)
......@@ -565,7 +565,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
struct ttm_resource *old_mem = ttm_bo->resource;
struct ttm_tt *ttm = ttm_bo->ttm;
struct xe_gt *gt = NULL;
struct xe_tile *tile = NULL;
struct dma_fence *fence;
bool move_lacks_source;
bool needs_clear;
......@@ -635,15 +635,15 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
goto out;
}
if (bo->gt)
gt = bo->gt;
if (bo->tile)
tile = bo->tile;
else if (resource_is_vram(new_mem))
gt = &mem_type_to_tile(xe, new_mem->mem_type)->primary_gt;
tile = mem_type_to_tile(xe, new_mem->mem_type);
else if (resource_is_vram(old_mem))
gt = &mem_type_to_tile(xe, old_mem->mem_type)->primary_gt;
tile = mem_type_to_tile(xe, old_mem->mem_type);
XE_BUG_ON(!gt);
XE_BUG_ON(!gt->migrate);
XE_BUG_ON(!tile);
XE_BUG_ON(!tile->primary_gt.migrate);
trace_xe_bo_move(bo);
xe_device_mem_access_get(xe);
......@@ -664,7 +664,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
/* Create a new VMAP once kernel BO back in VRAM */
if (!ret && resource_is_vram(new_mem)) {
void *new_addr = gt_to_tile(gt)->mem.vram.mapping +
void *new_addr = tile->mem.vram.mapping +
(new_mem->start << PAGE_SHIFT);
if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
......@@ -681,9 +681,10 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
}
} else {
if (move_lacks_source)
fence = xe_migrate_clear(gt->migrate, bo, new_mem);
fence = xe_migrate_clear(tile->primary_gt.migrate, bo, new_mem);
else
fence = xe_migrate_copy(gt->migrate, bo, bo, old_mem, new_mem);
fence = xe_migrate_copy(tile->primary_gt.migrate,
bo, bo, old_mem, new_mem);
if (IS_ERR(fence)) {
ret = PTR_ERR(fence);
xe_device_mem_access_put(xe);
......@@ -964,7 +965,7 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
WARN_ON(!list_empty(&bo->vmas));
if (bo->ggtt_node.size)
xe_ggtt_remove_bo(gt_to_tile(bo->gt)->mem.ggtt, bo);
xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
if (bo->vm && xe_bo_is_user(bo))
xe_vm_put(bo->vm);
......@@ -1086,7 +1087,7 @@ void xe_bo_free(struct xe_bo *bo)
}
struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
struct xe_gt *gt, struct dma_resv *resv,
struct xe_tile *tile, struct dma_resv *resv,
size_t size, enum ttm_bo_type type,
u32 flags)
{
......@@ -1099,7 +1100,7 @@ struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
int err;
/* Only kernel objects should set GT */
XE_BUG_ON(gt && type != ttm_bo_type_kernel);
XE_BUG_ON(tile && type != ttm_bo_type_kernel);
if (XE_WARN_ON(!size))
return ERR_PTR(-EINVAL);
......@@ -1120,7 +1121,7 @@ struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
alignment = SZ_4K >> PAGE_SHIFT;
}
bo->gt = gt;
bo->tile = tile;
bo->size = size;
bo->flags = flags;
bo->ttm.base.funcs = &xe_gem_object_funcs;
......@@ -1202,7 +1203,7 @@ static int __xe_bo_fixed_placement(struct xe_device *xe,
struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
struct xe_gt *gt, struct xe_vm *vm,
struct xe_tile *tile, struct xe_vm *vm,
size_t size, u64 start, u64 end,
enum ttm_bo_type type, u32 flags)
{
......@@ -1225,7 +1226,7 @@ xe_bo_create_locked_range(struct xe_device *xe,
}
}
bo = __xe_bo_create_locked(xe, bo, gt, vm ? &vm->resv : NULL, size,
bo = __xe_bo_create_locked(xe, bo, tile, vm ? &vm->resv : NULL, size,
type, flags);
if (IS_ERR(bo))
return bo;
......@@ -1235,16 +1236,16 @@ xe_bo_create_locked_range(struct xe_device *xe,
bo->vm = vm;
if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
if (!gt && flags & XE_BO_CREATE_STOLEN_BIT)
gt = xe_device_get_gt(xe, 0);
if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
tile = xe_device_get_root_tile(xe);
XE_BUG_ON(!gt);
XE_BUG_ON(!tile);
if (flags & XE_BO_CREATE_STOLEN_BIT &&
flags & XE_BO_FIXED_PLACEMENT_BIT) {
err = xe_ggtt_insert_bo_at(gt_to_tile(gt)->mem.ggtt, bo, start);
err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo, start);
} else {
err = xe_ggtt_insert_bo(gt_to_tile(gt)->mem.ggtt, bo);
err = xe_ggtt_insert_bo(tile->mem.ggtt, bo);
}
if (err)
goto err_unlock_put_bo;
......@@ -1258,18 +1259,18 @@ xe_bo_create_locked_range(struct xe_device *xe,
return ERR_PTR(err);
}
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_gt *gt,
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
enum ttm_bo_type type, u32 flags)
{
return xe_bo_create_locked_range(xe, gt, vm, size, 0, ~0ULL, type, flags);
return xe_bo_create_locked_range(xe, tile, vm, size, 0, ~0ULL, type, flags);
}
struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_gt *gt,
struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
enum ttm_bo_type type, u32 flags)
{
struct xe_bo *bo = xe_bo_create_locked(xe, gt, vm, size, type, flags);
struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);
if (!IS_ERR(bo))
xe_bo_unlock_vm_held(bo);
......@@ -1277,7 +1278,7 @@ struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_gt *gt,
return bo;
}
struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_gt *gt,
struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm,
size_t size, u64 offset,
enum ttm_bo_type type, u32 flags)
......@@ -1291,7 +1292,7 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_gt *gt,
xe_ttm_stolen_cpu_access_needs_ggtt(xe))
flags |= XE_BO_CREATE_GGTT_BIT;
bo = xe_bo_create_locked_range(xe, gt, vm, size, start, end, type, flags);
bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type, flags);
if (IS_ERR(bo))
return bo;
......@@ -1315,18 +1316,18 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_gt *gt,
return ERR_PTR(err);
}
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_gt *gt,
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
enum ttm_bo_type type, u32 flags)
{
return xe_bo_create_pin_map_at(xe, gt, vm, size, ~0ull, type, flags);
return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
}
struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_gt *gt,
struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
const void *data, size_t size,
enum ttm_bo_type type, u32 flags)
{
struct xe_bo *bo = xe_bo_create_pin_map(xe, gt, NULL,
struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL,
ALIGN(size, PAGE_SIZE),
type, flags);
if (IS_ERR(bo))
......@@ -1957,7 +1958,7 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
page_size);
bo = xe_bo_create(xe, NULL, NULL, args->size, ttm_bo_type_device,
XE_BO_CREATE_VRAM_IF_DGFX(to_gt(xe)) |
XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
XE_BO_CREATE_USER_BIT | XE_BO_SCANOUT_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
......
......@@ -21,8 +21,8 @@
XE_BO_CREATE_VRAM1_BIT)
/* -- */
#define XE_BO_CREATE_STOLEN_BIT BIT(4)
#define XE_BO_CREATE_VRAM_IF_DGFX(gt) \
(IS_DGFX(gt_to_xe(gt)) ? XE_BO_CREATE_VRAM0_BIT << gt_to_tile(gt)->id : \
#define XE_BO_CREATE_VRAM_IF_DGFX(tile) \
(IS_DGFX(tile_to_xe(tile)) ? XE_BO_CREATE_VRAM0_BIT << (tile)->id : \
XE_BO_CREATE_SYSTEM_BIT)
#define XE_BO_CREATE_GGTT_BIT BIT(5)
#define XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT BIT(6)
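
Illustration only (not part of this commit): how the updated macro
resolves, assuming a hypothetical two-tile discrete part.

	/*
	 * XE_BO_CREATE_VRAM_IF_DGFX(tile) now picks the VRAM placement bit by
	 * tile id: tile->id == 0 yields XE_BO_CREATE_VRAM0_BIT, tile->id == 1
	 * yields XE_BO_CREATE_VRAM0_BIT << 1 (i.e. XE_BO_CREATE_VRAM1_BIT).
	 * On integrated parts it still falls back to XE_BO_CREATE_SYSTEM_BIT.
	 */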
......@@ -81,27 +81,27 @@ struct xe_bo *xe_bo_alloc(void);
void xe_bo_free(struct xe_bo *bo);
struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
struct xe_gt *gt, struct dma_resv *resv,
struct xe_tile *tile, struct dma_resv *resv,
size_t size, enum ttm_bo_type type,
u32 flags);
struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
struct xe_gt *gt, struct xe_vm *vm,
struct xe_tile *tile, struct xe_vm *vm,
size_t size, u64 start, u64 end,
enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_gt *gt,
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_gt *gt,
struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_gt *gt,
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_gt *gt,
struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size, u64 offset,
enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_gt *gt,
struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
const void *data, size_t size,
enum ttm_bo_type type, u32 flags);
......
......@@ -149,7 +149,7 @@ int xe_bo_restore_kernel(struct xe_device *xe)
}
if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
struct xe_tile *tile = gt_to_tile(bo->gt);
struct xe_tile *tile = bo->tile;
mutex_lock(&tile->mem.ggtt->lock);
xe_ggtt_map_bo(tile->mem.ggtt, bo);
......
......@@ -29,8 +29,8 @@ struct xe_bo {
u32 flags;
/** @vm: VM this BO is attached to, for extobj this will be NULL */
struct xe_vm *vm;
/** @gt: GT this BO is attached to (kernel BO only) */
struct xe_gt *gt;
/** @tile: Tile this BO is attached to (kernel BO only) */
struct xe_tile *tile;
/** @vmas: List of VMAs for this BO */
struct list_head vmas;
/** @placements: valid placements for this BO */
......
......@@ -128,6 +128,13 @@ struct xe_tile {
/** @ggtt: Global graphics translation table */
struct xe_ggtt *ggtt;
/**
* @kernel_bb_pool: Pool from which batchbuffers are allocated.
*
* Media GT shares a pool with its primary GT.
*/
struct xe_sa_manager *kernel_bb_pool;
} mem;
};
......
......@@ -151,7 +151,6 @@ static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
int xe_ggtt_init(struct xe_ggtt *ggtt)
{
struct xe_device *xe = tile_to_xe(ggtt->tile);
struct xe_gt *gt = &ggtt->tile->primary_gt;
unsigned int flags;
int err;
......@@ -164,9 +163,9 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
if (ggtt->flags & XE_GGTT_FLAGS_64K)
flags |= XE_BO_CREATE_SYSTEM_BIT;
else
flags |= XE_BO_CREATE_VRAM_IF_DGFX(gt);
flags |= XE_BO_CREATE_VRAM_IF_DGFX(ggtt->tile);
ggtt->scratch = xe_bo_create_pin_map(xe, gt, NULL, XE_PAGE_SIZE,
ggtt->scratch = xe_bo_create_pin_map(xe, ggtt->tile, NULL, XE_PAGE_SIZE,
ttm_bo_type_kernel,
flags);
......
......@@ -95,7 +95,7 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
if (IS_ERR(bb))
return PTR_ERR(bb);
batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool->bo);
batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
job = xe_bb_create_wa_job(e, bb, batch_ofs);
if (IS_ERR(job)) {
xe_bb_free(bb, NULL);
......@@ -144,7 +144,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
}
}
batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool->bo);
batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
job = xe_bb_create_wa_job(e, bb, batch_ofs);
if (IS_ERR(job)) {
xe_bb_free(bb, NULL);
......@@ -370,31 +370,16 @@ static int all_fw_domain_init(struct xe_gt *gt)
goto err_force_wake;
if (!xe_gt_is_media_type(gt)) {
gt->kernel_bb_pool = xe_sa_bo_manager_init(gt, SZ_1M, 16);
if (IS_ERR(gt->kernel_bb_pool)) {
err = PTR_ERR(gt->kernel_bb_pool);
goto err_force_wake;
}
/*
* USM has its own SA pool so that it does not block behind user operations
*/
if (gt_to_xe(gt)->info.supports_usm) {
gt->usm.bb_pool = xe_sa_bo_manager_init(gt, SZ_1M, 16);
gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
if (IS_ERR(gt->usm.bb_pool)) {
err = PTR_ERR(gt->usm.bb_pool);
goto err_force_wake;
}
}
} else {
struct xe_gt *full_gt = xe_find_full_gt(gt);
/*
* Media GT's kernel_bb_pool is only used while recording the
* default context during GT init. The USM pool should never
* be needed on the media GT.
*/
gt->kernel_bb_pool = full_gt->kernel_bb_pool;
}
if (!xe_gt_is_media_type(gt)) {
......
......@@ -64,11 +64,11 @@ static int force_reset(struct seq_file *m, void *data)
static int sa_info(struct seq_file *m, void *data)
{
struct xe_gt *gt = node_to_gt(m->private);
struct xe_tile *tile = gt_to_tile(node_to_gt(m->private));
struct drm_printer p = drm_seq_file_printer(m);
drm_suballoc_dump_debug_info(&gt->kernel_bb_pool->base, &p,
gt->kernel_bb_pool->gpu_addr);
drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, &p,
tile->mem.kernel_bb_pool->gpu_addr);
return 0;
}
......
......@@ -69,10 +69,10 @@ static bool access_is_atomic(enum access_type access_type)
return access_type == ACCESS_TYPE_ATOMIC;
}
static bool vma_is_valid(struct xe_gt *gt, struct xe_vma *vma)
static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
return BIT(gt->info.id) & vma->gt_present &&
!(BIT(gt->info.id) & vma->usm.gt_invalidated);
return BIT(tile->id) & vma->tile_present &&
!(BIT(tile->id) & vma->usm.tile_invalidated);
}
static bool vma_matches(struct xe_vma *vma, struct xe_vma *lookup)
......@@ -152,7 +152,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
atomic = access_is_atomic(pf->access_type);
/* Check if VMA is valid */
if (vma_is_valid(gt, vma) && !atomic)
if (vma_is_valid(tile, vma) && !atomic)
goto unlock_vm;
/* TODO: Validate fault */
......@@ -208,8 +208,8 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
/* Bind VMA only to the GT that has faulted */
trace_xe_vma_pf_bind(vma);
fence = __xe_pt_bind_vma(gt, vma, xe_gt_migrate_engine(gt), NULL, 0,
vma->gt_present & BIT(gt->info.id));
fence = __xe_pt_bind_vma(tile, vma, xe_gt_migrate_engine(gt), NULL, 0,
vma->tile_present & BIT(tile->id));
if (IS_ERR(fence)) {
ret = PTR_ERR(fence);
goto unlock_dma_resv;
......@@ -225,7 +225,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
if (xe_vma_is_userptr(vma))
ret = xe_vma_userptr_check_repin(vma);
vma->usm.gt_invalidated &= ~BIT(gt->info.id);
vma->usm.tile_invalidated &= ~BIT(tile->id);
unlock_dma_resv:
if (only_needs_bo_lock(bo))
......
......@@ -278,13 +278,6 @@ struct xe_gt {
/** @hw_engines: hardware engines on the GT */
struct xe_hw_engine hw_engines[XE_NUM_HW_ENGINES];
/**
* @kernel_bb_pool: Pool from which batchbuffers are allocated.
*
* Media GT shares a pool with its primary GT.
*/
struct xe_sa_manager *kernel_bb_pool;
/** @migrate: Migration helper for vram blits and clearing */
struct xe_migrate *migrate;
......
......@@ -273,16 +273,17 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
{
struct xe_device *xe = ads_to_xe(ads);
struct xe_gt *gt = ads_to_gt(ads);
struct xe_tile *tile = gt_to_tile(gt);
struct xe_bo *bo;
int err;
ads->golden_lrc_size = calculate_golden_lrc_size(ads);
ads->regset_size = calculate_regset_size(gt);
bo = xe_bo_create_pin_map(xe, gt, NULL, guc_ads_size(ads) +
bo = xe_bo_create_pin_map(xe, tile, NULL, guc_ads_size(ads) +
MAX_GOLDEN_LRC_SIZE,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(gt) |
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
......
......@@ -130,6 +130,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
struct xe_tile *tile = gt_to_tile(gt);
struct xe_bo *bo;
int err;
......@@ -145,9 +146,9 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
primelockdep(ct);
bo = xe_bo_create_pin_map(xe, gt, NULL, guc_ct_size(),
bo = xe_bo_create_pin_map(xe, tile, NULL, guc_ct_size(),
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(gt) |
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
......
......@@ -70,6 +70,7 @@ int xe_guc_hwconfig_init(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
struct xe_tile *tile = gt_to_tile(gt);
struct xe_bo *bo;
u32 size;
int err;
......@@ -94,9 +95,9 @@ int xe_guc_hwconfig_init(struct xe_guc *guc)
if (!size)
return -EINVAL;
bo = xe_bo_create_pin_map(xe, gt, NULL, PAGE_ALIGN(size),
bo = xe_bo_create_pin_map(xe, tile, NULL, PAGE_ALIGN(size),
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(gt) |
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
......
......@@ -87,13 +87,13 @@ static void guc_log_fini(struct drm_device *drm, void *arg)
int xe_guc_log_init(struct xe_guc_log *log)
{
struct xe_device *xe = log_to_xe(log);
struct xe_gt *gt = log_to_gt(log);
struct xe_tile *tile = gt_to_tile(log_to_gt(log));
struct xe_bo *bo;
int err;
bo = xe_bo_create_pin_map(xe, gt, NULL, guc_log_size(),
bo = xe_bo_create_pin_map(xe, tile, NULL, guc_log_size(),
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(gt) |
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
......
......@@ -888,6 +888,7 @@ static void pc_fini(struct drm_device *drm, void *arg)
int xe_guc_pc_init(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = gt_to_xe(gt);
struct xe_bo *bo;
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
......@@ -895,9 +896,9 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
mutex_init(&pc->freq_lock);
bo = xe_bo_create_pin_map(xe, gt, NULL, size,
bo = xe_bo_create_pin_map(xe, tile, NULL, size,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(gt) |
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo))
......
......@@ -373,6 +373,7 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
enum xe_hw_engine_id id)
{
struct xe_device *xe = gt_to_xe(gt);
struct xe_tile *tile = gt_to_tile(gt);
int err;
XE_BUG_ON(id >= ARRAY_SIZE(engine_infos) || !engine_infos[id].name);
......@@ -381,8 +382,8 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
xe_reg_sr_apply_whitelist(&hwe->reg_whitelist, hwe->mmio_base, gt);
hwe->hwsp = xe_bo_create_pin_map(xe, gt, NULL, SZ_4K, ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(gt) |
hwe->hwsp = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(hwe->hwsp)) {
err = PTR_ERR(hwe->hwsp);
......
......@@ -592,7 +592,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe)
static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
{
u64 desc = xe_vm_pdp4_descriptor(vm, lrc->full_gt);
u64 desc = xe_vm_pdp4_descriptor(vm, lrc->tile);
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_UDW, upper_32_bits(desc));
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_LDW, lower_32_bits(desc));
......@@ -607,6 +607,7 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
struct xe_engine *e, struct xe_vm *vm, u32 ring_size)
{
struct xe_gt *gt = hwe->gt;
struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = gt_to_xe(gt);
struct iosys_map map;
void *init_data = NULL;
......@@ -619,19 +620,15 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
* FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
* via VM bind calls.
*/
lrc->bo = xe_bo_create_pin_map(xe, hwe->gt, vm,
lrc->bo = xe_bo_create_pin_map(xe, tile, vm,
ring_size + xe_lrc_size(xe, hwe->class),
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(hwe->gt) |
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
if (xe_gt_is_media_type(hwe->gt))
lrc->full_gt = xe_find_full_gt(hwe->gt);
else
lrc->full_gt = hwe->gt;
lrc->tile = gt_to_tile(hwe->gt);
lrc->ring.size = ring_size;
lrc->ring.tail = 0;
......
......@@ -20,8 +20,8 @@ struct xe_lrc {
*/
struct xe_bo *bo;
/** @full_gt: full GT which this LRC belongs to */
struct xe_gt *full_gt;
/** @tile: tile which this LRC belongs to */
struct xe_tile *tile;
/** @flags: LRC flags */
u32 flags;
......
......@@ -129,6 +129,7 @@ static u64 xe_migrate_vram_ofs(u64 addr)
static int xe_migrate_create_cleared_bo(struct xe_migrate *m, struct xe_vm *vm)
{
struct xe_gt *gt = m->gt;
struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = vm->xe;
size_t cleared_size;
u64 vram_addr;
......@@ -139,9 +140,9 @@ static int xe_migrate_create_cleared_bo(struct xe_migrate *m, struct xe_vm *vm)
cleared_size = xe_device_ccs_bytes(xe, MAX_PREEMPTDISABLE_TRANSFER);
cleared_size = PAGE_ALIGN(cleared_size);
m->cleared_bo = xe_bo_create_pin_map(xe, gt, vm, cleared_size,
m->cleared_bo = xe_bo_create_pin_map(xe, tile, vm, cleared_size,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(gt) |
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_PINNED_BIT);
if (IS_ERR(m->cleared_bo))
return PTR_ERR(m->cleared_bo);
......@@ -161,7 +162,8 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
u32 map_ofs, level, i;
struct xe_device *xe = gt_to_xe(m->gt);
struct xe_bo *bo, *batch = gt->kernel_bb_pool->bo;
struct xe_tile *tile = gt_to_tile(m->gt);
struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
u64 entry;
int ret;
......@@ -175,10 +177,10 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
/* Need to be sure everything fits in the first PT, or create more */
XE_BUG_ON(m->batch_base_ofs + batch->size >= SZ_2M);
bo = xe_bo_create_pin_map(vm->xe, m->gt, vm,
bo = xe_bo_create_pin_map(vm->xe, tile, vm,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(m->gt) |
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_PINNED_BIT);
if (IS_ERR(bo))
return PTR_ERR(bo);
......@@ -984,7 +986,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
return fence;
}
static void write_pgtable(struct xe_gt *gt, struct xe_bb *bb, u64 ppgtt_ofs,
static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
const struct xe_vm_pgtable_update *update,
struct xe_migrate_pt_update *pt_update)
{
......@@ -1023,7 +1025,7 @@ static void write_pgtable(struct xe_gt *gt, struct xe_bb *bb, u64 ppgtt_ofs,
(chunk * 2 + 1);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
ops->populate(pt_update, gt, NULL, bb->cs + bb->len, ofs, chunk,
ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
update);
bb->len += chunk * 2;
......@@ -1081,7 +1083,7 @@ xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
for (i = 0; i < num_updates; i++) {
const struct xe_vm_pgtable_update *update = &updates[i];
ops->populate(pt_update, m->gt, &update->pt_bo->vmap, NULL,
ops->populate(pt_update, gt_to_tile(m->gt), &update->pt_bo->vmap, NULL,
update->ofs, update->qwords, update);
}
......@@ -1149,6 +1151,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
{
const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
struct xe_gt *gt = m->gt;
struct xe_tile *tile = gt_to_tile(m->gt);
struct xe_device *xe = gt_to_xe(gt);
struct xe_sched_job *job;
struct dma_fence *fence;
......@@ -1243,7 +1246,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
for (i = 0; i < num_updates; i++)
write_pgtable(m->gt, bb, addr + i * XE_PAGE_SIZE,
write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
&updates[i], pt_update);
} else {
/* phys pages, no preamble required */
......@@ -1253,7 +1256,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
/* Preemption is enabled again by the ring ops. */
emit_arb_clear(bb);
for (i = 0; i < num_updates; i++)
write_pgtable(m->gt, bb, 0, &updates[i], pt_update);
write_pgtable(tile, bb, 0, &updates[i], pt_update);
}
if (!eng)
......
......@@ -19,6 +19,7 @@ struct xe_migrate;
struct xe_migrate_pt_update;
struct xe_sync_entry;
struct xe_pt;
struct xe_tile;
struct xe_vm;
struct xe_vm_pgtable_update;
struct xe_vma;
......@@ -31,7 +32,7 @@ struct xe_migrate_pt_update_ops {
/**
* @populate: Populate a command buffer or page-table with ptes.
* @pt_update: Embeddable callback argument.
* @gt: The gt for the current operation.
* @tile: The tile for the current operation.
* @map: struct iosys_map into the memory to be populated.
* @pos: If @map is NULL, map into the memory to be populated.
* @ofs: qword offset into @map, unused if @map is NULL.
......@@ -43,7 +44,7 @@ struct xe_migrate_pt_update_ops {
* page-tables with PTEs.
*/
void (*populate)(struct xe_migrate_pt_update *pt_update,
struct xe_gt *gt, struct iosys_map *map,
struct xe_tile *tile, struct iosys_map *map,
void *pos, u32 ofs, u32 num_qwords,
const struct xe_vm_pgtable_update *update);
......
This diff is collapsed.
......@@ -13,8 +13,8 @@ struct dma_fence;
struct xe_bo;
struct xe_device;
struct xe_engine;
struct xe_gt;
struct xe_sync_entry;
struct xe_tile;
struct xe_vm;
struct xe_vma;
......@@ -23,27 +23,27 @@ struct xe_vma;
unsigned int xe_pt_shift(unsigned int level);
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_gt *gt,
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
unsigned int level);
int xe_pt_create_scratch(struct xe_device *xe, struct xe_gt *gt,
int xe_pt_create_scratch(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm);
void xe_pt_populate_empty(struct xe_gt *gt, struct xe_vm *vm,
void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
struct xe_pt *pt);
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
struct dma_fence *
__xe_pt_bind_vma(struct xe_gt *gt, struct xe_vma *vma, struct xe_engine *e,
__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
struct xe_sync_entry *syncs, u32 num_syncs,
bool rebind);
struct dma_fence *
__xe_pt_unbind_vma(struct xe_gt *gt, struct xe_vma *vma, struct xe_engine *e,
__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_engine *e,
struct xe_sync_entry *syncs, u32 num_syncs);
bool xe_pt_zap_ptes(struct xe_gt *gt, struct xe_vma *vma);
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
u64 gen8_pde_encode(struct xe_bo *bo, u64 bo_offset,
const enum xe_cache_level level);
......
......@@ -11,7 +11,6 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_map.h"
static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
......@@ -33,14 +32,14 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
sa_manager->bo = NULL;
}
struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_gt *gt, u32 size, u32 align)
struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
{
struct xe_device *xe = gt_to_xe(gt);
struct xe_device *xe = tile_to_xe(tile);
u32 managed_size = size - SZ_4K;
struct xe_bo *bo;
int ret;
struct xe_sa_manager *sa_manager = drmm_kzalloc(&gt_to_xe(gt)->drm,
struct xe_sa_manager *sa_manager = drmm_kzalloc(&tile_to_xe(tile)->drm,
sizeof(*sa_manager),
GFP_KERNEL);
if (!sa_manager)
......@@ -48,8 +47,8 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_gt *gt, u32 size, u32 alig
sa_manager->bo = NULL;
bo = xe_bo_create_pin_map(xe, gt, NULL, size, ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(gt) |
bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
......@@ -90,7 +89,7 @@ struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
{
struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
struct xe_device *xe = gt_to_xe(sa_manager->bo->gt);
struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);
if (!sa_manager->bo->vmap.is_iomem)
return;
......
......@@ -9,9 +9,9 @@
struct dma_fence;
struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_gt *gt, u32 size, u32 align);
struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align);
struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
u32 size);
......
......@@ -7,6 +7,7 @@
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_sa.h"
#include "xe_tile.h"
#include "xe_ttm_vram_mgr.h"
......@@ -76,6 +77,12 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
goto err_mem_access;
err = xe_ggtt_init_noalloc(tile->mem.ggtt);
if (err)
goto err_mem_access;
tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
if (IS_ERR(tile->mem.kernel_bb_pool))
err = PTR_ERR(tile->mem.kernel_bb_pool);
err_mem_access:
xe_device_mem_access_put(tile_to_xe(tile));
......
......@@ -322,6 +322,7 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw)
{
struct xe_device *xe = uc_fw_to_xe(uc_fw);
struct xe_gt *gt = uc_fw_to_gt(uc_fw);
struct xe_tile *tile = gt_to_tile(gt);
struct device *dev = xe->drm.dev;
const struct firmware *fw = NULL;
struct uc_css_header *css;
......@@ -411,9 +412,9 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw)
if (uc_fw->type == XE_UC_FW_TYPE_GUC)
guc_read_css_info(uc_fw, css);
obj = xe_bo_create_from_data(xe, gt, fw->data, fw->size,
obj = xe_bo_create_from_data(xe, tile, fw->data, fw->size,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(gt) |
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
XE_BO_CREATE_GGTT_BIT);
if (IS_ERR(obj)) {
drm_notice(&xe->drm, "%s firmware %s: failed to create / populate bo",
......
This diff is collapsed.
......@@ -54,7 +54,7 @@ xe_vm_find_overlapping_vma(struct xe_vm *vm, const struct xe_vma *vma);
#define xe_vm_assert_held(vm) dma_resv_assert_held(&(vm)->resv)
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_gt *full_gt);
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);
int xe_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
......
......@@ -37,17 +37,17 @@ struct xe_vma {
/** @bo_offset: offset into BO if not a userptr, unused for userptr */
u64 bo_offset;
/** @gt_mask: GT mask of where to create binding for this VMA */
u64 gt_mask;
/** @tile_mask: Tile mask of where to create binding for this VMA */
u64 tile_mask;
/**
* @gt_present: GT mask of binding are present for this VMA.
* @tile_present: Tile mask of bindings present for this VMA.
* protected by vm->lock, vm->resv and for userptrs,
* vm->userptr.notifier_lock for writing. Needs either for reading,
* but if reading is done under the vm->lock only, it needs to be held
* in write mode.
*/
u64 gt_present;
u64 tile_present;
/**
* @destroyed: VMA is destroyed, in the sense that it shouldn't be
......@@ -132,8 +132,8 @@ struct xe_vma {
/** @usm: unified shared memory state */
struct {
/** @gt_invalidated: VMA has been invalidated */
u64 gt_invalidated;
/** @tile_invalidated: VMA has been invalidated */
u64 tile_invalidated;
} usm;
struct {
......
......@@ -407,10 +407,10 @@ struct drm_xe_vm_bind_op {
__u64 addr;
/**
* @gt_mask: Mask for which GTs to create binds for, 0 == All GTs,
* @tile_mask: Mask for which tiles to create binds for, 0 == All tiles,
* only applies to creating new VMAs
*/
__u64 gt_mask;
__u64 tile_mask;
/** @op: Operation to perform (lower 16 bits) and flags (upper 16 bits) */
__u32 op;
......