Commit 3aa3c5c2 authored by Matthew Brost, committed by Thomas Hellström

drm/xe: Map both mem.kernel_bb_pool and usm.bb_pool

For integrated devices we need to map both mem.kernel_bb_pool and
usm.bb_pool to be able to run batches from both pools.

Fixes: a682b6a4 ("drm/xe: Support device page faults on integrated platforms")
Tested-by: Brian Welty <brian.welty@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Brian Welty <brian.welty@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240202033440.2351862-1-matthew.brost@intel.com
(cherry picked from commit 72f86ed3)
Signed-off-by: default avatarThomas Hellström <thomas.hellstrom@linux.intel.com>
parent 90773aaf
@@ -437,7 +437,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
 	 * USM has its only SA pool to non-block behind user operations
 	 */
 	if (gt_to_xe(gt)->info.has_usm) {
-		gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
+		struct xe_device *xe = gt_to_xe(gt);
+
+		gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
+							IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
 		if (IS_ERR(gt->usm.bb_pool)) {
 			err = PTR_ERR(gt->usm.bb_pool);
 			goto err_force_wake;
...
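The hunk above sizes the USM batch-buffer suballocator per platform: discrete GPUs keep the 1 MiB pool, while integrated parts drop to 512 KiB, presumably so that both mem.kernel_bb_pool and usm.bb_pool fit in the window the migrate VM maps (the xe_migrate.c change below maps exactly 1 MiB + 512 KiB). A minimal standalone sketch of that selection; the SZ_* values match the kernel macros, but the helper function is illustrative only, not driver code:

#include <stdbool.h>
#include <stdio.h>

/* Same values as the kernel's SZ_512K / SZ_1M macros. */
#define SZ_512K (512u * 1024u)
#define SZ_1M   (1024u * 1024u)

/* Illustrative only: mirrors the IS_DGFX(xe) ? SZ_1M : SZ_512K choice. */
static unsigned int usm_bb_pool_size(bool is_dgfx)
{
	return is_dgfx ? SZ_1M : SZ_512K;
}

int main(void)
{
	printf("discrete   usm.bb_pool: %u KiB\n", usm_bb_pool_size(true) / 1024u);
	printf("integrated usm.bb_pool: %u KiB\n", usm_bb_pool_size(false) / 1024u);
	return 0;
}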
@@ -170,10 +170,22 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 	if (!IS_DGFX(xe)) {
 		/* Write out batch too */
 		m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
+		for (i = 0; i < batch->size;
+		     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
+		     XE_PAGE_SIZE) {
+			entry = vm->pt_ops->pte_encode_bo(batch, i,
+							  pat_index, 0);
+
+			xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
+				  entry);
+			level++;
+		}
 		if (xe->info.has_usm) {
+			xe_tile_assert(tile, batch->size == SZ_1M);
+
 			batch = tile->primary_gt->usm.bb_pool->bo;
-			m->usm_batch_base_ofs = m->batch_base_ofs;
-		}
+			m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
+			xe_tile_assert(tile, batch->size == SZ_512K);
 
-		for (i = 0; i < batch->size;
-		     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
+			for (i = 0; i < batch->size;
+			     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
...
@@ -185,6 +197,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
-				  entry);
-			level++;
-		}
+					  entry);
+				level++;
+			}
+		}
 	} else {
 		u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
...
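On integrated devices the xe_migrate.c change lays the two batch buffers out back to back in the migrate VM's mapped window: the mem.kernel_bb_pool batch is written at batch_base_ofs and the usm.bb_pool batch at batch_base_ofs + SZ_1M, with the asserts documenting the expected 1 MiB and 512 KiB sizes. A small sketch of that layout arithmetic; NUM_PT_SLOTS is assumed to be 32 here (the value used by xe_migrate.c at the time), everything else follows the diff:

#include <stdio.h>

#define XE_PAGE_SIZE 4096u
#define NUM_PT_SLOTS 32u	/* assumption: value from xe_migrate.c */
#define SZ_512K (512u * 1024u)
#define SZ_1M   (1024u * 1024u)

int main(void)
{
	/* kernel_bb_pool batch sits right after the page-table slots ... */
	unsigned int batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
	/* ... and the usm.bb_pool batch is mapped 1 MiB above it. */
	unsigned int usm_batch_base_ofs = batch_base_ofs + SZ_1M;

	printf("kernel_bb_pool batch @ 0x%06x, size %u KiB\n",
	       batch_base_ofs, SZ_1M / 1024u);
	printf("usm.bb_pool    batch @ 0x%06x, size %u KiB\n",
	       usm_batch_base_ofs, SZ_512K / 1024u);
	return 0;
}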