Commit 59276f05 authored by Nirmoy Das, committed by Alex Deucher

drm/amdgpu: switch to amdgpu_bo_vm for vm code

The subclass amdgpu_bo_vm is intended for PT/PD BOs, which are also
shadowed, so switch the PT/PD BOs over to amdgpu_bo_vm.

v4: update amdgpu_vm_update_funcs to accept amdgpu_bo_vm.
v3: simplify code.
    also check whether the shadow bo exists instead of checking only the bo type.
v2: squash three related patches.
Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 1fdc79f6
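
For context, a minimal sketch of the subclass pattern this change relies on (abridged and illustrative, not the full definition from amdgpu_object.h):

struct amdgpu_bo_vm {
	struct amdgpu_bo	bo;		/* base BO; must stay the first member so the cast below works */
	struct amdgpu_bo	*shadow;	/* shadow copy in GTT, NULL if none */
};

/* The downcast is only valid for BOs created with
 * bp.bo_ptr_size = sizeof(struct amdgpu_bo_vm), as done in the diff below. */
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)

The amdgpu_bo_shadowed() helper used in the diff follows the same idea: for kernel-type (PT/PD) BOs it returns to_amdgpu_bo_vm(bo)->shadow, and NULL otherwise, replacing the old direct bo->shadow dereference.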
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -652,15 +652,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 	spin_lock(&adev->mman.bdev.lru_lock);
 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
 		struct amdgpu_bo *bo = bo_base->bo;
+		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
 
 		if (!bo->parent)
 			continue;
 
 		ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
 					&vm->lru_bulk_move);
-		if (bo->shadow)
-			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
-						&bo->shadow->tbo.mem,
+		if (shadow)
+			ttm_bo_move_to_lru_tail(&shadow->tbo, &shadow->tbo.mem,
 						&vm->lru_bulk_move);
 	}
 	spin_unlock(&adev->mman.bdev.lru_lock);
@@ -692,12 +692,13 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
 		struct amdgpu_bo *bo = bo_base->bo;
+		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
 
 		r = validate(param, bo);
 		if (r)
 			return r;
 
-		if (bo->shadow) {
-			r = validate(param, bo->shadow);
+		if (shadow) {
+			r = validate(param, shadow);
 			if (r)
 				return r;
 		}
@@ -705,7 +706,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		if (bo->tbo.type != ttm_bo_type_kernel) {
 			amdgpu_vm_bo_moved(bo_base);
 		} else {
-			vm->update_funcs->map_table(bo);
+			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
 			amdgpu_vm_bo_relocated(bo_base);
 		}
 	}
@@ -737,7 +738,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  *
  * @adev: amdgpu_device pointer
  * @vm: VM to clear BO from
- * @bo: BO to clear
+ * @vmbo: BO to clear
  * @immediate: use an immediate update
  *
  * Root PD needs to be reserved when calling this.
@@ -747,13 +748,14 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			      struct amdgpu_vm *vm,
-			      struct amdgpu_bo *bo,
+			      struct amdgpu_bo_vm *vmbo,
 			      bool immediate)
 {
 	struct ttm_operation_ctx ctx = { true, false };
 	unsigned level = adev->vm_manager.root_level;
 	struct amdgpu_vm_update_params params;
-	struct amdgpu_bo *ancestor = bo;
+	struct amdgpu_bo *ancestor = &vmbo->bo;
+	struct amdgpu_bo *bo = &vmbo->bo;
 	unsigned entries, ats_entries;
 	uint64_t addr;
 	int r;
@@ -793,14 +795,15 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	if (bo->shadow) {
-		r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
-				    &ctx);
+	if (vmbo->shadow) {
+		struct amdgpu_bo *shadow = vmbo->shadow;
+
+		r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
 		if (r)
 			return r;
 	}
 
-	r = vm->update_funcs->map_table(bo);
+	r = vm->update_funcs->map_table(vmbo);
 	if (r)
 		return r;
@@ -824,7 +827,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
 	}
 
-	r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
+	r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
 				     value, flags);
 	if (r)
 		return r;
@@ -847,7 +850,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		}
 	}
 
-	r = vm->update_funcs->update(&params, bo, addr, 0, entries,
+	r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
 				     value, flags);
 	if (r)
 		return r;
@@ -863,14 +866,16 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
  * @vm: requesting vm
  * @level: the page table level
  * @immediate: use a immediate update
- * @bo: pointer to the buffer object pointer
+ * @vmbo: pointer to the buffer object pointer
  */
 static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
 			       struct amdgpu_vm *vm,
 			       int level, bool immediate,
-			       struct amdgpu_bo **bo)
+			       struct amdgpu_bo_vm **vmbo)
 {
 	struct amdgpu_bo_param bp;
+	struct amdgpu_bo *bo;
+	struct dma_resv *resv;
 	int r;
 
 	memset(&bp, 0, sizeof(bp));
@@ -881,7 +886,7 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
 	bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain);
 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
 		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+	bp.bo_ptr_size = sizeof(struct amdgpu_bo_vm);
 	if (vm->use_cpu_for_update)
 		bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@@ -890,26 +895,41 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
 	if (vm->root.base.bo)
 		bp.resv = vm->root.base.bo->tbo.base.resv;
 
-	r = amdgpu_bo_create(adev, &bp, bo);
+	r = amdgpu_bo_create_vm(adev, &bp, vmbo);
 	if (r)
 		return r;
 
-	if (vm->is_compute_context && (adev->flags & AMD_IS_APU))
+	bo = &(*vmbo)->bo;
+	if (vm->is_compute_context && (adev->flags & AMD_IS_APU)) {
+		(*vmbo)->shadow = NULL;
 		return 0;
+	}
 
 	if (!bp.resv)
-		WARN_ON(dma_resv_lock((*bo)->tbo.base.resv,
+		WARN_ON(dma_resv_lock(bo->tbo.base.resv,
 				      NULL));
-	r = amdgpu_bo_create_shadow(adev, bp.size, *bo);
+	resv = bp.resv;
+	memset(&bp, 0, sizeof(bp));
+	bp.size = amdgpu_vm_bo_size(adev, level);
+	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = bo->tbo.base.resv;
+	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+	r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
 
-	if (!bp.resv)
-		dma_resv_unlock((*bo)->tbo.base.resv);
+	if (!resv)
+		dma_resv_unlock(bo->tbo.base.resv);
 
 	if (r) {
-		amdgpu_bo_unref(bo);
+		amdgpu_bo_unref(&bo);
 		return r;
 	}
 
+	(*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
+	amdgpu_bo_add_to_shadow_list((*vmbo)->shadow);
+
 	return 0;
 }
@@ -933,7 +953,8 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 			       bool immediate)
 {
 	struct amdgpu_vm_pt *entry = cursor->entry;
-	struct amdgpu_bo *pt;
+	struct amdgpu_bo *pt_bo;
+	struct amdgpu_bo_vm *pt;
 	int r;
 
 	if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
@@ -957,8 +978,9 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 	/* Keep a reference to the root directory to avoid
 	 * freeing them up in the wrong order.
 	 */
-	pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
-	amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+	pt_bo = &pt->bo;
+	pt_bo->parent = amdgpu_bo_ref(cursor->parent->base.bo);
+	amdgpu_vm_bo_base_init(&entry->base, vm, pt_bo);
 
 	r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
 	if (r)
@@ -968,7 +990,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 
 error_free_pt:
 	amdgpu_bo_unref(&pt->shadow);
-	amdgpu_bo_unref(&pt);
+	amdgpu_bo_unref(&pt_bo);
 	return r;
 }
@@ -979,10 +1001,13 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
 {
+	struct amdgpu_bo *shadow;
+
 	if (entry->base.bo) {
+		shadow = amdgpu_bo_shadowed(entry->base.bo);
 		entry->base.bo->vm_bo = NULL;
 		list_del(&entry->base.vm_status);
-		amdgpu_bo_unref(&entry->base.bo->shadow);
+		amdgpu_bo_unref(&shadow);
 		amdgpu_bo_unref(&entry->base.bo);
 	}
 	kvfree(entry->entries);
@@ -1284,7 +1309,8 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
 	level += params->adev->vm_manager.root_level;
 	amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
 	pde = (entry - parent->entries) * 8;
-	return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
+	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
+					1, 0, flags);
 }
 
 /**
@@ -1364,9 +1390,9 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
  * Make sure to set the right flags for the PTEs at the desired level.
  */
 static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
-				   struct amdgpu_bo *bo, unsigned level,
+				   struct amdgpu_bo_vm *pt, unsigned int level,
 				   uint64_t pe, uint64_t addr,
-				   unsigned count, uint32_t incr,
+				   unsigned int count, uint32_t incr,
 				   uint64_t flags)
 {
@@ -1382,7 +1408,7 @@ static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
 		flags |= AMDGPU_PTE_EXECUTABLE;
 	}
 
-	params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
+	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
 					 flags);
 }
@@ -1562,9 +1588,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 						    nptes, dst, incr, upd_flags,
 						    vm->task_info.pid,
 						    vm->immediate.fence_context);
-			amdgpu_vm_update_flags(params, pt, cursor.level,
-					       pe_start, dst, nptes, incr,
-					       upd_flags);
+			amdgpu_vm_update_flags(params, to_amdgpu_bo_vm(pt),
+					       cursor.level, pe_start, dst,
+					       nptes, incr, upd_flags);
 
 			pe_start += nptes * 8;
 			dst += nptes * incr;
@@ -2674,7 +2700,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 	struct amdgpu_vm_bo_base *bo_base;
 
 	/* shadow bo doesn't have bo base, its validation needs its parent */
-	if (bo->parent && bo->parent->shadow == bo)
+	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
 		bo = bo->parent;
 
 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
@@ -2843,7 +2869,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
  */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 {
-	struct amdgpu_bo *root;
+	struct amdgpu_bo *root_bo;
+	struct amdgpu_bo_vm *root;
 	int r, i;
 
 	vm->va = RB_ROOT_CACHED;
@@ -2897,16 +2924,16 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 				false, &root);
 	if (r)
 		goto error_free_delayed;
-
-	r = amdgpu_bo_reserve(root, true);
+	root_bo = &root->bo;
+	r = amdgpu_bo_reserve(root_bo, true);
 	if (r)
 		goto error_free_root;
 
-	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+	r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
 	if (r)
 		goto error_unreserve;
 
-	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
+	amdgpu_vm_bo_base_init(&vm->root.base, vm, root_bo);
 
 	r = amdgpu_vm_clear_bo(adev, vm, root, false);
 	if (r)
@@ -2935,8 +2962,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 	amdgpu_bo_unreserve(vm->root.base.bo);
 
 error_free_root:
-	amdgpu_bo_unref(&vm->root.base.bo->shadow);
-	amdgpu_bo_unref(&vm->root.base.bo);
+	amdgpu_bo_unref(&root->shadow);
+	amdgpu_bo_unref(&root_bo);
 	vm->root.base.bo = NULL;
 
 error_free_delayed:
@@ -3034,7 +3061,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	 */
 	if (pte_support_ats != vm->pte_support_ats) {
 		vm->pte_support_ats = pte_support_ats;
-		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
+		r = amdgpu_vm_clear_bo(adev, vm,
+				       to_amdgpu_bo_vm(vm->root.base.bo),
+				       false);
 		if (r)
 			goto free_idr;
 	}
@@ -3078,7 +3107,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	}
 
 	/* Free the shadow bo for compute VM */
-	amdgpu_bo_unref(&vm->root.base.bo->shadow);
+	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.base.bo)->shadow);
 
 	if (pasid)
 		vm->pasid = pasid;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -39,6 +39,7 @@
 struct amdgpu_bo_va;
 struct amdgpu_job;
 struct amdgpu_bo_list_entry;
+struct amdgpu_bo_vm;
 
 /*
  * GPUVM handling
@@ -239,11 +240,11 @@ struct amdgpu_vm_update_params {
 };
 
 struct amdgpu_vm_update_funcs {
-	int (*map_table)(struct amdgpu_bo *bo);
+	int (*map_table)(struct amdgpu_bo_vm *bo);
 	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
 		       enum amdgpu_sync_mode sync_mode);
 	int (*update)(struct amdgpu_vm_update_params *p,
-		      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
+		      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
 		      unsigned count, uint32_t incr, uint64_t flags);
 	int (*commit)(struct amdgpu_vm_update_params *p,
 		      struct dma_fence **fence);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -29,9 +29,9 @@
  *
  * @table: newly allocated or validated PD/PT
  */
-static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
+static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
 {
-	return amdgpu_bo_kmap(table, NULL);
+	return amdgpu_bo_kmap(&table->bo, NULL);
 }
 
 /**
@@ -58,7 +58,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
 * amdgpu_vm_cpu_update - helper to update page tables via CPU
  *
  * @p: see amdgpu_vm_update_params definition
- * @bo: PD/PT to update
+ * @vmbo: PD/PT to update
  * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -68,7 +68,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
  * Write count number of PT/PD entries directly.
  */
 static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
-				struct amdgpu_bo *bo, uint64_t pe,
+				struct amdgpu_bo_vm *vmbo, uint64_t pe,
 				uint64_t addr, unsigned count, uint32_t incr,
 				uint64_t flags)
 {
@@ -76,13 +76,13 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
 	uint64_t value;
 	int r;
 
-	if (bo->tbo.moving) {
-		r = dma_fence_wait(bo->tbo.moving, true);
+	if (vmbo->bo.tbo.moving) {
+		r = dma_fence_wait(vmbo->bo.tbo.moving, true);
 		if (r)
 			return r;
 	}
 
-	pe += (unsigned long)amdgpu_bo_kptr(bo);
+	pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);
 
 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -33,11 +33,11 @@
  *
  * @table: newly allocated or validated PD/PT
  */
-static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
+static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
 {
 	int r;
 
-	r = amdgpu_ttm_alloc_gart(&table->tbo);
+	r = amdgpu_ttm_alloc_gart(&table->bo.tbo);
 	if (r)
 		return r;
@@ -186,7 +186,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
 * amdgpu_vm_sdma_update - execute VM update
  *
  * @p: see amdgpu_vm_update_params definition
- * @bo: PD/PT to update
+ * @vmbo: PD/PT to update
  * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -197,10 +197,11 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
  * the IB.
  */
 static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
-				 struct amdgpu_bo *bo, uint64_t pe,
+				 struct amdgpu_bo_vm *vmbo, uint64_t pe,
 				 uint64_t addr, unsigned count, uint32_t incr,
 				 uint64_t flags)
 {
+	struct amdgpu_bo *bo = &vmbo->bo;
 	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
 		: AMDGPU_IB_POOL_DELAYED;
 	unsigned int i, ndw, nptes;
@@ -238,8 +239,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 
 		if (!p->pages_addr) {
 			/* set page commands needed */
-			if (bo->shadow)
-				amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
+			if (vmbo->shadow)
+				amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr,
 							count, incr, flags);
 			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
 						incr, flags);
@@ -248,7 +249,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 
 		/* copy commands needed */
 		ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
-			(bo->shadow ? 2 : 1);
+			(vmbo->shadow ? 2 : 1);
 
 		/* for padding */
 		ndw -= 7;
@@ -263,8 +264,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 			pte[i] |= flags;
 		}
 
-		if (bo->shadow)
-			amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
+		if (vmbo->shadow)
+			amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes);
 		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
 
 		pe += nptes * 8;