Commit eaad0c3a authored by Christian König, committed by Alex Deucher

drm/amdgpu: rename direct to immediate for VM updates

To avoid confusion with direct ring submissions, rename bottom-of-pipe
VM table changes to immediate updates.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 9ecefb19
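
For context before the diff: the patch is a pure rename, and the structure it renames is unchanged. A VM keeps two scheduler entities for page-table updates, and the flag in amdgpu_vm_update_params picks between them and between the two IB pools, as amdgpu_vm_sdma_prepare()/amdgpu_vm_sdma_commit() show below. The following is a minimal standalone C sketch of that dispatch decision, not kernel code; the struct and field names are simplified stand-ins chosen to mirror the patch.

/*
 * Standalone model (not kernel code) of how the renamed flag steers a VM
 * page-table update: "immediate" selects the immediate scheduler entity
 * and the immediate IB pool; everything else takes the delayed path.
 */
#include <stdbool.h>
#include <stdio.h>

enum ib_pool { IB_POOL_DELAYED, IB_POOL_IMMEDIATE };

struct entity { const char *name; };

struct vm {
	struct entity immediate;	/* was vm->direct before this patch */
	struct entity delayed;
};

struct update_params {
	struct vm *vm;
	bool immediate;			/* was p->direct before this patch */
};

static void commit_update(struct update_params *p)
{
	/* mirrors: entity = p->immediate ? &p->vm->immediate : &p->vm->delayed; */
	struct entity *e = p->immediate ? &p->vm->immediate : &p->vm->delayed;
	enum ib_pool pool = p->immediate ? IB_POOL_IMMEDIATE : IB_POOL_DELAYED;

	printf("submit via %s entity, pool %d\n", e->name, pool);
}

int main(void)
{
	struct vm vm = { { "immediate" }, { "delayed" } };
	struct update_params p = { &vm, true };

	commit_update(&p);	/* immediate: e.g. servicing a page fault */
	p.immediate = false;
	commit_update(&p);	/* delayed: a normal CS-driven update */
	return 0;
}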
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	    !dma_fence_is_later(updates, (*id)->flushed_updates))
 		updates = NULL;
 
-	if ((*id)->owner != vm->direct.fence_context ||
+	if ((*id)->owner != vm->immediate.fence_context ||
 	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
 	    updates || !(*id)->last_flush ||
 	    ((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 		struct dma_fence *flushed;
 
 		/* Check all the prerequisites to using this VMID */
-		if ((*id)->owner != vm->direct.fence_context)
+		if ((*id)->owner != vm->immediate.fence_context)
 			continue;
 
 		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -448,7 +448,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	}
 
 	id->pd_gpu_addr = job->vm_pd_addr;
-	id->owner = vm->direct.fence_context;
+	id->owner = vm->immediate.fence_context;
 
 	if (job->vm_needs_flush) {
 		dma_fence_put(id->last_flush);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -726,7 +726,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  * @adev: amdgpu_device pointer
  * @vm: VM to clear BO from
  * @bo: BO to clear
- * @direct: use a direct update
+ * @immediate: use an immediate update
  *
  * Root PD needs to be reserved when calling this.
  *
@@ -736,7 +736,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			      struct amdgpu_vm *vm,
 			      struct amdgpu_bo *bo,
-			      bool direct)
+			      bool immediate)
 {
 	struct ttm_operation_ctx ctx = { true, false };
 	unsigned level = adev->vm_manager.root_level;
@@ -795,7 +795,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	memset(&params, 0, sizeof(params));
 	params.adev = adev;
 	params.vm = vm;
-	params.direct = direct;
+	params.immediate = immediate;
 
 	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
 	if (r)
@@ -850,11 +850,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
  * @adev: amdgpu_device pointer
  * @vm: requesting vm
  * @level: the page table level
- * @direct: use a direct update
+ * @immediate: use an immediate update
  * @bp: resulting BO allocation parameters
  */
 static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			       int level, bool direct,
+			       int level, bool immediate,
 			       struct amdgpu_bo_param *bp)
 {
 	memset(bp, 0, sizeof(*bp));
@@ -870,7 +870,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	else if (!vm->root.base.bo || vm->root.base.bo->shadow)
 		bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
 	bp->type = ttm_bo_type_kernel;
-	bp->no_wait_gpu = direct;
+	bp->no_wait_gpu = immediate;
 	if (vm->root.base.bo)
 		bp->resv = vm->root.base.bo->tbo.base.resv;
 }
@@ -881,7 +881,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * @adev: amdgpu_device pointer
  * @vm: VM to allocate page tables for
  * @cursor: Which page table to allocate
- * @direct: use a direct update
+ * @immediate: use an immediate update
  *
  * Make sure a specific page table or directory is allocated.
  *
@@ -892,7 +892,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 			       struct amdgpu_vm *vm,
 			       struct amdgpu_vm_pt_cursor *cursor,
-			       bool direct)
+			       bool immediate)
 {
 	struct amdgpu_vm_pt *entry = cursor->entry;
 	struct amdgpu_bo_param bp;
@@ -913,7 +913,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 	if (entry->base.bo)
 		return 0;
 
-	amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
+	amdgpu_vm_bo_param(adev, vm, cursor->level, immediate, &bp);
 
 	r = amdgpu_bo_create(adev, &bp, &pt);
 	if (r)
@@ -925,7 +925,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 	pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
 	amdgpu_vm_bo_base_init(&entry->base, vm, pt);
 
-	r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
+	r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
 	if (r)
 		goto error_free_pt;
@@ -1276,7 +1276,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @direct: submit directly to the paging queue
+ * @immediate: submit immediately to the paging queue
  *
  * Makes sure all directories are up to date.
  *
@@ -1284,7 +1284,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
  * 0 for success, error for failure.
  */
 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm, bool direct)
+			  struct amdgpu_vm *vm, bool immediate)
 {
 	struct amdgpu_vm_update_params params;
 	int r;
@@ -1295,7 +1295,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
 	memset(&params, 0, sizeof(params));
 	params.adev = adev;
 	params.vm = vm;
-	params.direct = direct;
+	params.immediate = immediate;
 
 	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
 	if (r)
@@ -1451,7 +1451,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 		 * address range are actually allocated
 		 */
 		r = amdgpu_vm_alloc_pts(params->adev, params->vm,
-					&cursor, params->direct);
+					&cursor, params->immediate);
 		if (r)
 			return r;
 	}
@@ -1557,7 +1557,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
- * @direct: direct submission in a page fault
+ * @immediate: immediate submission in a page fault
  * @resv: fences we need to sync to
  * @start: start of mapped range
  * @last: last mapped entry
@@ -1572,7 +1572,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
  * 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
-				       struct amdgpu_vm *vm, bool direct,
+				       struct amdgpu_vm *vm, bool immediate,
 				       struct dma_resv *resv,
 				       uint64_t start, uint64_t last,
 				       uint64_t flags, uint64_t addr,
@@ -1586,7 +1586,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	memset(&params, 0, sizeof(params));
 	params.adev = adev;
 	params.vm = vm;
-	params.direct = direct;
+	params.immediate = immediate;
 	params.pages_addr = pages_addr;
 
 	/* Implicitly sync to command submissions in the same VM before
@@ -1606,8 +1606,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
 		struct amdgpu_bo *root = vm->root.base.bo;
 
-		if (!dma_fence_is_signaled(vm->last_direct))
-			amdgpu_bo_fence(root, vm->last_direct, true);
+		if (!dma_fence_is_signaled(vm->last_immediate))
+			amdgpu_bo_fence(root, vm->last_immediate, true);
 	}
 
 	r = vm->update_funcs->prepare(&params, resv, sync_mode);
@@ -2582,7 +2582,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 		return false;
 
 	/* Don't evict VM page tables while they are updated */
-	if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
+	if (!dma_fence_is_signaled(bo_base->vm->last_immediate)) {
 		amdgpu_vm_eviction_unlock(bo_base->vm);
 		return false;
 	}
@@ -2759,7 +2759,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 	if (timeout <= 0)
 		return timeout;
 
-	return dma_fence_wait_timeout(vm->last_direct, true, timeout);
+	return dma_fence_wait_timeout(vm->last_immediate, true, timeout);
 }
 
 /**
@@ -2795,7 +2795,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	/* create scheduler entities for page table updates */
-	r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
 				  adev->vm_manager.vm_pte_scheds,
 				  adev->vm_manager.vm_pte_num_scheds, NULL);
 	if (r)
@@ -2805,7 +2805,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 				  adev->vm_manager.vm_pte_scheds,
 				  adev->vm_manager.vm_pte_num_scheds, NULL);
 	if (r)
-		goto error_free_direct;
+		goto error_free_immediate;
 
 	vm->pte_support_ats = false;
 	vm->is_compute_context = false;
@@ -2831,7 +2831,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	else
 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 	vm->last_update = NULL;
-	vm->last_direct = dma_fence_get_stub();
+	vm->last_immediate = dma_fence_get_stub();
 
 	mutex_init(&vm->eviction_lock);
 	vm->evicting = false;
@@ -2885,11 +2885,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	vm->root.base.bo = NULL;
 
 error_free_delayed:
-	dma_fence_put(vm->last_direct);
+	dma_fence_put(vm->last_immediate);
 	drm_sched_entity_destroy(&vm->delayed);
 
-error_free_direct:
-	drm_sched_entity_destroy(&vm->direct);
+error_free_immediate:
+	drm_sched_entity_destroy(&vm->immediate);
 
 	return r;
 }
@@ -3086,8 +3086,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		vm->pasid = 0;
 	}
 
-	dma_fence_wait(vm->last_direct, false);
-	dma_fence_put(vm->last_direct);
+	dma_fence_wait(vm->last_immediate, false);
+	dma_fence_put(vm->last_immediate);
 
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
@@ -3104,7 +3104,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	amdgpu_bo_unref(&root);
 	WARN_ON(vm->root.base.bo);
 
-	drm_sched_entity_destroy(&vm->direct);
+	drm_sched_entity_destroy(&vm->immediate);
 	drm_sched_entity_destroy(&vm->delayed);
 
 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -206,9 +206,9 @@ struct amdgpu_vm_update_params {
 	struct amdgpu_vm *vm;
 
 	/**
-	 * @direct: if changes should be made directly
+	 * @immediate: if changes should be made immediately
 	 */
-	bool direct;
+	bool immediate;
 
 	/**
 	 * @pages_addr:
@@ -274,11 +274,11 @@ struct amdgpu_vm {
 	struct dma_fence	*last_update;
 
 	/* Scheduler entities for page table updates */
-	struct drm_sched_entity	direct;
+	struct drm_sched_entity	immediate;
 	struct drm_sched_entity	delayed;
 
 	/* Last submission to the scheduler entities */
-	struct dma_fence	*last_direct;
+	struct dma_fence	*last_immediate;
 
 	unsigned int		pasid;
 	/* dedicated to vm */
@@ -379,7 +379,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			      void *param);
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
 int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm, bool direct);
+			  struct amdgpu_vm *vm, bool immediate);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 			  struct amdgpu_vm *vm,
 			  struct dma_fence **fence);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -84,7 +84,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
 
 	pe += (unsigned long)amdgpu_bo_kptr(bo);
 
-	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
 
 	for (i = 0; i < count; i++) {
 		value = p->pages_addr ?
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -61,8 +61,8 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 				  struct dma_resv *resv,
 				  enum amdgpu_sync_mode sync_mode)
 {
-	enum amdgpu_ib_pool_type pool = p->direct ? AMDGPU_IB_POOL_IMMEDIATE :
-		AMDGPU_IB_POOL_DELAYED;
+	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+		: AMDGPU_IB_POOL_DELAYED;
 	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
 	int r;
 
@@ -96,7 +96,7 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 	struct amdgpu_ring *ring;
 	int r;
 
-	entity = p->direct ? &p->vm->direct : &p->vm->delayed;
+	entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
 	ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
 
 	WARN_ON(ib->length_dw == 0);
@@ -106,15 +106,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 	if (r)
 		goto error;
 
-	if (p->direct) {
+	if (p->immediate) {
 		tmp = dma_fence_get(f);
-		swap(p->vm->last_direct, tmp);
+		swap(p->vm->last_immediate, tmp);
 		dma_fence_put(tmp);
 	} else {
-		dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+		dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv,
+					  f);
 	}
 
-	if (fence && !p->direct)
+	if (fence && !p->immediate)
 		swap(*fence, f);
 	dma_fence_put(f);
 	return 0;
@@ -144,7 +145,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
 	src += p->num_dw_left * 4;
 
 	pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
-	trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
+	trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);
 
 	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
 }
@@ -171,7 +172,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
 	struct amdgpu_ib *ib = p->job->ibs;
 
 	pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
-	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
 	if (count < 3) {
 		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
 				    count, incr);
@@ -200,8 +201,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 			       uint64_t addr, unsigned count, uint32_t incr,
 			       uint64_t flags)
 {
-	enum amdgpu_ib_pool_type pool = p->direct ? AMDGPU_IB_POOL_IMMEDIATE :
-		AMDGPU_IB_POOL_DELAYED;
+	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+		: AMDGPU_IB_POOL_DELAYED;
 	unsigned int i, ndw, nptes;
 	uint64_t *pte;
 	int r;
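
A footnote on the fence bookkeeping visible in the amdgpu_vm_sdma_commit() hunk above: an immediate submission replaces vm->last_immediate, which amdgpu_vm_evictable() and amdgpu_vm_wait_idle() later check before evicting or declaring the VM idle. Below is a minimal standalone sketch of that reference-counting pattern, assuming the swap-with-tmp form shown in the hunk; it is a plain C model with a toy refcount, not the kernel's dma_fence API.

/*
 * Standalone sketch (not kernel code) of the last_immediate bookkeeping:
 * take a reference on the new fence, swap it into the VM's slot, then
 * drop the reference on whatever fence was there before.
 */
#include <stdio.h>
#include <stdlib.h>

struct fence { int refcount; int seq; };

static struct fence *fence_get(struct fence *f) { f->refcount++; return f; }

static void fence_put(struct fence *f)
{
	if (--f->refcount == 0) {
		printf("fence %d released\n", f->seq);
		free(f);
	}
}

#define swap(a, b) do { struct fence *__t = (a); (a) = (b); (b) = __t; } while (0)

int main(void)
{
	struct fence *last_immediate = calloc(1, sizeof(*last_immediate));
	struct fence *f = calloc(1, sizeof(*f));

	last_immediate->refcount = 1;	/* previous fence, held by the VM */
	f->refcount = 1;		/* new fence from job submission */
	f->seq = 1;

	/* the pattern from the patch: */
	struct fence *tmp = fence_get(f);	/* +1 ref for the VM's slot */
	swap(last_immediate, tmp);		/* tmp now holds the old fence */
	fence_put(tmp);				/* drop the old fence */

	fence_put(f);			/* caller drops its submission ref */
	fence_put(last_immediate);	/* VM teardown, as in amdgpu_vm_fini() */
	return 0;
}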