Commit bb1e38a4 authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: use kernel fence for last_pt_update

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <jammy.zhou@amd.com>
parent e40a3115
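
The change converts amdgpu_bo_va::last_pt_update (and the VM mapping job plumbing) from the driver-specific struct amdgpu_fence to the generic kernel struct fence. amdgpu_fence embeds the generic fence as its base member, which is why the old code had to unwrap &...->base at every generic call site; storing the generic type directly removes those unwraps and moves refcounting to fence_get()/fence_put(). A minimal sketch of the containment the diff relies on, with driver-specific members elided (treat anything beyond base as an assumption):

#include <linux/fence.h>

struct amdgpu_fence {
	struct fence base;	/* generic, kref-refcounted kernel fence */
	/* ring, sequence number, ... (driver-specific, elided) */
};

/* unwrapping to the generic type is just taking the embedded member: */
/* struct fence *f = &afence->base; */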
@@ -539,7 +539,7 @@ struct amdgpu_bo_va_mapping {
 struct amdgpu_bo_va {
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
-	struct amdgpu_fence		*last_pt_update;
+	struct fence			*last_pt_update;
 	unsigned			ref_count;
 
 	/* protected by vm mutex and spinlock */
@@ -1241,7 +1241,7 @@ union amdgpu_sched_job_param {
 	struct {
 		struct amdgpu_vm *vm;
 		uint64_t start;
 		uint64_t last;
-		struct amdgpu_fence **fence;
+		struct fence **fence;
 	} vm_mapping;
 	struct {
...
@@ -581,7 +581,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
 			if (r)
 				return r;
 
-			f = &bo_va->last_pt_update->base;
+			f = bo_va->last_pt_update;
 			r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
 			if (r)
 				return r;
...
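
With last_pt_update already a generic fence, the CS hunk above passes it straight to amdgpu_sync_fence() instead of unwrapping ->base first. The call shape, as inferred from the call site in the hunk rather than quoted from a header, is roughly:

/* prototype inferred from the call site above; an assumption, not a quote */
int amdgpu_sync_fence(struct amdgpu_device *adev,
		      struct amdgpu_sync *sync,
		      struct fence *f);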
@@ -737,7 +737,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
 				uint64_t start, uint64_t end,
-				struct amdgpu_fence *fence)
+				struct fence *fence)
 {
 	unsigned i;
 
@@ -745,20 +745,20 @@ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
 	end >>= amdgpu_vm_block_size;
 
 	for (i = start; i <= end; ++i)
-		amdgpu_bo_fence(vm->page_tables[i].bo, &fence->base, true);
+		amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
 }
 
 static int amdgpu_vm_bo_update_mapping_run_job(
 			struct amdgpu_cs_parser *sched_job)
 {
-	struct amdgpu_fence **fence = sched_job->job_param.vm_mapping.fence;
+	struct fence **fence = sched_job->job_param.vm_mapping.fence;
 
 	amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
 			    sched_job->job_param.vm_mapping.start,
 			    sched_job->job_param.vm_mapping.last + 1,
-			    sched_job->ibs[sched_job->num_ibs -1].fence);
+			    &sched_job->ibs[sched_job->num_ibs -1].fence->base);
 
 	if (fence) {
-		amdgpu_fence_unref(fence);
-		*fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs -1].fence);
+		fence_put(*fence);
+		*fence = fence_get(&sched_job->ibs[sched_job->num_ibs -1].fence->base);
 	}
 
 	return 0;
 }
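
Both places that publish the new fence use the same drop-then-take idiom: fence_put() releases the caller's previous reference (a NULL-safe no-op, as with other kref-based kernel helpers), then fence_get() takes a reference on the IB's fence before it is stored. A hedged sketch of that idiom as a hypothetical helper (the driver open-codes it instead):

static void vm_fence_replace(struct fence **slot, struct fence *new_fence)
{
	fence_put(*slot);		/* drop the old reference; NULL-safe */
	*slot = fence_get(new_fence);	/* take a reference before storing */
}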
@@ -781,7 +781,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 				       struct amdgpu_vm *vm,
 				       struct amdgpu_bo_va_mapping *mapping,
 				       uint64_t addr, uint32_t gtt_flags,
-				       struct amdgpu_fence **fence)
+				       struct fence **fence)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
 	unsigned nptes, ncmds, ndw;
@@ -902,10 +902,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	}
 
 	amdgpu_vm_fence_pts(vm, mapping->it.start,
-			    mapping->it.last + 1, ib->fence);
+			    mapping->it.last + 1, &ib->fence->base);
 
 	if (fence) {
-		amdgpu_fence_unref(fence);
-		*fence = amdgpu_fence_ref(ib->fence);
+		fence_put(*fence);
+		*fence = fence_get(&ib->fence->base);
 	}
 	amdgpu_ib_free(adev, ib);
@@ -1038,7 +1038,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
 	spin_unlock(&vm->status_lock);
 
 	if (bo_va)
-		r = amdgpu_sync_fence(adev, sync, &bo_va->last_pt_update->base);
+		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
 
 	return r;
 }
@@ -1318,7 +1318,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		kfree(mapping);
 	}
 
-	amdgpu_fence_unref(&bo_va->last_pt_update);
+	fence_put(bo_va->last_pt_update);
 	kfree(bo_va);
 
 	mutex_unlock(&vm->mutex);
...
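
One semantic difference worth noting in the final hunk: amdgpu_fence_unref() took the address of the pointer and cleared it after dropping the reference, whereas fence_put() takes the pointer by value and leaves it unchanged. That is harmless here because bo_va is freed on the next line, but a caller keeping the structure alive would want to clear the field itself, e.g.:

fence_put(bo_va->last_pt_update);
bo_va->last_pt_update = NULL;	/* not needed in this hunk: bo_va is freed next */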