Commit 10237448 authored by Christian König, committed by Alex Deucher

drm/amdgpu: make VMID owner none atomic v2

The variable is protected by the VMID mutex anyway.

v2: grab the mutex while resetting the VMID as well
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 3a80e92b
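The reasoning behind the change is the usual one: id->owner is only ever read or written while the VMID manager's mutex is held, so the atomic64 accessors add nothing and only suggest lock-free access that does not exist. Below is a minimal, hypothetical userspace sketch of the same pattern, using a pthread mutex in place of the kernel mutex; the vmid_mgr/vmid_slot names are invented for illustration and are not the amdgpu structures.

/* Illustrative userspace sketch (not the kernel code): a plain 64-bit
 * "owner" field protected by the same mutex that already serializes all
 * users, mirroring why atomic64_read()/atomic64_set() were unnecessary.
 * Names (vmid_slot, vmid_mgr, vmid_grab, vmid_reset) are made up. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct vmid_slot {
	uint64_t owner;		/* plain field, no atomics needed */
	uint64_t pd_gpu_addr;
};

struct vmid_mgr {
	pthread_mutex_t lock;	/* serializes every access to the slots */
	struct vmid_slot ids[8];
};

/* Grab a slot for a given owner; all reads and writes of ->owner happen
 * under mgr->lock, which is what makes the plain uint64_t safe. */
static int vmid_grab(struct vmid_mgr *mgr, uint64_t owner)
{
	int i, found = -1;

	pthread_mutex_lock(&mgr->lock);
	for (i = 0; i < 8; ++i) {
		/* reuse a slot we already own, or take a free one (owner == 0) */
		if (mgr->ids[i].owner == owner || mgr->ids[i].owner == 0) {
			mgr->ids[i].owner = owner;
			found = i;
			break;
		}
	}
	pthread_mutex_unlock(&mgr->lock);
	return found;
}

/* Mirrors the v2 note: resetting a slot also takes the lock. */
static void vmid_reset(struct vmid_mgr *mgr, int i)
{
	pthread_mutex_lock(&mgr->lock);
	mgr->ids[i].owner = 0;
	mgr->ids[i].pd_gpu_addr = 0;
	pthread_mutex_unlock(&mgr->lock);
}

int main(void)
{
	struct vmid_mgr mgr = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int id = vmid_grab(&mgr, 42);

	printf("grabbed slot %d\n", id);
	vmid_reset(&mgr, id);
	return 0;
}

Keeping the field plain also makes the locking rule explicit: anyone touching owner must hold the manager lock, which is exactly what v2 enforces in the reset path.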
@@ -267,7 +267,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 	flushed = id->flushed_updates;
 	if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
-	    (atomic64_read(&id->owner) != vm->entity.fence_context) ||
+	    (id->owner != vm->entity.fence_context) ||
 	    (job->vm_pd_addr != id->pd_gpu_addr) ||
 	    (updates && (!flushed || updates->context != flushed->context ||
 			dma_fence_is_later(updates, flushed))) ||
@@ -296,7 +296,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 		id->flushed_updates = dma_fence_get(updates);
 	}
 	id->pd_gpu_addr = job->vm_pd_addr;
-	atomic64_set(&id->owner, vm->entity.fence_context);
+	id->owner = vm->entity.fence_context;
 	job->vm_needs_flush = needs_flush;
 	if (needs_flush) {
 		dma_fence_put(id->last_flush);
@@ -353,7 +353,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		if (amdgpu_vmid_had_gpu_reset(adev, id))
 			continue;
 
-		if (atomic64_read(&id->owner) != vm->entity.fence_context)
+		if (id->owner != vm->entity.fence_context)
			continue;
 
 		if (job->vm_pd_addr != id->pd_gpu_addr)
@@ -402,7 +402,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	id->pd_gpu_addr = job->vm_pd_addr;
 	dma_fence_put(id->flushed_updates);
 	id->flushed_updates = dma_fence_get(updates);
-	atomic64_set(&id->owner, vm->entity.fence_context);
+	id->owner = vm->entity.fence_context;
 
 needs_flush:
 	job->vm_needs_flush = true;
@@ -482,13 +482,15 @@ void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct amdgpu_vmid *id = &id_mgr->ids[vmid];
 
-	atomic64_set(&id->owner, 0);
+	mutex_lock(&id_mgr->lock);
+	id->owner = 0;
 	id->gds_base = 0;
 	id->gds_size = 0;
 	id->gws_base = 0;
 	id->gws_size = 0;
 	id->oa_base = 0;
 	id->oa_size = 0;
+	mutex_unlock(&id_mgr->lock);
 }
 
 /**
...
@@ -43,7 +43,7 @@ struct amdgpu_vmid {
 	struct list_head	list;
 	struct amdgpu_sync	active;
 	struct dma_fence	*last_flush;
-	atomic64_t		owner;
+	uint64_t		owner;
 
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
...