Commit cebb52b7 authored by Andrey Grodzovsky's avatar Andrey Grodzovsky Committed by Alex Deucher

drm/amdgpu: Get rid of dep_sync as a separate object.

Instead mark the fence as explicit in its amdgpu_sync_entry.

v2:
Fix use after free bug and add new parameter description.
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 97489129
...@@ -1121,7 +1121,6 @@ struct amdgpu_job { ...@@ -1121,7 +1121,6 @@ struct amdgpu_job {
struct amdgpu_vm *vm; struct amdgpu_vm *vm;
struct amdgpu_ring *ring; struct amdgpu_ring *ring;
struct amdgpu_sync sync; struct amdgpu_sync sync;
struct amdgpu_sync dep_sync;
struct amdgpu_sync sched_sync; struct amdgpu_sync sched_sync;
struct amdgpu_ib *ibs; struct amdgpu_ib *ibs;
struct dma_fence *fence; /* the hw fence */ struct dma_fence *fence; /* the hw fence */
......
...@@ -786,7 +786,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) ...@@ -786,7 +786,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
return r; return r;
r = amdgpu_sync_fence(adev, &p->job->sync, r = amdgpu_sync_fence(adev, &p->job->sync,
fpriv->prt_va->last_pt_update); fpriv->prt_va->last_pt_update, false);
if (r) if (r)
return r; return r;
...@@ -800,7 +800,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) ...@@ -800,7 +800,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
return r; return r;
f = bo_va->last_pt_update; f = bo_va->last_pt_update;
r = amdgpu_sync_fence(adev, &p->job->sync, f); r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
if (r) if (r)
return r; return r;
} }
...@@ -823,7 +823,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) ...@@ -823,7 +823,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
return r; return r;
f = bo_va->last_pt_update; f = bo_va->last_pt_update;
r = amdgpu_sync_fence(adev, &p->job->sync, f); r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
if (r) if (r)
return r; return r;
} }
...@@ -834,7 +834,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p) ...@@ -834,7 +834,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r) if (r)
return r; return r;
r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update); r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update, false);
if (r) if (r)
return r; return r;
...@@ -1038,8 +1038,8 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, ...@@ -1038,8 +1038,8 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
amdgpu_ctx_put(ctx); amdgpu_ctx_put(ctx);
return r; return r;
} else if (fence) { } else if (fence) {
r = amdgpu_sync_fence(p->adev, &p->job->dep_sync, r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
fence); true);
dma_fence_put(fence); dma_fence_put(fence);
amdgpu_ctx_put(ctx); amdgpu_ctx_put(ctx);
if (r) if (r)
...@@ -1058,7 +1058,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, ...@@ -1058,7 +1058,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
if (r) if (r)
return r; return r;
r = amdgpu_sync_fence(p->adev, &p->job->dep_sync, fence); r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
dma_fence_put(fence); dma_fence_put(fence);
return r; return r;
......
...@@ -164,7 +164,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, ...@@ -164,7 +164,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
} }
if (ring->funcs->emit_pipeline_sync && job && if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->sched_sync)) || ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
amdgpu_vm_need_pipeline_sync(ring, job))) { amdgpu_vm_need_pipeline_sync(ring, job))) {
need_pipe_sync = true; need_pipe_sync = true;
dma_fence_put(tmp); dma_fence_put(tmp);
......
...@@ -60,7 +60,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, ...@@ -60,7 +60,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
(*job)->num_ibs = num_ibs; (*job)->num_ibs = num_ibs;
amdgpu_sync_create(&(*job)->sync); amdgpu_sync_create(&(*job)->sync);
amdgpu_sync_create(&(*job)->dep_sync);
amdgpu_sync_create(&(*job)->sched_sync); amdgpu_sync_create(&(*job)->sched_sync);
(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter); (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
...@@ -104,7 +103,6 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job) ...@@ -104,7 +103,6 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
amdgpu_ring_priority_put(job->ring, s_job->s_priority); amdgpu_ring_priority_put(job->ring, s_job->s_priority);
dma_fence_put(job->fence); dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync); amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->dep_sync);
amdgpu_sync_free(&job->sched_sync); amdgpu_sync_free(&job->sched_sync);
kfree(job); kfree(job);
} }
...@@ -115,7 +113,6 @@ void amdgpu_job_free(struct amdgpu_job *job) ...@@ -115,7 +113,6 @@ void amdgpu_job_free(struct amdgpu_job *job)
dma_fence_put(job->fence); dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync); amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->dep_sync);
amdgpu_sync_free(&job->sched_sync); amdgpu_sync_free(&job->sched_sync);
kfree(job); kfree(job);
} }
...@@ -149,17 +146,18 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job, ...@@ -149,17 +146,18 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
{ {
struct amdgpu_job *job = to_amdgpu_job(sched_job); struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm; struct amdgpu_vm *vm = job->vm;
bool explicit = false;
struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
int r; int r;
struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
if (fence && explicit) {
if (amd_sched_dependency_optimized(fence, s_entity)) { if (amd_sched_dependency_optimized(fence, s_entity)) {
r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence); r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
if (r) if (r)
DRM_ERROR("Error adding fence to sync (%d)\n", r); DRM_ERROR("Error adding fence to sync (%d)\n", r);
} }
if (!fence) }
fence = amdgpu_sync_get_fence(&job->sync);
while (fence == NULL && vm && !job->vm_id) { while (fence == NULL && vm && !job->vm_id) {
struct amdgpu_ring *ring = job->ring; struct amdgpu_ring *ring = job->ring;
...@@ -169,7 +167,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job, ...@@ -169,7 +167,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
if (r) if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r); DRM_ERROR("Error getting VM ID (%d)\n", r);
fence = amdgpu_sync_get_fence(&job->sync); fence = amdgpu_sync_get_fence(&job->sync, NULL);
} }
return fence; return fence;
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
struct amdgpu_sync_entry { struct amdgpu_sync_entry {
struct hlist_node node; struct hlist_node node;
struct dma_fence *fence; struct dma_fence *fence;
bool explicit;
}; };
static struct kmem_cache *amdgpu_sync_slab; static struct kmem_cache *amdgpu_sync_slab;
...@@ -141,7 +142,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f) ...@@ -141,7 +142,7 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
* *
*/ */
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_fence *f) struct dma_fence *f, bool explicit)
{ {
struct amdgpu_sync_entry *e; struct amdgpu_sync_entry *e;
...@@ -159,6 +160,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, ...@@ -159,6 +160,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
if (!e) if (!e)
return -ENOMEM; return -ENOMEM;
e->explicit = explicit;
hash_add(sync->fences, &e->node, f->context); hash_add(sync->fences, &e->node, f->context);
e->fence = dma_fence_get(f); e->fence = dma_fence_get(f);
return 0; return 0;
...@@ -189,7 +192,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, ...@@ -189,7 +192,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
/* always sync to the exclusive fence */ /* always sync to the exclusive fence */
f = reservation_object_get_excl(resv); f = reservation_object_get_excl(resv);
r = amdgpu_sync_fence(adev, sync, f); r = amdgpu_sync_fence(adev, sync, f, false);
if (explicit_sync) if (explicit_sync)
return r; return r;
...@@ -220,7 +223,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, ...@@ -220,7 +223,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
continue; continue;
} }
r = amdgpu_sync_fence(adev, sync, f); r = amdgpu_sync_fence(adev, sync, f, false);
if (r) if (r)
break; break;
} }
...@@ -275,19 +278,21 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, ...@@ -275,19 +278,21 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
* amdgpu_sync_get_fence - get the next fence from the sync object * amdgpu_sync_get_fence - get the next fence from the sync object
* *
* @sync: sync object to use * @sync: sync object to use
* @explicit: true if the next fence is explicit
* *
* Get and removes the next fence from the sync object not signaled yet. * Get and removes the next fence from the sync object not signaled yet.
*/ */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit)
{ {
struct amdgpu_sync_entry *e; struct amdgpu_sync_entry *e;
struct hlist_node *tmp; struct hlist_node *tmp;
struct dma_fence *f; struct dma_fence *f;
int i; int i;
hash_for_each_safe(sync->fences, i, tmp, e, node) { hash_for_each_safe(sync->fences, i, tmp, e, node) {
f = e->fence; f = e->fence;
if (explicit)
*explicit = e->explicit;
hash_del(&e->node); hash_del(&e->node);
kmem_cache_free(amdgpu_sync_slab, e); kmem_cache_free(amdgpu_sync_slab, e);
......
...@@ -41,7 +41,7 @@ struct amdgpu_sync { ...@@ -41,7 +41,7 @@ struct amdgpu_sync {
void amdgpu_sync_create(struct amdgpu_sync *sync); void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
struct dma_fence *f); struct dma_fence *f, bool explicit);
int amdgpu_sync_resv(struct amdgpu_device *adev, int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync, struct amdgpu_sync *sync,
struct reservation_object *resv, struct reservation_object *resv,
...@@ -49,7 +49,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, ...@@ -49,7 +49,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
bool explicit_sync); bool explicit_sync);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring); struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync, bool *explicit);
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr); int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
void amdgpu_sync_free(struct amdgpu_sync *sync); void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void); int amdgpu_sync_init(void);
......
...@@ -488,7 +488,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm, ...@@ -488,7 +488,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
id->pd_gpu_addr = 0; id->pd_gpu_addr = 0;
tmp = amdgpu_sync_peek_fence(&id->active, ring); tmp = amdgpu_sync_peek_fence(&id->active, ring);
if (tmp) { if (tmp) {
r = amdgpu_sync_fence(adev, sync, tmp); r = amdgpu_sync_fence(adev, sync, tmp, false);
return r; return r;
} }
} }
...@@ -496,7 +496,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm, ...@@ -496,7 +496,7 @@ static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
/* Good we can use this VMID. Remember this submission as /* Good we can use this VMID. Remember this submission as
* user of the VMID. * user of the VMID.
*/ */
r = amdgpu_sync_fence(ring->adev, &id->active, fence); r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
if (r) if (r)
goto out; goto out;
...@@ -583,7 +583,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, ...@@ -583,7 +583,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
} }
r = amdgpu_sync_fence(ring->adev, sync, &array->base); r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
dma_fence_put(&array->base); dma_fence_put(&array->base);
if (r) if (r)
goto error; goto error;
...@@ -626,7 +626,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, ...@@ -626,7 +626,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Good we can use this VMID. Remember this submission as /* Good we can use this VMID. Remember this submission as
* user of the VMID. * user of the VMID.
*/ */
r = amdgpu_sync_fence(ring->adev, &id->active, fence); r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
if (r) if (r)
goto error; goto error;
...@@ -646,7 +646,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, ...@@ -646,7 +646,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
id = idle; id = idle;
/* Remember this submission as user of the VMID */ /* Remember this submission as user of the VMID */
r = amdgpu_sync_fence(ring->adev, &id->active, fence); r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
if (r) if (r)
goto error; goto error;
...@@ -1657,7 +1657,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, ...@@ -1657,7 +1657,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
addr = 0; addr = 0;
} }
r = amdgpu_sync_fence(adev, &job->sync, exclusive); r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
if (r) if (r)
goto error_free; goto error_free;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment