Commit 8d0a7cea authored by Christian König, committed by Alex Deucher

drm/amdgpu: grab VMID before submitting job v5

This allows the scheduler to handle dependencies arising from VMID contention as well.

v2: grab id only once
v3: use a separate lock for the VMIDs
v4: cleanup after semaphore removal
v5: minor coding style change
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 165e4e07
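
The idea: the GPU scheduler resolves job dependencies by repeatedly asking a per-job dependency callback for the next unsignaled fence, and only runs the job once that callback returns NULL. Moving the VMID grab into the callback turns waiting for a contended ID into just another returned fence instead of a stall in amdgpu_ib_schedule(). A minimal self-contained sketch of that pattern (the types and names here, fence, job, job_get_dependency, are illustrative stand-ins, not the kernel's amd_sched/amdgpu structures):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct fence: signals once the work it tracks completes. */
struct fence {
	bool signaled;
};

#define MAX_DEPS 4

/* Stand-in for an amdgpu job: a sync object plus an optional VMID owner. */
struct job {
	struct fence *deps[MAX_DEPS];	/* the "sync" object */
	unsigned ndeps, next;
	struct fence *vmid_owner;	/* last user of the VM ID, may be NULL */
	bool grabbed_vmid;
};

/*
 * Dependency callback: hand the scheduler the next unsignaled fence, or
 * NULL when the job is ready.  The VM ID is grabbed here, once, and any
 * wait for its previous user is queued as one more sync dependency, so
 * ID contention is handled exactly like any other dependency.
 */
static struct fence *job_get_dependency(struct job *job)
{
	for (;;) {
		while (job->next < job->ndeps) {
			struct fence *f = job->deps[job->next++];

			if (!f->signaled)
				return f;
		}

		if (job->grabbed_vmid)
			return NULL;		/* ready to run */

		job->grabbed_vmid = true;	/* grab id only once (v2) */
		if (job->vmid_owner)
			job->deps[job->ndeps++] = job->vmid_owner;
	}
}

int main(void)
{
	struct fence bo_fence = { false }, owner = { false };
	struct job job = { .deps = { &bo_fence }, .ndeps = 1,
			   .vmid_owner = &owner };
	struct fence *dep;

	/* The real scheduler sleeps on each fence via a callback; here we
	 * simply signal it by hand and ask again. */
	while ((dep = job_get_dependency(&job)) != NULL) {
		puts("waiting on a dependency fence");
		dep->signaled = true;
	}
	puts("job is ready to run");
	return 0;
}

Returning the contended ID's fence, rather than blocking in amdgpu_ib_schedule(), keeps the submission path non-blocking; the real callback is amdgpu_sched_dependency() in the last hunk below.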
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -777,6 +777,7 @@ struct amdgpu_ib {
 	struct amdgpu_ring		*ring;
 	struct amdgpu_fence		*fence;
 	struct amdgpu_user_fence	*user;
+	bool				grabbed_vmid;
 	struct amdgpu_vm		*vm;
 	struct amdgpu_ctx		*ctx;
 	struct amdgpu_sync		sync;
@@ -925,6 +926,9 @@ struct amdgpu_vm {
 };
 
 struct amdgpu_vm_manager {
+	/* protecting IDs */
+	struct mutex			lock;
+
 	struct {
 		struct fence		*active;
 		atomic_long_t		owner;
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1456,6 +1456,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
 	mutex_init(&adev->ring_lock);
+	mutex_init(&adev->vm_manager.lock);
 	atomic_set(&adev->irq.ih.lock, 0);
 	mutex_init(&adev->gem.mutex);
 	mutex_init(&adev->pm.mutex);
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -142,21 +142,17 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 		return -EINVAL;
 	}
 
+	if (vm && !ibs->grabbed_vmid) {
+		dev_err(adev->dev, "VM IB without ID\n");
+		return -EINVAL;
+	}
+
 	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
 	if (r) {
 		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
 		return r;
 	}
 
-	if (vm) {
-		/* grab a vm id if necessary */
-		r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
-		if (r) {
-			amdgpu_ring_unlock_undo(ring);
-			return r;
-		}
-	}
-
 	r = amdgpu_sync_wait(&ibs->sync);
 	if (r) {
 		amdgpu_ring_unlock_undo(ring);
@@ -207,9 +203,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 					    AMDGPU_FENCE_FLAG_64BIT);
 	}
 
-	if (ib->vm)
-		amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);
-
 	amdgpu_ring_unlock_commit(ring);
 	return 0;
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -31,7 +31,31 @@
 static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 {
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
-	return amdgpu_sync_get_fence(&job->ibs->sync);
+	struct amdgpu_sync *sync = &job->ibs->sync;
+	struct amdgpu_vm *vm = job->ibs->vm;
+
+	struct fence *fence = amdgpu_sync_get_fence(sync);
+
+	if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
+		struct amdgpu_ring *ring = job->ibs->ring;
+		struct amdgpu_device *adev = ring->adev;
+		int r;
+
+		mutex_lock(&adev->vm_manager.lock);
+		r = amdgpu_vm_grab_id(vm, ring, sync);
+		if (r) {
+			DRM_ERROR("Error getting VM ID (%d)\n", r);
+		} else {
+			fence = &job->base.s_fence->base;
+			amdgpu_vm_fence(ring->adev, vm, fence);
+			job->ibs->grabbed_vmid = true;
+		}
+		mutex_unlock(&adev->vm_manager.lock);
+
+		fence = amdgpu_sync_get_fence(sync);
+	}
+
+	return fence;
 }
 
 static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
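
For context, here is a sketch of the VMID bookkeeping that adev->vm_manager.lock serializes above. The names (vmid_slot, vmid_grab, vmid_fence, NUM_VMIDS) are illustrative stand-ins, not the actual amdgpu_vm_manager layout: each ID remembers the fence of its last user, a grab on a busy ID hands that fence back as a dependency, and fencing the ID marks it busy until the new job completes.

#include <stdbool.h>
#include <stddef.h>

#define NUM_VMIDS 16

struct fence {
	bool signaled;
};

/* Each hardware VMID remembers the fence of the last job that used it. */
struct vmid_slot {
	struct fence *active;
};

static struct vmid_slot ids[NUM_VMIDS];

/*
 * Pick an ID under the manager lock.  If no ID is free, return the fence
 * of a busy ID through *dep so the caller can treat the contention as a
 * scheduler dependency instead of blocking.
 */
static unsigned vmid_grab(struct fence **dep)
{
	unsigned i;

	*dep = NULL;
	for (i = 0; i < NUM_VMIDS; i++) {
		if (ids[i].active == NULL || ids[i].active->signaled)
			return i;	/* free ID, no dependency */
	}

	*dep = ids[0].active;		/* all busy: wait for this one */
	return 0;
}

/* Mark the ID busy until the new job's fence signals. */
static void vmid_fence(unsigned id, struct fence *job_fence)
{
	ids[id].active = job_fence;
}

int main(void)
{
	struct fence job_done = { false };
	struct fence *dep;
	unsigned id = vmid_grab(&dep);	/* dep == NULL: all IDs are free */

	vmid_fence(id, &job_done);	/* ID busy until job_done signals */
	return dep != NULL;
}

In the patch, amdgpu_vm_grab_id() and amdgpu_vm_fence() play these two roles. Holding the mutex across both calls keeps two jobs from claiming the same ID, and fencing the ID with the job's scheduler fence (job->base.s_fence, which signals when the job completes) is what lets the ID be reserved before the job ever reaches the hardware.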