Commit 7484667c authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: move sched job process from isr to fence callback

This avoids losing interrupts and lets each sched job be processed exactly once, when its own fence signals.
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
parent 27f6642d
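
For context, here is a minimal, self-contained sketch of the fence-callback pattern this commit switches to: embed a struct fence_cb in the per-job object, register it with fence_add_callback(), and recover the object with container_of() inside the callback. The my_job names and the callback body are hypothetical stand-ins, not amdgpu code; fence_add_callback() and its -ENOENT return for an already-signalled fence are the real (pre-dma_fence) kernel API used in the diff below.

    #include <linux/fence.h>
    #include <linux/kernel.h>
    #include <linux/printk.h>

    struct my_job {
    	struct fence *hw_fence;	/* fence emitted for this job */
    	struct fence_cb cb;	/* embedded callback node */
    };

    /* Runs from the fence signalling path rather than the raw EOP interrupt. */
    static void my_job_done_cb(struct fence *f, struct fence_cb *cb)
    {
    	struct my_job *job = container_of(cb, struct my_job, cb);

    	/* per-job completion work goes here, e.g. kicking the scheduler */
    	pr_debug("job %p completed\n", job);
    }

    static int my_job_arm(struct my_job *job)
    {
    	int r = fence_add_callback(job->hw_fence, &job->cb, my_job_done_cb);

    	/* -ENOENT: fence already signalled, so the callback will never fire;
    	 * complete the job synchronously instead. */
    	if (r == -ENOENT) {
    		my_job_done_cb(job->hw_fence, &job->cb);
    		r = 0;
    	}
    	return r;
    }

Note that in the diff below, any nonzero return from fence_add_callback() (including -ENOENT for an already-signalled fence) takes the error path.
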
@@ -404,7 +404,7 @@ struct amdgpu_fence_driver {
 struct amdgpu_fence {
 	struct fence base;
-
+	struct fence_cb cb;
 	/* RB, DMA, etc. */
 	struct amdgpu_ring *ring;
 	uint64_t seq;
...
@@ -350,25 +350,8 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 		}
 	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

-	if (wake) {
-		if (amdgpu_enable_scheduler) {
-			uint64_t handled_seq =
-				amd_sched_get_handled_seq(ring->scheduler);
-			uint64_t latest_seq =
-				atomic64_read(&ring->fence_drv.last_seq);
-			if (handled_seq == latest_seq) {
-				DRM_ERROR("ring %d, EOP without seq update (lastest_seq=%llu)\n",
-					  ring->idx, latest_seq);
-				goto exit;
-			}
-			do {
-				amd_sched_isr(ring->scheduler);
-			} while (amd_sched_get_handled_seq(ring->scheduler) < latest_seq);
-		}
+	if (wake)
 		wake_up_all(&ring->fence_drv.fence_queue);
-	}
-exit:
+
 	spin_unlock_irqrestore(&ring->fence_lock, irqflags);
 }
...
@@ -43,12 +43,20 @@ static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
 	return r;
 }

+static void amdgpu_fence_sched_cb(struct fence *f, struct fence_cb *cb)
+{
+	struct amdgpu_fence *fence =
+		container_of(cb, struct amdgpu_fence, cb);
+	amd_sched_isr(fence->ring->scheduler);
+}
+
 static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
 				 struct amd_context_entity *c_entity,
 				 void *job)
 {
 	int r = 0;
 	struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;
+	struct amdgpu_fence *fence;

 	mutex_lock(&sched_job->job_lock);
 	r = amdgpu_ib_schedule(sched_job->adev,
@@ -57,6 +65,11 @@ static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
 			       sched_job->filp);
 	if (r)
 		goto err;
+	fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
+	if (fence_add_callback(&fence->base,
+			       &fence->cb, amdgpu_fence_sched_cb))
+		goto err;
+
 	if (sched_job->run_job) {
 		r = sched_job->run_job(sched_job);
 		if (r)
...