Commit 0de2479c authored by Monk Liu, committed by Alex Deucher

drm/amdgpu: rework TDR in scheduler (v2)

Add two callbacks to the scheduler for tracking jobs; they are invoked
as part of job timeout handling. TDR now measures the time gap starting
from when a job is actually picked up by the hardware.

v2:
fix typo
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent cccd9bce
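Editor's note (not part of the patch): the two new hooks, begin_job and finish_job, let the scheduler start the TDR countdown only when a job reaches the head of the ring mirror list (i.e. it is the job the hardware is actually working on) and restart the countdown each time a job completes, so each job gets its own full timeout budget. Below is a minimal userspace C sketch of that bookkeeping; the names (sched_model, job_model, tdr_begin_job, tdr_finish_job) are illustrative placeholders, and a plain boolean stands in for the kernel's delayed work item.

#include <stdbool.h>
#include <stdio.h>

struct job_model {
	int id;
	struct job_model *next;		/* next entry in the "ring mirror list" */
};

struct sched_model {
	struct job_model *mirror_head;	/* oldest job still on the hardware */
	bool timer_armed;		/* stands in for the delayed TDR work */
};

/* begin_job: arm the timeout only when the job is at the head of the list,
 * i.e. it is the job the hardware is processing right now. */
static void tdr_begin_job(struct sched_model *s, struct job_model *j)
{
	if (s->mirror_head == j && !s->timer_armed) {
		s->timer_armed = true;
		printf("arm TDR timer for job %d\n", j->id);
	}
}

/* finish_job: cancel the completed job's timer and restart the countdown
 * for the next pending job, so the timeout measures per-job hardware time
 * rather than total time spent queued. */
static void tdr_finish_job(struct sched_model *s, struct job_model *j)
{
	s->timer_armed = false;
	printf("cancel TDR timer for job %d\n", j->id);

	s->mirror_head = j->next;
	if (s->mirror_head) {
		s->timer_armed = true;
		printf("re-arm TDR timer for job %d\n", s->mirror_head->id);
	}
}

int main(void)
{
	struct job_model j2 = { .id = 2, .next = NULL };
	struct job_model j1 = { .id = 1, .next = &j2 };
	struct sched_model s = { .mirror_head = &j1, .timer_armed = false };

	tdr_begin_job(&s, &j1);		/* first job hits the hardware: arm */
	tdr_begin_job(&s, &j2);		/* second job queued behind it: no-op */
	tdr_finish_job(&s, &j1);	/* j1 signals: re-arm for j2 */
	tdr_finish_job(&s, &j2);	/* j2 signals: nothing left to watch */
	return 0;
}

In the patch itself the same logic is expressed with INIT_DELAYED_WORK/schedule_delayed_work/cancel_delayed_work on work_tdr, guarded by sched->timeout != MAX_SCHEDULE_TIMEOUT, as shown in the diff below.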
@@ -754,6 +754,7 @@ void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		       struct amd_sched_entity *entity, void *owner,
		       struct fence **f);
+void amdgpu_job_timeout_func(struct work_struct *work);

 struct amdgpu_ring {
	struct amdgpu_device *adev;
...
@@ -871,6 +871,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
	r = amd_sched_job_init(&job->base, &ring->sched,
			       &p->ctx->rings[ring->idx].entity,
+			       amdgpu_job_timeout_func,
			       p->filp, &fence);
	if (r) {
		amdgpu_job_free(job);
...
@@ -34,6 +34,15 @@ static void amdgpu_job_free_handler(struct work_struct *ws)
	kfree(job);
 }

+void amdgpu_job_timeout_func(struct work_struct *work)
+{
+	struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
+	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
+		  job->base.sched->name,
+		  (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
+		  job->ring->fence_drv.sync_seq);
+}
+
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		      struct amdgpu_job **job)
 {
@@ -103,7 +112,10 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
	if (!f)
		return -EINVAL;

-	r = amd_sched_job_init(&job->base, &ring->sched, entity, owner, &fence);
+	r = amd_sched_job_init(&job->base, &ring->sched,
+			       entity, owner,
+			       amdgpu_job_timeout_func,
+			       &fence);
	if (r)
		return r;
@@ -180,4 +192,6 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 struct amd_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
+	.begin_job = amd_sched_job_begin,
+	.finish_job = amd_sched_job_finish,
 };
@@ -324,6 +324,40 @@ static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
	schedule_work(&job->work_free_job);
 }

+/* job_finish is called after hw fence signaled, and
+ * the job had already been deleted from ring_mirror_list
+ */
+void amd_sched_job_finish(struct amd_sched_job *s_job)
+{
+	struct amd_sched_job *next;
+	struct amd_gpu_scheduler *sched = s_job->sched;
+
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+		cancel_delayed_work(&s_job->work_tdr); /*TODO: how to deal the case that tdr is running */
+
+		/* queue TDR for next job */
+		next = list_first_entry_or_null(&sched->ring_mirror_list,
+						struct amd_sched_job, node);
+
+		if (next) {
+			INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
+			schedule_delayed_work(&next->work_tdr, sched->timeout);
+		}
+	}
+}
+
+void amd_sched_job_begin(struct amd_sched_job *s_job)
+{
+	struct amd_gpu_scheduler *sched = s_job->sched;
+
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+	    list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job)
+	{
+		INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
+		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+	}
+}
+
 /**
  * Submit a job to the job queue
  *
@@ -347,6 +381,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 int amd_sched_job_init(struct amd_sched_job *job,
			struct amd_gpu_scheduler *sched,
			struct amd_sched_entity *entity,
+			void (*timeout_cb)(struct work_struct *work),
			void *owner, struct fence **fence)
 {
	INIT_LIST_HEAD(&job->node);
@@ -357,6 +392,7 @@ int amd_sched_job_init(struct amd_sched_job *job,
		return -ENOMEM;

	job->s_fence->s_job = job;
+	job->timeout_callback = timeout_cb;

	if (fence)
		*fence = &job->s_fence->base;
@@ -415,6 +451,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
	/* remove job from ring_mirror_list */
	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_del_init(&s_fence->s_job->node);
+	sched->ops->finish_job(s_fence->s_job);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);

	amd_sched_fence_signal(s_fence);
...
@@ -85,6 +85,8 @@ struct amd_sched_job {
	struct fence_cb                cb_free_job;
	struct work_struct             work_free_job;
	struct list_head               node;
+	struct delayed_work work_tdr;
+	void (*timeout_callback) (struct work_struct *work);
 };

 extern const struct fence_ops amd_sched_fence_ops;
@@ -105,6 +107,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 struct amd_sched_backend_ops {
	struct fence *(*dependency)(struct amd_sched_job *sched_job);
	struct fence *(*run_job)(struct amd_sched_job *sched_job);
+	void (*begin_job)(struct amd_sched_job *sched_job);
+	void (*finish_job)(struct amd_sched_job *sched_job);
 };

 enum amd_sched_priority {
@@ -150,7 +154,10 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence);
 int amd_sched_job_init(struct amd_sched_job *job,
			struct amd_gpu_scheduler *sched,
			struct amd_sched_entity *entity,
+			void (*timeout_cb)(struct work_struct *work),
			void *owner, struct fence **fence);
 void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
				struct amd_sched_job *s_job);
+void amd_sched_job_finish(struct amd_sched_job *s_job);
+void amd_sched_job_begin(struct amd_sched_job *s_job);
 #endif
@@ -63,6 +63,7 @@ void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
	unsigned long flags;
	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
+	sched->ops->begin_job(s_job);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
 }
...