Commit cccd9bce authored by Monk Liu, committed by Alex Deucher

drm/amdgpu: get rid of incorrect TDR

The original timeout detection routine is incorrect: it measures the
gap from the moment a job is scheduled, but it should only measure the
gap from the moment the job is actually picked up by the hardware.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 4835096b
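
The flaw the message describes can be illustrated with a minimal, self-contained user-space sketch (the struct, field, and function names below are invented for illustration; this is not driver code). A job that sits behind earlier work on the ring blows past a timeout measured from submission even though the hardware completes it quickly once it actually starts, whereas a timeout measured from the moment the hardware picks the job up does not fire:

/*
 * Hypothetical sketch of the two ways to measure a job timeout.
 * TIMEOUT_MS, struct job and both helpers are assumptions made up
 * for this example, not amdgpu code.
 */
#include <stdbool.h>
#include <stdio.h>

#define TIMEOUT_MS 2000 /* assumed TDR budget */

struct job {
        long submit_ms;   /* when the scheduler queued the job        */
        long hw_start_ms; /* when the hardware actually began running */
        long done_ms;     /* when the hardware fence signalled        */
};

/* Old behaviour: the clock starts at submission, so queueing delay counts. */
static bool timed_out_from_submit(const struct job *j)
{
        return (j->done_ms - j->submit_ms) > TIMEOUT_MS;
}

/* Intended behaviour: the clock starts only once the hardware runs the job. */
static bool timed_out_from_hw_start(const struct job *j)
{
        return (j->done_ms - j->hw_start_ms) > TIMEOUT_MS;
}

int main(void)
{
        /* The job waits 3 s behind earlier work, then runs for 0.5 s. */
        struct job j = { .submit_ms = 0, .hw_start_ms = 3000, .done_ms = 3500 };

        printf("from submit:   %s\n", timed_out_from_submit(&j) ? "TDR" : "ok");
        printf("from hw start: %s\n", timed_out_from_hw_start(&j) ? "TDR" : "ok");
        return 0;
}

Running the sketch prints "TDR" for the submission-based measurement and "ok" for the hardware-start measurement, which is why the per-fence delayed work below, armed at submission time in amd_sched_main(), is removed rather than kept.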
@@ -418,46 +418,18 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
         spin_unlock_irqrestore(&sched->job_list_lock, flags);
         amd_sched_fence_signal(s_fence);
-        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-                cancel_delayed_work(&s_fence->dwork);
-                spin_lock_irqsave(&sched->fence_list_lock, flags);
-                list_del_init(&s_fence->list);
-                spin_unlock_irqrestore(&sched->fence_list_lock, flags);
-        }
         trace_amd_sched_process_job(s_fence);
         fence_put(&s_fence->base);
         wake_up_interruptible(&sched->wake_up_worker);
 }
-static void amd_sched_fence_work_func(struct work_struct *work)
-{
-        struct amd_sched_fence *s_fence =
-                container_of(work, struct amd_sched_fence, dwork.work);
-        struct amd_gpu_scheduler *sched = s_fence->sched;
-        struct amd_sched_fence *entity, *tmp;
-        unsigned long flags;
-        DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);
-        /* Clean all pending fences */
-        spin_lock_irqsave(&sched->fence_list_lock, flags);
-        list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
-                DRM_ERROR(" fence no %d\n", entity->base.seqno);
-                cancel_delayed_work(&entity->dwork);
-                list_del_init(&entity->list);
-                fence_put(&entity->base);
-        }
-        spin_unlock_irqrestore(&sched->fence_list_lock, flags);
-}
 static int amd_sched_main(void *param)
 {
         struct sched_param sparam = {.sched_priority = 1};
         struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
         int r, count;
-        spin_lock_init(&sched->fence_list_lock);
-        INIT_LIST_HEAD(&sched->fence_list);
         sched_setscheduler(current, SCHED_FIFO, &sparam);
         while (!kthread_should_stop()) {
@@ -465,7 +437,6 @@ static int amd_sched_main(void *param)
                 struct amd_sched_fence *s_fence;
                 struct amd_sched_job *sched_job;
                 struct fence *fence;
-                unsigned long flags;
                 wait_event_interruptible(sched->wake_up_worker,
                         (entity = amd_sched_select_entity(sched)) ||
@@ -480,14 +451,6 @@ static int amd_sched_main(void *param)
                 s_fence = sched_job->s_fence;
-                if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-                        INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
-                        schedule_delayed_work(&s_fence->dwork, sched->timeout);
-                        spin_lock_irqsave(&sched->fence_list_lock, flags);
-                        list_add_tail(&s_fence->list, &sched->fence_list);
-                        spin_unlock_irqrestore(&sched->fence_list_lock, flags);
-                }
                 atomic_inc(&sched->hw_rq_count);
                 amd_sched_job_pre_schedule(sched, sched_job);
                 fence = sched->ops->run_job(sched_job);
...
@@ -74,8 +74,6 @@ struct amd_sched_fence {
         struct amd_gpu_scheduler *sched;
         spinlock_t lock;
         void *owner;
-        struct delayed_work dwork;
-        struct list_head list;
         struct amd_sched_job *s_job;
 };
@@ -127,8 +125,6 @@ struct amd_gpu_scheduler {
         wait_queue_head_t wake_up_worker;
         wait_queue_head_t job_scheduled;
         atomic_t hw_rq_count;
-        struct list_head fence_list;
-        spinlock_t fence_list_lock;
         struct task_struct *thread;
         struct list_head ring_mirror_list;
         spinlock_t job_list_lock;
...