Commit 7a36dcfa authored by Matthew Brost, committed by Luben Tuikov

drm/sched: Add drm_sched_start_timeout_unlocked helper

Also add a lockdep assert to drm_sched_start_timeout.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://lore.kernel.org/r/20231031032439.1558703-5-matthew.brost@intel.com
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
parent f7fe64ad
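The patch factors one locking pattern out of two call sites: the core helper now asserts its locking contract with lockdep_assert_held(), and a thin *_unlocked wrapper takes and drops job_list_lock for callers outside the critical section. Below is a minimal userspace sketch of that pattern, not kernel code: the pthread mutex, the lock_held flag, and the stub start_timeout() body are hypothetical stand-ins for the scheduler's spinlock, lockdep state, and queue_delayed_work() call.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct drm_gpu_scheduler: just the lock plus a
 * flag tracking whether it is held, so the assert below can mimic
 * lockdep_assert_held() in userspace. */
struct sched {
	pthread_mutex_t job_list_lock;
	bool lock_held;
};

/* Core helper: the caller must already hold job_list_lock, as the patch
 * now enforces with lockdep_assert_held(). */
static void start_timeout(struct sched *s)
{
	assert(s->lock_held);	/* stand-in for lockdep_assert_held() */
	printf("timeout work queued\n");	/* stand-in for queue_delayed_work() */
}

/* Thin wrapper for callers outside the critical section, mirroring
 * drm_sched_start_timeout_unlocked(). */
static void start_timeout_unlocked(struct sched *s)
{
	pthread_mutex_lock(&s->job_list_lock);
	s->lock_held = true;
	start_timeout(s);
	s->lock_held = false;
	pthread_mutex_unlock(&s->job_list_lock);
}

int main(void)
{
	struct sched s = { .job_list_lock = PTHREAD_MUTEX_INITIALIZER };

	start_timeout_unlocked(&s);	/* correct: the wrapper takes the lock */
	/* Calling start_timeout(&s) here would trip the assert, just as an
	 * unlocked caller would trip lockdep in the kernel. */
	return 0;
}

The payoff, visible in the hunks below, is the same in both settings: the assert catches any future caller that reaches drm_sched_start_timeout() without the lock, and the wrapper replaces the repeated lock/call/unlock sequence at each call site.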
@@ -334,11 +334,20 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
  */
 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 {
+	lockdep_assert_held(&sched->job_list_lock);
+
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 	    !list_empty(&sched->pending_list))
 		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
 }
 
+static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
+{
+	spin_lock(&sched->job_list_lock);
+	drm_sched_start_timeout(sched);
+	spin_unlock(&sched->job_list_lock);
+}
+
 /**
  * drm_sched_fault - immediately start timeout handler
  *
@@ -451,11 +460,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		spin_unlock(&sched->job_list_lock);
 	}
 
-	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
-		spin_lock(&sched->job_list_lock);
-		drm_sched_start_timeout(sched);
-		spin_unlock(&sched->job_list_lock);
-	}
+	if (status != DRM_GPU_SCHED_STAT_ENODEV)
+		drm_sched_start_timeout_unlocked(sched);
 }
 
 /**
@@ -581,11 +587,8 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 			drm_sched_job_done(s_job, -ECANCELED);
 	}
 
-	if (full_recovery) {
-		spin_lock(&sched->job_list_lock);
-		drm_sched_start_timeout(sched);
-		spin_unlock(&sched->job_list_lock);
-	}
+	if (full_recovery)
+		drm_sched_start_timeout_unlocked(sched);
 
 	drm_sched_wqueue_start(sched);
 }