Commit 4835096b authored by Monk Liu, committed by Alex Deucher

drm/amdgpu: put job to list before done

The ring mirror list will be used by the upcoming timeout-detection
feature: it tracks every job that has been handed to the hardware but
whose fence has not signaled yet, which is what the scheduler needs to
properly detect a GPU timeout.
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e472d258
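
This patch only maintains the list; nothing consumes it yet. As a rough sketch of why the list plus its lock are enough for timeout detection, the hypothetical helper below walks ring_mirror_list and inspects each in-flight job. The helper name and the expiry check are assumptions for illustration and are not part of this commit; the fields it touches (ring_mirror_list, job_list_lock, node, s_fence, sched->timeout) all exist once the diff below is applied.

/*
 * Hypothetical sketch only -- NOT part of this commit.  Every entry on
 * ring_mirror_list is a job that amd_sched_job_pre_schedule() handed to
 * the hardware and that amd_sched_process_job() has not yet removed,
 * i.e. exactly the set of jobs that could still hang the GPU.
 */
static void amd_sched_check_pending_jobs(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;
	unsigned long flags;

	spin_lock_irqsave(&sched->job_list_lock, flags);
	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
		/* Jobs whose fence already signaled are about to be unlinked. */
		if (fence_is_signaled(&s_job->s_fence->base))
			continue;
		/*
		 * Placeholder: a real implementation would compare how long
		 * this job has been in flight against sched->timeout and
		 * start recovery once that limit is exceeded.
		 */
	}
	spin_unlock_irqrestore(&sched->job_list_lock, flags);
}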
@@ -349,12 +349,15 @@ int amd_sched_job_init(struct amd_sched_job *job,
 			struct amd_sched_entity *entity,
 			void *owner, struct fence **fence)
 {
+	INIT_LIST_HEAD(&job->node);
 	job->sched = sched;
 	job->s_entity = entity;
 	job->s_fence = amd_sched_fence_create(entity, owner);
 	if (!job->s_fence)
 		return -ENOMEM;
 
+	job->s_fence->s_job = job;
+
 	if (fence)
 		*fence = &job->s_fence->base;
 	return 0;
@@ -408,6 +411,12 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	unsigned long flags;
 
 	atomic_dec(&sched->hw_rq_count);
+
+	/* remove job from ring_mirror_list */
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+	list_del_init(&s_fence->s_job->node);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
 	amd_sched_fence_signal(s_fence);
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
 		cancel_delayed_work(&s_fence->dwork);
@@ -480,6 +489,7 @@ static int amd_sched_main(void *param)
 		}
 
 		atomic_inc(&sched->hw_rq_count);
+		amd_sched_job_pre_schedule(sched, sched_job);
 		fence = sched->ops->run_job(sched_job);
 		amd_sched_fence_scheduled(s_fence);
 		if (fence) {
@@ -527,6 +537,8 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
+	INIT_LIST_HEAD(&sched->ring_mirror_list);
+	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 
 	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
 		sched_fence_slab = kmem_cache_create(
...
@@ -76,6 +76,7 @@ struct amd_sched_fence {
 	void				*owner;
 	struct delayed_work		dwork;
 	struct list_head		list;
+	struct amd_sched_job		*s_job;
 };
 
 struct amd_sched_job {
@@ -85,6 +86,7 @@ struct amd_sched_job {
 	bool			use_sched;	/* true if the job goes to scheduler */
 	struct fence_cb		cb_free_job;
 	struct work_struct	work_free_job;
+	struct list_head	node;
 };
 
 extern const struct fence_ops amd_sched_fence_ops;
@@ -128,6 +130,8 @@ struct amd_gpu_scheduler {
 	struct list_head	fence_list;
 	spinlock_t		fence_list_lock;
 	struct task_struct	*thread;
+	struct list_head	ring_mirror_list;
+	spinlock_t		job_list_lock;
 };
 
 int amd_sched_init(struct amd_gpu_scheduler *sched,
@@ -151,4 +155,6 @@ int amd_sched_job_init(struct amd_sched_job *job,
 			struct amd_gpu_scheduler *sched,
 			struct amd_sched_entity *entity,
 			void *owner, struct fence **fence);
+void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
+				struct amd_sched_job *s_job);
 #endif
@@ -57,6 +57,15 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence)
 		FENCE_TRACE(&fence->base, "was already signaled\n");
 }
 
+void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
+				struct amd_sched_job *s_job)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&sched->job_list_lock, flags);
+	list_add_tail(&s_job->node, &sched->ring_mirror_list);
+	spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+
 void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
 {
 	struct fence_cb *cur, *tmp;
...
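
Taken together, the list is maintained strictly FIFO: amd_sched_main() appends a job right before run_job(), and amd_sched_process_job() unlinks it once its hardware fence signals, so the head of ring_mirror_list is always the oldest job still owed a completion. A hypothetical helper built on that invariant is sketched below; the function is an illustration and not part of this commit.

/*
 * Illustrative sketch, not part of this commit: return the oldest job
 * that was submitted to the hardware but has not completed yet.  Note
 * that the pointer is returned after the lock is dropped, so a real
 * caller would need extra lifetime guarantees (for example, running in
 * a context that also serializes job completion and freeing).
 */
static struct amd_sched_job *
amd_sched_oldest_pending_job(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;
	unsigned long flags;

	spin_lock_irqsave(&sched->job_list_lock, flags);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	spin_unlock_irqrestore(&sched->job_list_lock, flags);

	return s_job;
}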