Commit 3d651936 authored by Christian König, committed by Alex Deucher

drm/amdgpu: move dependency handling out of atomic section v2

This way the driver isn't limited in the dependency handling callback.

v2: remove extra check in amd_sched_entity_pop_job()
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent 393a0bd4
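
The heart of the change, for readers skimming the hunks below (the file touched is drivers/gpu/drm/amd/scheduler/gpu_scheduler.c): previously amd_sched_rq_select_job() called amd_sched_entity_pop_job() while holding rq->lock, so the driver's dependency callback ran inside an atomic section. After this patch the locked path only runs the new read-only check amd_sched_entity_is_ready(), and the actual pop, and with it the dependency callback, happens in the scheduler thread with no spinlock held. What follows is a minimal userspace sketch of that pattern, not driver code: pthread spinlocks stand in for kernel ones, and every name in it is illustrative rather than taken from the scheduler.

/*
 * Illustrative sketch only, not driver code: keep the lock-held path to
 * cheap state reads, and run heavyweight callbacks outside the lock.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entity {
        bool has_job;           /* stands in for !kfifo_is_empty() */
        void *dependency;       /* stands in for entity->dependency */
};

struct run_queue {
        pthread_spinlock_t lock;
        struct entity *slots[4];
        int nslots;
};

/* Read-only readiness test, safe to call with the lock held;
 * this mirrors the role of amd_sched_entity_is_ready(). */
static bool entity_is_ready(struct entity *e)
{
        if (!e->has_job)
                return false;
        if (__atomic_load_n(&e->dependency, __ATOMIC_RELAXED))
                return false;
        return true;
}

/* Selection takes the lock, but does nothing heavyweight under it. */
static struct entity *rq_select_entity(struct run_queue *rq)
{
        struct entity *found = NULL;

        pthread_spin_lock(&rq->lock);
        for (int i = 0; i < rq->nslots; i++) {
                if (entity_is_ready(rq->slots[i])) {
                        found = rq->slots[i];
                        break;
                }
        }
        pthread_spin_unlock(&rq->lock);
        return found;
}

/* The expensive part, think of a dependency callback that may sleep
 * or allocate, runs here with no lock held at all. */
static void pop_job_and_resolve_deps(struct entity *e)
{
        printf("resolving dependencies outside the lock\n");
        e->has_job = false;
}

int main(void)
{
        struct entity e = { .has_job = true, .dependency = NULL };
        struct run_queue rq = { .slots = { &e }, .nslots = 1 };

        pthread_spin_init(&rq.lock, PTHREAD_PROCESS_PRIVATE);

        struct entity *picked = rq_select_entity(&rq);
        if (picked)
                pop_job_and_resolve_deps(picked);

        pthread_spin_destroy(&rq.lock);
        return 0;
}

The invariant the sketch demonstrates is the same one the patch establishes: nothing that can block or call back into driver code executes between spin_lock() and spin_unlock().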
@@ -30,8 +30,7 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_sched_trace.h"
 
-static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
 struct kmem_cache *sched_fence_slab;
@@ -64,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 }
 
 /**
- * Select next job from a specified run queue with round robin policy.
- * Return NULL if nothing available.
+ * Select an entity which could provide a job to run
+ *
+ * @rq		The run queue to check.
+ *
+ * Try to find a ready entity, returns NULL if none found.
  */
-static struct amd_sched_job *
-amd_sched_rq_select_job(struct amd_sched_rq *rq)
+static struct amd_sched_entity *
+amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 {
 	struct amd_sched_entity *entity;
-	struct amd_sched_job *sched_job;
 
 	spin_lock(&rq->lock);
 
 	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
-			sched_job = amd_sched_entity_pop_job(entity);
-			if (sched_job) {
+			if (amd_sched_entity_is_ready(entity)) {
 				rq->current_entity = entity;
 				spin_unlock(&rq->lock);
-				return sched_job;
+				return entity;
 			}
 		}
 	}
 
 	list_for_each_entry(entity, &rq->entities, list) {
 
-		sched_job = amd_sched_entity_pop_job(entity);
-		if (sched_job) {
+		if (amd_sched_entity_is_ready(entity)) {
 			rq->current_entity = entity;
 			spin_unlock(&rq->lock);
-			return sched_job;
+			return entity;
 		}
 
 		if (entity == rq->current_entity)
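
A note on the hunk above: the round-robin selection policy survives intact. Iteration still resumes just past rq->current_entity via list_for_each_entry_continue(), and the second list_for_each_entry() wraps around from the list head, giving up once it has walked back to the starting entity. Only the per-entity test changed, from "try to pop a job" (which invoked the driver callback with rq->lock held) to the read-only amd_sched_entity_is_ready().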
@@ -176,6 +175,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
 	return false;
 }
 
+/**
+ * Check if entity is ready
+ *
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Return true if entity could provide a job.
+ */
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+{
+	if (kfifo_is_empty(&entity->job_queue))
+		return false;
+
+	if (ACCESS_ONCE(entity->dependency))
+		return false;
+
+	return true;
+}
+
 /**
  * Destroy a context entity
  *
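
The helper added above is deliberately lockless: kfifo_is_empty() and ACCESS_ONCE() only read memory, so the run-queue selection loops can call it with rq->lock held at essentially no cost. A stale answer is also harmless: if the check races with a dependency showing up, amd_sched_entity_pop_job() still sees it later in the scheduler thread and returns no job, which the main loop absorbs through its `if (!sched_job) continue;` path. For reference, the kernel's ACCESS_ONCE() of this era is a forced volatile read, roughly the following (a userspace approximation using the GCC/clang __typeof__ extension):

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

It guarantees a single load the compiler can neither cache nor duplicate; it is not a memory barrier and imposes no ordering on other accesses.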
@@ -252,9 +269,6 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 	struct amd_gpu_scheduler *sched = entity->sched;
 	struct amd_sched_job *sched_job;
 
-	if (ACCESS_ONCE(entity->dependency))
-		return NULL;
-
 	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
 		return NULL;
 
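This hunk is the v2 change noted in the commit message: the ACCESS_ONCE(entity->dependency) early-out is removed from amd_sched_entity_pop_job(), presumably because entity selection now performs the same test in amd_sched_entity_is_ready() before a pop is ever attempted, making the extra check redundant.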
@@ -328,22 +342,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
 }
 
 /**
- * Select next to run
+ * Select next entity to process
 */
-static struct amd_sched_job *
-amd_sched_select_job(struct amd_gpu_scheduler *sched)
+static struct amd_sched_entity *
+amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 {
-	struct amd_sched_job *sched_job;
+	struct amd_sched_entity *entity;
 
 	if (!amd_sched_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
-	if (sched_job == NULL)
-		sched_job = amd_sched_rq_select_job(&sched->sched_rq);
+	entity = amd_sched_rq_select_entity(&sched->kernel_rq);
+	if (entity == NULL)
+		entity = amd_sched_rq_select_entity(&sched->sched_rq);
 
-	return sched_job;
+	return entity;
 }
 
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -405,13 +419,16 @@ static int amd_sched_main(void *param)
 		unsigned long flags;
 
 		wait_event_interruptible(sched->wake_up_worker,
-			kthread_should_stop() ||
-			(sched_job = amd_sched_select_job(sched)));
+			(entity = amd_sched_select_entity(sched)) ||
+			kthread_should_stop());
+
+		if (!entity)
+			continue;
 
+		sched_job = amd_sched_entity_pop_job(entity);
 		if (!sched_job)
 			continue;
 
-		entity = sched_job->s_entity;
 		s_fence = sched_job->s_fence;
 
 		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
...
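
Two details in the final hunk are worth spelling out. First, the condition handed to wait_event_interruptible() can be evaluated repeatedly as the thread is woken and re-checks, so it must stay cheap and safe to run again and again; the new lockless amd_sched_select_entity() qualifies, and the `(entity = ...)` assignment-in-condition idiom records the chosen entity as a byproduct of the test. Second, amd_sched_entity_pop_job() is now called here, in plain kthread context after the wait and outside any spinlock, so the driver's dependency callback is no longer restricted to what atomic context allows, which is precisely the limitation the commit message lifts.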