Commit 70102d77 authored by Christian König's avatar Christian König

drm/scheduler: add drm_sched_entity_error and use rcu for last_scheduled

Switch to using RCU handling for the last scheduled job and add a
function to return the error code of it.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230420115752.31470-2-christian.koenig@amd.com
parent 539f9ee4
...@@ -72,7 +72,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, ...@@ -72,7 +72,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->num_sched_list = num_sched_list; entity->num_sched_list = num_sched_list;
entity->priority = priority; entity->priority = priority;
entity->sched_list = num_sched_list > 1 ? sched_list : NULL; entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
entity->last_scheduled = NULL; RCU_INIT_POINTER(entity->last_scheduled, NULL);
RB_CLEAR_NODE(&entity->rb_tree_node); RB_CLEAR_NODE(&entity->rb_tree_node);
if(num_sched_list) if(num_sched_list)
...@@ -140,6 +140,27 @@ bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) ...@@ -140,6 +140,27 @@ bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
return true; return true;
} }
/**
* drm_sched_entity_error - return error of last scheduled job
* @entity: scheduler entity to check
*
* Opportunistically return the error of the last scheduled job. Result can
* change any time when new jobs are pushed to the hw.
*/
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
struct dma_fence *fence;
int r;
rcu_read_lock();
fence = rcu_dereference(entity->last_scheduled);
r = fence ? fence->error : 0;
rcu_read_unlock();
return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);
static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{ {
struct drm_sched_job *job = container_of(wrk, typeof(*job), work); struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
...@@ -191,7 +212,9 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity) ...@@ -191,7 +212,9 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
/* Make sure this entity is not used by the scheduler at the moment */ /* Make sure this entity is not used by the scheduler at the moment */
wait_for_completion(&entity->entity_idle); wait_for_completion(&entity->entity_idle);
prev = dma_fence_get(entity->last_scheduled); /* The entity is guaranteed to not be used by the scheduler */
prev = rcu_dereference_check(entity->last_scheduled, true);
dma_fence_get(prev);
while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
struct drm_sched_fence *s_fence = job->s_fence; struct drm_sched_fence *s_fence = job->s_fence;
...@@ -278,8 +301,8 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity) ...@@ -278,8 +301,8 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
entity->dependency = NULL; entity->dependency = NULL;
} }
dma_fence_put(entity->last_scheduled); dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
entity->last_scheduled = NULL; RCU_INIT_POINTER(entity->last_scheduled, NULL);
} }
EXPORT_SYMBOL(drm_sched_entity_fini); EXPORT_SYMBOL(drm_sched_entity_fini);
...@@ -421,9 +444,9 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) ...@@ -421,9 +444,9 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
if (entity->guilty && atomic_read(entity->guilty)) if (entity->guilty && atomic_read(entity->guilty))
dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
dma_fence_put(entity->last_scheduled); dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
rcu_assign_pointer(entity->last_scheduled,
entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished); dma_fence_get(&sched_job->s_fence->finished));
/* /*
* If the queue is empty we allow drm_sched_entity_select_rq() to * If the queue is empty we allow drm_sched_entity_select_rq() to
...@@ -477,7 +500,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity) ...@@ -477,7 +500,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
*/ */
smp_rmb(); smp_rmb();
fence = entity->last_scheduled; fence = rcu_dereference_check(entity->last_scheduled, true);
/* stay on the same engine if the previous job hasn't finished */ /* stay on the same engine if the previous job hasn't finished */
if (fence && !dma_fence_is_signaled(fence)) if (fence && !dma_fence_is_signaled(fence))
......
...@@ -201,7 +201,7 @@ struct drm_sched_entity { ...@@ -201,7 +201,7 @@ struct drm_sched_entity {
* by the scheduler thread, can be accessed locklessly from * by the scheduler thread, can be accessed locklessly from
* drm_sched_job_arm() iff the queue is empty. * drm_sched_job_arm() iff the queue is empty.
*/ */
struct dma_fence *last_scheduled; struct dma_fence __rcu *last_scheduled;
/** /**
* @last_user: last group leader pushing a job into the entity. * @last_user: last group leader pushing a job into the entity.
...@@ -588,6 +588,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job); ...@@ -588,6 +588,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity, void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority); enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
int drm_sched_entity_error(struct drm_sched_entity *entity);
void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence, void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
struct dma_fence *fence); struct dma_fence *fence);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment