Commit 0f75aee7 authored by Christian König, committed by Alex Deucher

drm/amdgpu: cleanup entity init

Reorder the fields and properly return the kfifo_alloc error code.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent a6db8a33
@@ -118,23 +118,27 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 			  struct amd_sched_rq *rq,
 			  uint32_t jobs)
 {
+	int r;
+
 	if (!(sched && entity && rq))
 		return -EINVAL;
 
 	memset(entity, 0, sizeof(struct amd_sched_entity));
-	entity->belongto_rq = rq;
-	entity->scheduler = sched;
-	entity->fence_context = fence_context_alloc(1);
-	if(kfifo_alloc(&entity->job_queue,
-		       jobs * sizeof(void *),
-		       GFP_KERNEL))
-		return -EINVAL;
+	INIT_LIST_HEAD(&entity->list);
+	entity->rq = rq;
+	entity->sched = sched;
 
 	spin_lock_init(&entity->queue_lock);
+	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
+	if (r)
+		return r;
+
 	atomic_set(&entity->fence_seq, 0);
+	entity->fence_context = fence_context_alloc(1);
 
 	/* Add the entity to the run queue */
 	amd_sched_rq_add_entity(rq, entity);
 
 	return 0;
 }
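The reorder has a benefit beyond the cleaner error code: kfifo_alloc() is the only call in this function that can fail, and it now runs before fence_context_alloc(), so a failed init no longer consumes a fence context (contexts come from a global counter and are never returned). Callers can also now tell an allocation failure apart from bad arguments. A minimal, hypothetical caller sketch (not part of this commit):

	/* Hypothetical caller: with this change, -ENOMEM from kfifo_alloc()
	 * is propagated instead of being folded into -EINVAL. */
	int r = amd_sched_entity_init(sched, entity, rq, amdgpu_sched_jobs);
	if (r) {
		DRM_ERROR("failed to init scheduler entity (%d)\n", r);
		return r;	/* -EINVAL: bad args; -ENOMEM: queue alloc failed */
	}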
@@ -149,8 +153,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
 static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
 					    struct amd_sched_entity *entity)
 {
-	return entity->scheduler == sched &&
-		entity->belongto_rq != NULL;
+	return entity->sched == sched &&
+		entity->rq != NULL;
 }
 
 /**
@@ -180,7 +184,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity)
 {
-	struct amd_sched_rq *rq = entity->belongto_rq;
+	struct amd_sched_rq *rq = entity->rq;
 
 	if (!amd_sched_entity_is_initialized(sched, entity))
 		return;
@@ -201,13 +205,13 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
 		container_of(cb, struct amd_sched_entity, cb);
 	entity->dependency = NULL;
 	fence_put(f);
-	amd_sched_wakeup(entity->scheduler);
+	amd_sched_wakeup(entity->sched);
 }
 
 static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 {
-	struct amd_gpu_scheduler *sched = entity->scheduler;
+	struct amd_gpu_scheduler *sched = entity->sched;
 	struct amd_sched_job *sched_job;
 
 	if (ACCESS_ONCE(entity->dependency))
@@ -275,7 +279,7 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 	fence_get(&fence->base);
 	sched_job->s_fence = fence;
 
-	wait_event(entity->scheduler->job_scheduled,
+	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
 	trace_amd_sched_job(sched_job);
 	return 0;
...
@@ -38,13 +38,15 @@ struct amd_sched_rq;
 */
 struct amd_sched_entity {
 	struct list_head		list;
-	struct amd_sched_rq		*belongto_rq;
-	atomic_t			fence_seq;
-	/* the job_queue maintains the jobs submitted by clients */
-	struct kfifo			job_queue;
+	struct amd_sched_rq		*rq;
+	struct amd_gpu_scheduler	*sched;
+
 	spinlock_t			queue_lock;
-	struct amd_gpu_scheduler	*scheduler;
+	struct kfifo			job_queue;
+
+	atomic_t			fence_seq;
 	uint64_t			fence_context;
+
 	struct fence			*dependency;
 	struct fence_cb			cb;
 };
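The new layout groups the members by role rather than by the order they happened to be added. An annotated view of the reordered struct (the comments are editorial, not part of the commit):

	struct amd_sched_entity {
		/* run-queue membership and owning scheduler */
		struct list_head		list;
		struct amd_sched_rq		*rq;
		struct amd_gpu_scheduler	*sched;

		/* jobs submitted by the client, protected by queue_lock */
		spinlock_t			queue_lock;
		struct kfifo			job_queue;

		/* per-entity fence timeline */
		atomic_t			fence_seq;
		uint64_t			fence_context;

		/* fence this entity is currently waiting on, plus its callback */
		struct fence			*dependency;
		struct fence_cb			cb;
	};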
...
@@ -36,7 +36,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
 	if (fence == NULL)
 		return NULL;
 	fence->owner = owner;
-	fence->scheduler = s_entity->scheduler;
+	fence->scheduler = s_entity->sched;
 	spin_lock_init(&fence->lock);
 
 	seq = atomic_inc_return(&s_entity->fence_seq);
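Each entity owns a private fence context (allocated once in amd_sched_entity_init()) plus a sequence counter, so fences from different entities sit on independent timelines. In sched_fence.c of this era the two are combined when the scheduler fence is initialized; roughly as below (the fence_init() call is an assumption about the surrounding code, not part of this diff):

	/* Pair the entity's private context with the next sequence number. */
	seq = atomic_inc_return(&s_entity->fence_seq);
	fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
		   s_entity->fence_context, seq);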