Commit 8935ff00 authored by Luben Tuikov, committed by Christian König

drm/scheduler: "node" --> "list"

Rename "node" to "list" in struct drm_sched_job,
in order to make it consistent with what we see
being used throughout gpu_scheduler.h, for
instance in struct drm_sched_entity, as well as
the rest of DRM and the kernel.
Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/403515/

Cc: Alexander Deucher <Alexander.Deucher@amd.com>
Cc: Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Christian König <christian.koenig@amd.com>
parent 2e2bf3a5
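The rename is mechanical, but it touches every call site because the kernel list iterators take the name of the struct list_head member as an argument. A minimal, self-contained sketch of that pattern (illustrative only, not part of this patch; "struct item" and "count_items" are made-up names):

	#include <linux/list.h>

	struct item {
		int payload;
		struct list_head list;	/* the member name the iterators refer to */
	};

	/* Count the entries on the list headed by @head. */
	static int count_items(struct list_head *head)
	{
		struct item *it;
		int n = 0;

		/* The third argument is the list_head member name, not a variable. */
		list_for_each_entry(it, head, list)
			n++;

		return n;
	}

Because the member name is spelled out at each list_for_each_entry()/list_add_tail()/list_del_init() call, renaming drm_sched_job.node to drm_sched_job.list requires the updates in the hunks below.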
@@ -1427,7 +1427,7 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
 	struct dma_fence *fence;
 
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+	list_for_each_entry(s_job, &sched->ring_mirror_list, list) {
 		fence = sched->ops->run_job(s_job);
 		dma_fence_put(fence);
 	}
@@ -1459,10 +1459,10 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
 
 no_preempt:
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, list) {
 		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
 			/* remove job from ring_mirror_list */
-			list_del_init(&s_job->node);
+			list_del_init(&s_job->list);
 			sched->ops->free_job(s_job);
 			continue;
 		}
@@ -4128,7 +4128,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
 
 		spin_lock(&ring->sched.job_list_lock);
 		job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
-				struct drm_sched_job, node);
+				struct drm_sched_job, list);
 		spin_unlock(&ring->sched.job_list_lock);
 		if (job)
 			return true;
@@ -271,7 +271,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	}
 
 	/* Signal all jobs already scheduled to HW */
-	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+	list_for_each_entry(s_job, &sched->ring_mirror_list, list) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 
 		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
@@ -272,7 +272,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
 	struct drm_gpu_scheduler *sched = s_job->sched;
 
 	spin_lock(&sched->job_list_lock);
-	list_add_tail(&s_job->node, &sched->ring_mirror_list);
+	list_add_tail(&s_job->list, &sched->ring_mirror_list);
 	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
@@ -287,7 +287,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
 	spin_lock(&sched->job_list_lock);
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
-				       struct drm_sched_job, node);
+				       struct drm_sched_job, list);
 
 	if (job) {
 		/*
@@ -295,7 +295,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
 		 * is parked at which point it's safe.
 		 */
-		list_del_init(&job->node);
+		list_del_init(&job->list);
 		spin_unlock(&sched->job_list_lock);
 
 		job->sched->ops->timedout_job(job);
@@ -392,7 +392,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 		 * Add at the head of the queue to reflect it was the earliest
 		 * job extracted.
 		 */
-		list_add(&bad->node, &sched->ring_mirror_list);
+		list_add(&bad->list, &sched->ring_mirror_list);
 
 	/*
 	 * Iterate the job list from later to earlier one and either deactive
@@ -400,7 +400,8 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 	 * signaled.
 	 * This iteration is thread safe as sched thread is stopped.
 	 */
-	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list,
+					 list) {
 		if (s_job->s_fence->parent &&
 		    dma_fence_remove_callback(s_job->s_fence->parent,
 					      &s_job->cb)) {
@@ -411,7 +412,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 			 * Locking here is for concurrent resume timeout
 			 */
 			spin_lock(&sched->job_list_lock);
-			list_del_init(&s_job->node);
+			list_del_init(&s_job->list);
 			spin_unlock(&sched->job_list_lock);
 
 			/*
@@ -462,7 +463,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 	 * so no new jobs are being inserted or removed. Also concurrent
 	 * GPU recovers can't run in parallel.
 	 */
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, list) {
 		struct dma_fence *fence = s_job->s_fence->parent;
 
 		atomic_inc(&sched->hw_rq_count);
@@ -505,7 +506,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 	bool found_guilty = false;
 	struct dma_fence *fence;
 
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, list) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 
 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
@@ -565,7 +566,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		return -ENOMEM;
 
 	job->id = atomic64_inc_return(&sched->job_id_count);
-	INIT_LIST_HEAD(&job->node);
+	INIT_LIST_HEAD(&job->list);
 
 	return 0;
 }
@@ -684,11 +685,11 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 	spin_lock(&sched->job_list_lock);
 
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
-				       struct drm_sched_job, node);
+				       struct drm_sched_job, list);
 
 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
 		/* remove job from ring_mirror_list */
-		list_del_init(&job->node);
+		list_del_init(&job->list);
 	} else {
 		job = NULL;
 		/* queue timeout for next job */
@@ -189,14 +189,14 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  */
 struct drm_sched_job {
 	struct spsc_node		queue_node;
+	struct list_head		list;
 	struct drm_gpu_scheduler	*sched;
 	struct drm_sched_fence		*s_fence;
 	struct dma_fence_cb		finish_cb;
-	struct list_head		node;
 	uint64_t			id;
 	atomic_t			karma;
 	enum drm_sched_priority		s_priority;
 	struct drm_sched_entity		*entity;
 	struct dma_fence_cb		cb;
 };