Commit fe375c74 authored by Luben Tuikov

drm/sched: Rename priority MIN to LOW

Rename DRM_SCHED_PRIORITY_MIN to DRM_SCHED_PRIORITY_LOW.

This mirrors DRM_SCHED_PRIORITY_HIGH, yielding the following list of DRM
scheduler priorities in ascending order:
  DRM_SCHED_PRIORITY_LOW,
  DRM_SCHED_PRIORITY_NORMAL,
  DRM_SCHED_PRIORITY_HIGH,
  DRM_SCHED_PRIORITY_KERNEL.
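
Since these values index a scheduler's run-queue array directly (the
comment in include/drm/gpu_scheduler.h notes the enum "should start
at 0"), DRM_SCHED_PRIORITY_LOW is 0 and KERNEL is the highest. A
minimal, illustrative-only sketch of that indexing, where consider()
is a hypothetical stand-in for the real entity-selection helpers:

  int i;

  /* Walk run queues from highest priority (KERNEL) down to LOW,
   * mirroring the loops in sched_main.c below.
   */
  for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--)
          consider(sched->sched_rq[i]);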

Cc: Rob Clark <robdclark@gmail.com>
Cc: Abhinav Kumar <quic_abhinavk@quicinc.com>
Cc: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Cc: Danilo Krummrich <dakr@redhat.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: linux-arm-msm@vger.kernel.org
Cc: freedreno@lists.freedesktop.org
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231124052752.6915-5-ltuikov89@gmail.com
parent 2bbe6ab2
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -73,10 +73,10 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
 		return DRM_SCHED_PRIORITY_NORMAL;
 
 	case AMDGPU_CTX_PRIORITY_VERY_LOW:
-		return DRM_SCHED_PRIORITY_MIN;
+		return DRM_SCHED_PRIORITY_LOW;
 
 	case AMDGPU_CTX_PRIORITY_LOW:
-		return DRM_SCHED_PRIORITY_MIN;
+		return DRM_SCHED_PRIORITY_LOW;
 
 	case AMDGPU_CTX_PRIORITY_NORMAL:
 		return DRM_SCHED_PRIORITY_NORMAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -325,7 +325,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	int i;
 
 	/* Signal all jobs not yet scheduled */
-	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
 		struct drm_sched_rq *rq = sched->sched_rq[i];
 		spin_lock(&rq->lock);
 		list_for_each_entry(s_entity, &rq->entities, list) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -347,7 +347,7 @@ struct msm_gpu_perfcntr {
  * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
  * cases, so we don't use it (no need for kernel generated jobs).
  */
-#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
+#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_LOW)
 
 /**
  * struct msm_file_private - per-drm_file context
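For reference, since the enum (see include/drm/gpu_scheduler.h below) starts at DRM_SCHED_PRIORITY_LOW = 0, msm's NR_SCHED_PRIORITIES evaluates to 1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_LOW = 1 + 2 - 0 = 3 (the LOW, NORMAL and HIGH levels, with KERNEL deliberately excluded); the rename does not change its value.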
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -88,7 +88,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
 				entity->priority, sched_list[0]->num_rqs);
 			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
-						 (s32) DRM_SCHED_PRIORITY_MIN);
+						 (s32) DRM_SCHED_PRIORITY_LOW);
 		}
 		entity->rq = sched_list[0]->sched_rq[entity->priority];
 	}
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1052,7 +1052,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 	int i;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
 		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
 			drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
 			drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
@@ -1291,7 +1291,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	if (!sched->sched_rq)
 		goto Out_free;
 	sched->num_rqs = num_rqs;
-	for (i = DRM_SCHED_PRIORITY_MIN; i < sched->num_rqs; i++) {
+	for (i = DRM_SCHED_PRIORITY_LOW; i < sched->num_rqs; i++) {
 		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
 		if (!sched->sched_rq[i])
 			goto Out_unroll;
@@ -1312,7 +1312,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	sched->ready = true;
 	return 0;
 Out_unroll:
-	for (--i ; i >= DRM_SCHED_PRIORITY_MIN; i--)
+	for (--i ; i >= DRM_SCHED_PRIORITY_LOW; i--)
 		kfree(sched->sched_rq[i]);
 Out_free:
 	kfree(sched->sched_rq);
@@ -1338,7 +1338,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 
 	drm_sched_wqueue_stop(sched);
 
-	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
 		struct drm_sched_rq *rq = sched->sched_rq[i];
 
 		spin_lock(&rq->lock);
@@ -1390,7 +1390,7 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
 		atomic_inc(&bad->karma);
 
-		for (i = DRM_SCHED_PRIORITY_MIN;
+		for (i = DRM_SCHED_PRIORITY_LOW;
 		     i < min_t(typeof(sched->num_rqs), sched->num_rqs, DRM_SCHED_PRIORITY_KERNEL);
 		     i++) {
 			struct drm_sched_rq *rq = sched->sched_rq[i];
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -63,7 +63,7 @@ struct drm_file;
  * to an array, and as such should start at 0.
  */
 enum drm_sched_priority {
-	DRM_SCHED_PRIORITY_MIN,
+	DRM_SCHED_PRIORITY_LOW,
 	DRM_SCHED_PRIORITY_NORMAL,
 	DRM_SCHED_PRIORITY_HIGH,
 	DRM_SCHED_PRIORITY_KERNEL,