Commit 9b398fa5 authored by Christian König, committed by Alex Deucher

drm/amdgpu: rename fence->scheduler to sched v2

Just to be consistent with the other members.

v2: rename the ring member as well.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com> (v1)
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent 0f75aee7
@@ -891,7 +891,7 @@ struct amdgpu_ring {
 	struct amdgpu_device		*adev;
 	const struct amdgpu_ring_funcs	*funcs;
 	struct amdgpu_fence_driver	fence_drv;
-	struct amd_gpu_scheduler	*scheduler;
+	struct amd_gpu_scheduler	*sched;
 	spinlock_t			fence_lock;
 	struct mutex			*ring_lock;
...
@@ -848,7 +848,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 		if (!job)
 			return -ENOMEM;
-		job->base.sched = ring->scheduler;
+		job->base.sched = ring->sched;
 		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
 		job->adev = parser->adev;
 		job->ibs = parser->ibs;
...
@@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
 		for (i = 0; i < adev->num_rings; i++) {
 			struct amd_sched_rq *rq;
 			if (kernel)
-				rq = &adev->rings[i]->scheduler->kernel_rq;
+				rq = &adev->rings[i]->sched->kernel_rq;
 			else
-				rq = &adev->rings[i]->scheduler->sched_rq;
-			r = amd_sched_entity_init(adev->rings[i]->scheduler,
+				rq = &adev->rings[i]->sched->sched_rq;
+			r = amd_sched_entity_init(adev->rings[i]->sched,
 						  &ctx->rings[i].entity,
 						  rq, amdgpu_sched_jobs);
 			if (r)
@@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
 	if (i < adev->num_rings) {
 		for (j = 0; j < i; j++)
-			amd_sched_entity_fini(adev->rings[j]->scheduler,
+			amd_sched_entity_fini(adev->rings[j]->sched,
 					      &ctx->rings[j].entity);
 		kfree(ctx);
 		return r;
@@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 	if (amdgpu_enable_scheduler) {
 		for (i = 0; i < adev->num_rings; i++)
-			amd_sched_entity_fini(adev->rings[i]->scheduler,
+			amd_sched_entity_fini(adev->rings[i]->sched,
 					      &ctx->rings[i].entity);
 	}
 }
...
@@ -626,11 +626,11 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 	ring->fence_drv.ring = ring;
 	if (amdgpu_enable_scheduler) {
-		ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
-						   ring->idx,
-						   amdgpu_sched_hw_submission,
-						   (void *)ring->adev);
-		if (!ring->scheduler)
+		ring->sched = amd_sched_create(&amdgpu_sched_ops,
+					       ring->idx,
+					       amdgpu_sched_hw_submission,
+					       (void *)ring->adev);
+		if (!ring->sched)
 			DRM_ERROR("Failed to create scheduler on ring %d.\n",
 				  ring->idx);
 	}
@@ -681,8 +681,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		wake_up_all(&ring->fence_drv.fence_queue);
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
-		if (ring->scheduler)
-			amd_sched_destroy(ring->scheduler);
+		if (ring->sched)
+			amd_sched_destroy(ring->sched);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);
...
@@ -146,7 +146,7 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
 	s_fence = to_amd_sched_fence(f);
 	if (s_fence)
-		return s_fence->scheduler->ring_id;
+		return s_fence->sched->ring_id;
 	a_fence = to_amdgpu_fence(f);
 	if (a_fence)
 		return a_fence->ring->idx;
@@ -437,7 +437,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 		if (s_fence)
 			seq_printf(m, " protected by 0x%016x on ring %d",
 				   s_fence->base.seqno,
-				   s_fence->scheduler->ring_id);
+				   s_fence->sched->ring_id);
 		}
 		seq_printf(m, "\n");
...
@@ -85,7 +85,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 			kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 		if (!job)
 			return -ENOMEM;
-		job->base.sched = ring->scheduler;
+		job->base.sched = ring->sched;
 		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
 		job->adev = adev;
 		job->ibs = ibs;
...
@@ -66,7 +66,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
 	if (a_fence)
 		return a_fence->ring->adev == adev;
 	if (s_fence)
-		return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+		return (struct amdgpu_device *)s_fence->sched->priv == adev;
 	return false;
 }
...
@@ -326,7 +326,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 {
 	struct amd_sched_fence *s_fence =
 		container_of(cb, struct amd_sched_fence, cb);
-	struct amd_gpu_scheduler *sched = s_fence->scheduler;
+	struct amd_gpu_scheduler *sched = s_fence->sched;
 	atomic_dec(&sched->hw_rq_count);
 	amd_sched_fence_signal(s_fence);
...
@@ -65,7 +65,7 @@ struct amd_sched_rq {
 struct amd_sched_fence {
 	struct fence			base;
 	struct fence_cb			cb;
-	struct amd_gpu_scheduler	*scheduler;
+	struct amd_gpu_scheduler	*sched;
 	spinlock_t			lock;
 	void				*owner;
 };
...
@@ -36,7 +36,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
 	if (fence == NULL)
 		return NULL;
 	fence->owner = owner;
-	fence->scheduler = s_entity->sched;
+	fence->sched = s_entity->sched;
 	spin_lock_init(&fence->lock);
 	seq = atomic_inc_return(&s_entity->fence_seq);
@@ -63,7 +63,7 @@ static const char *amd_sched_fence_get_driver_name(struct fence *fence)
 static const char *amd_sched_fence_get_timeline_name(struct fence *f)
 {
 	struct amd_sched_fence *fence = to_amd_sched_fence(f);
-	return (const char *)fence->scheduler->name;
+	return (const char *)fence->sched->name;
 }
 static bool amd_sched_fence_enable_signaling(struct fence *f)