Commit 69f7dd65 authored by Christian König and committed by Alex Deucher

drm/amdgpu: remove unused parameters to amd_sched_create

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
parent 1fca766b
@@ -626,9 +626,8 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 	ring->fence_drv.ring = ring;
 
 	if (amdgpu_enable_scheduler) {
-		ring->scheduler = amd_sched_create((void *)ring->adev,
-						   &amdgpu_sched_ops,
-						   ring->idx, 5, 0,
+		ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
+						   ring->idx,
 						   amdgpu_sched_hw_submission);
 		if (!ring->scheduler)
 			DRM_ERROR("Failed to create scheduler on ring %d.\n",
...
@@ -325,20 +325,14 @@ static int amd_sched_main(void *param)
 /**
  * Create a gpu scheduler
  *
- * @device	The device context for this scheduler
- * @ops	The backend operations for this scheduler.
- * @id	The scheduler is per ring, here is ring id.
- * @granularity	The minumum ms unit the scheduler will scheduled.
- * @preemption	Indicate whether this ring support preemption, 0 is no.
+ * @ops		The backend operations for this scheduler.
+ * @ring		The the ring id for the scheduler.
+ * @hw_submissions	Number of hw submissions to do.
  *
- * return the pointer to scheduler for success, otherwise return NULL
+ * Return the pointer to scheduler for success, otherwise return NULL
 */
-struct amd_gpu_scheduler *amd_sched_create(void *device,
-					   struct amd_sched_backend_ops *ops,
-					   unsigned ring,
-					   unsigned granularity,
-					   unsigned preemption,
-					   unsigned hw_submission)
+struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
+					   unsigned ring, unsigned hw_submission)
 {
 	struct amd_gpu_scheduler *sched;
 	char name[20];
@@ -347,11 +341,8 @@ struct amd_gpu_scheduler *amd_sched_create(void *device,
 	if (!sched)
 		return NULL;
 
-	sched->device = device;
 	sched->ops = ops;
-	sched->granularity = granularity;
 	sched->ring_id = ring;
-	sched->preemption = preemption;
 	sched->hw_submission_limit = hw_submission;
 	snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
 	amd_sched_rq_init(&sched->sched_rq);
...
@@ -104,25 +104,19 @@ struct amd_sched_backend_ops {
  * One scheduler is implemented for each hardware ring
 */
 struct amd_gpu_scheduler {
-	void				*device;
 	struct task_struct		*thread;
 	struct amd_sched_rq		sched_rq;
 	struct amd_sched_rq		kernel_rq;
 	atomic_t			hw_rq_count;
 	struct amd_sched_backend_ops	*ops;
 	uint32_t			ring_id;
-	uint32_t			granularity; /* in ms unit */
-	uint32_t			preemption;
 	wait_queue_head_t		wait_queue;
 	uint32_t			hw_submission_limit;
 };
 
-struct amd_gpu_scheduler *amd_sched_create(void *device,
-				struct amd_sched_backend_ops *ops,
-				uint32_t ring,
-				uint32_t granularity,
-				uint32_t preemption,
-				uint32_t hw_submission);
+struct amd_gpu_scheduler *
+amd_sched_create(struct amd_sched_backend_ops *ops,
+		 uint32_t ring, uint32_t hw_submission);
 int amd_sched_destroy(struct amd_gpu_scheduler *sched);
 
 int amd_sched_push_job(struct amd_sched_job *sched_job);
...
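
For reference, the net effect on callers is that amd_sched_create() now takes only the backend ops, the ring id and the hardware submission limit; the device pointer, granularity and preemption arguments are gone, along with the matching struct members. The listing below is a minimal, self-contained sketch of that trimmed interface using stub types, not the in-tree code: the member names and assignments come from the hunks above, while the stub ops body, main() and the sample values are invented purely for illustration.

/*
 * Illustrative, compilable mock-up of the trimmed amd_sched_create()
 * interface.  Member names and assignments follow the hunks above; the
 * stub amd_sched_backend_ops body, main() and the sample values do not
 * exist in the kernel tree.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct amd_sched_backend_ops {
	int unused;	/* stand-in; the real ops table holds callbacks */
};

struct amd_gpu_scheduler {
	struct amd_sched_backend_ops *ops;
	uint32_t ring_id;
	uint32_t hw_submission_limit;
};

/* New shape of the constructor: no device, granularity or preemption. */
static struct amd_gpu_scheduler *
amd_sched_create(struct amd_sched_backend_ops *ops,
		 uint32_t ring, uint32_t hw_submission)
{
	struct amd_gpu_scheduler *sched = calloc(1, sizeof(*sched));

	if (!sched)
		return NULL;

	sched->ops = ops;
	sched->ring_id = ring;
	sched->hw_submission_limit = hw_submission;
	return sched;
}

int main(void)
{
	static struct amd_sched_backend_ops amdgpu_sched_ops;
	/* Caller side after the patch; compare the fence driver hunk above. */
	struct amd_gpu_scheduler *sched =
		amd_sched_create(&amdgpu_sched_ops, 0, 2);

	if (!sched) {
		fprintf(stderr, "Failed to create scheduler on ring 0.\n");
		return 1;
	}
	printf("ring %u, hw submission limit %u\n",
	       (unsigned)sched->ring_id,
	       (unsigned)sched->hw_submission_limit);
	free(sched);
	return 0;
}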