Commit aa16b6c6 authored by Nayan Deshmukh, committed by Alex Deucher

drm/scheduler: modify args of drm_sched_entity_init

Replace the run queue argument with a list of run queues and remove the
sched argument, as the scheduler is already part of the run queue itself.
Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Acked-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 8dc9fbbf
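For reference, a minimal sketch of how a call site migrates to the new signature (the function and variable names example_entity_setup, sched, and entity are placeholders, not taken from this patch):

	/*
	 * Illustrative only: build a one-element run queue list and pass it
	 * to drm_sched_entity_init(); the scheduler argument is gone and is
	 * derived from rq_list[0]->sched inside the scheduler core.
	 */
	static int example_entity_setup(struct drm_gpu_scheduler *sched,
					struct drm_sched_entity *entity)
	{
		struct drm_sched_rq *rq;

		/* Single run queue at normal priority, as most call sites below do. */
		rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

		return drm_sched_entity_init(entity, &rq, 1, NULL);
	}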
@@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 		if (ring == &adev->gfx.kiq.ring)
 			continue;
-		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-					  rq, &ctx->guilty);
+		r = drm_sched_entity_init(&ctx->rings[i].entity,
+					  &rq, 1, &ctx->guilty);
 		if (r)
 			goto failed;
 	}
...
@@ -1918,8 +1918,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 		ring = adev->mman.buffer_funcs_ring;
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-		r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
 				  r);
...
@@ -266,8 +266,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 		ring = &adev->uvd.inst[j].ring;
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->uvd.inst[j].entity, &rq,
+					  1, NULL);
 		if (r != 0) {
 			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
 			return r;
...
@@ -190,8 +190,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 	ring = &adev->vce.ring[0];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
-				  rq, NULL);
+	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCE run queue.\n");
 		return r;
...
@@ -2564,8 +2564,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-	r = drm_sched_entity_init(&ring->sched, &vm->entity,
-				  rq, NULL);
+	r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
 	if (r)
 		return r;
...
@@ -430,8 +430,8 @@ static int uvd_v6_0_sw_init(void *handle)
 		struct drm_sched_rq *rq;
 		ring = &adev->uvd.inst->ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->uvd.inst->entity_enc,
+					  &rq, 1, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up UVD ENC run queue.\n");
 			return r;
...
@@ -432,8 +432,8 @@ static int uvd_v7_0_sw_init(void *handle)
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 		ring = &adev->uvd.inst[j].ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->uvd.inst[j].entity_enc,
+					  &rq, 1, NULL);
 		if (r) {
 			DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
 			return r;
...
@@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
 		struct etnaviv_gpu *gpu = priv->gpu[i];
+		struct drm_sched_rq *rq;
 		if (gpu) {
-			drm_sched_entity_init(&gpu->sched,
-					      &ctx->sched_entity[i],
-					      &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-					      NULL);
+			rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+			drm_sched_entity_init(&ctx->sched_entity[i],
+					      &rq, 1, NULL);
 		}
 	}
...
@@ -162,26 +162,30 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
  * drm_sched_entity_init - Init a context entity used by scheduler when
  * submit to HW ring.
  *
- * @sched: scheduler instance
  * @entity: scheduler entity to init
- * @rq: the run queue this entity belongs
+ * @rq_list: the list of run queues on which jobs from this
+ *           entity can be submitted
+ * @num_rq_list: number of run queues in rq_list
  * @guilty: atomic_t set to 1 when a job on this queue
  *          is found to be guilty causing a timeout
  *
+ * Note: the rq_list should have at least one element to schedule
+ *       the entity
+ *
  * Returns 0 on success or a negative error code on failure.
  */
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-			  struct drm_sched_entity *entity,
-			  struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+			  struct drm_sched_rq **rq_list,
+			  unsigned int num_rq_list,
 			  atomic_t *guilty)
 {
-	if (!(sched && entity && rq))
+	if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
 		return -EINVAL;
 	memset(entity, 0, sizeof(struct drm_sched_entity));
 	INIT_LIST_HEAD(&entity->list);
-	entity->rq = rq;
-	entity->sched = sched;
+	entity->rq = rq_list[0];
+	entity->sched = rq_list[0]->sched;
 	entity->guilty = guilty;
 	entity->last_scheduled = NULL;
...
@@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_file_priv *v3d_priv;
+	struct drm_sched_rq *rq;
 	int i;
 	v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 	v3d_priv->v3d = v3d;
 	for (i = 0; i < V3D_MAX_QUEUES; i++) {
-		drm_sched_entity_init(&v3d->queue[i].sched,
-				      &v3d_priv->sched_entity[i],
-				      &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-				      NULL);
+		rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+		drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
 	}
 	file->driver_priv = v3d_priv;
...
@@ -282,9 +282,9 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const char *name);
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-			  struct drm_sched_entity *entity,
-			  struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+			  struct drm_sched_rq **rq_list,
+			  unsigned int num_rq_list,
 			  atomic_t *guilty);
 long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
 			    struct drm_sched_entity *entity, long timeout);
...