Commit f880799d authored by Nirmoy Das, committed by Alex Deucher

amd/amdgpu: add sched array to IPs with multiple run-queues

This sched array can be passed to the entity creation routine
instead of manually building such an array on every context creation.

v2: squash in missing break fix
Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 0c88b430
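
As a rough standalone model of the idea (hypothetical stand-in types and names, not the real drm_gpu_scheduler/amdgpu structures): before this patch every context creation walked each IP's rings and rebuilt a scheduler list on the stack; with it, the per-IP array is filled once at device init and every new context simply hands that ready-made array to entity creation.

#include <stdio.h>

/* Hypothetical stand-ins for struct drm_gpu_scheduler and struct amdgpu_ring. */
struct sched { int id; };
struct ring  { struct sched sched; };

#define NUM_SDMA 2

static struct ring  sdma_ring[NUM_SDMA];
static struct sched *sdma_sched[NUM_SDMA];	/* filled once, like adev->sdma.sdma_sched */
static unsigned int  num_sdma_sched;

/* Analogue of amdgpu_ctx_init_sched(): build the array a single time. */
static void device_init_sched(void)
{
	for (unsigned int i = 0; i < NUM_SDMA; i++)
		sdma_sched[num_sdma_sched++] = &sdma_ring[i].sched;
}

/* Analogue of drm_sched_entity_init(): consumes a ready-made scheduler list. */
static void entity_init(struct sched **scheds, unsigned int num_scheds)
{
	for (unsigned int i = 0; i < num_scheds; i++)
		printf("entity may run on scheduler %d\n", scheds[i]->id);
}

/* Analogue of amdgpu_ctx_init(): no per-context list building any more. */
static void ctx_init(void)
{
	entity_init(sdma_sched, num_sdma_sched);
}

int main(void)
{
	for (int i = 0; i < NUM_SDMA; i++)
		sdma_ring[i].sched.id = i;

	device_init_sched();
	ctx_init();	/* every new context reuses the same array */
	ctx_init();
	return 0;
}
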
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

@@ -74,7 +74,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			   struct amdgpu_ctx *ctx)
 {
 	unsigned num_entities = amdgpu_ctx_total_num_entities();
-	unsigned i, j, k;
+	unsigned i, j;
 	int r;
 
 	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
@@ -121,73 +121,56 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
 
 	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
-		struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
-		struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
-		unsigned num_rings = 0;
-		unsigned num_rqs = 0;
+		struct drm_gpu_scheduler **scheds;
+		struct drm_gpu_scheduler *sched;
+		unsigned num_scheds = 0;
 
 		switch (i) {
 		case AMDGPU_HW_IP_GFX:
-			rings[0] = &adev->gfx.gfx_ring[0];
-			num_rings = 1;
+			scheds = adev->gfx.gfx_sched;
+			num_scheds = 1;
 			break;
 		case AMDGPU_HW_IP_COMPUTE:
-			for (j = 0; j < adev->gfx.num_compute_rings; ++j)
-				rings[j] = &adev->gfx.compute_ring[j];
-			num_rings = adev->gfx.num_compute_rings;
+			scheds = adev->gfx.compute_sched;
+			num_scheds = adev->gfx.num_compute_sched;
 			break;
 		case AMDGPU_HW_IP_DMA:
-			for (j = 0; j < adev->sdma.num_instances; ++j)
-				rings[j] = &adev->sdma.instance[j].ring;
-			num_rings = adev->sdma.num_instances;
+			scheds = adev->sdma.sdma_sched;
+			num_scheds = adev->sdma.num_sdma_sched;
 			break;
 		case AMDGPU_HW_IP_UVD:
-			rings[0] = &adev->uvd.inst[0].ring;
-			num_rings = 1;
+			sched = &adev->uvd.inst[0].ring.sched;
+			scheds = &sched;
+			num_scheds = 1;
 			break;
 		case AMDGPU_HW_IP_VCE:
-			rings[0] = &adev->vce.ring[0];
-			num_rings = 1;
+			sched = &adev->vce.ring[0].sched;
+			scheds = &sched;
+			num_scheds = 1;
 			break;
 		case AMDGPU_HW_IP_UVD_ENC:
-			rings[0] = &adev->uvd.inst[0].ring_enc[0];
-			num_rings = 1;
+			sched = &adev->uvd.inst[0].ring_enc[0].sched;
+			scheds = &sched;
+			num_scheds = 1;
 			break;
 		case AMDGPU_HW_IP_VCN_DEC:
-			for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
-				if (adev->vcn.harvest_config & (1 << j))
-					continue;
-				rings[num_rings++] = &adev->vcn.inst[j].ring_dec;
-			}
+			scheds = adev->vcn.vcn_dec_sched;
+			num_scheds = adev->vcn.num_vcn_dec_sched;
 			break;
 		case AMDGPU_HW_IP_VCN_ENC:
-			for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
-				if (adev->vcn.harvest_config & (1 << j))
-					continue;
-				for (k = 0; k < adev->vcn.num_enc_rings; ++k)
-					rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k];
-			}
+			scheds = adev->vcn.vcn_enc_sched;
+			num_scheds = adev->vcn.num_vcn_enc_sched;
 			break;
 		case AMDGPU_HW_IP_VCN_JPEG:
-			for (j = 0; j < adev->jpeg.num_jpeg_inst; ++j) {
-				if (adev->jpeg.harvest_config & (1 << j))
-					continue;
-				rings[num_rings++] = &adev->jpeg.inst[j].ring_dec;
-			}
+			scheds = adev->jpeg.jpeg_sched;
+			num_scheds = adev->jpeg.num_jpeg_sched;
 			break;
 		}
 
-		for (j = 0; j < num_rings; ++j) {
-			if (!rings[j]->adev)
-				continue;
-
-			sched_list[num_rqs++] = &rings[j]->sched;
-		}
-
 		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
 			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
-						  priority, sched_list,
-						  num_rqs, &ctx->guilty);
+						  priority, scheds,
+						  num_scheds, &ctx->guilty);
 		if (r)
 			goto error_cleanup_entities;
 	}
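
For the single-queue IPs handled above (UVD, VCE, UVD_ENC) there is no device-level array, so a one-element list is formed on the stack by taking the address of a local pointer (scheds = &sched). A small self-contained illustration of that idiom, using hypothetical types rather than the real drm structures:

#include <assert.h>

struct sched { int id; };

/* Stand-in for a consumer such as drm_sched_entity_init() that expects a
 * list of scheduler pointers plus its length. */
static int pick_first(struct sched **list, unsigned int n)
{
	assert(n >= 1);
	return list[0]->id;
}

int main(void)
{
	struct sched uvd_sched = { .id = 7 };

	struct sched *sched = &uvd_sched;	/* local pointer to the only scheduler */
	struct sched **scheds = &sched;		/* its address is a valid 1-entry list */

	assert(pick_first(scheds, 1) == 7);
	return 0;
}
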
@@ -628,3 +611,45 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 	idr_destroy(&mgr->ctx_handles);
 	mutex_destroy(&mgr->lock);
 }
+
+void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
+{
+	int i, j;
+
+	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
+		adev->gfx.num_gfx_sched++;
+	}
+
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
+		adev->gfx.num_compute_sched++;
+	}
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
+		adev->sdma.num_sdma_sched++;
+	}
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+		adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
+			&adev->vcn.inst[i].ring_dec.sched;
+	}
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
+			adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
+				&adev->vcn.inst[i].ring_enc[j].sched;
+	}
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+		adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
+			&adev->jpeg.inst[i].ring_dec.sched;
+	}
+}
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h

@@ -87,4 +87,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
+void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
+
+
 #endif
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -3036,6 +3036,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 			adev->gfx.config.max_cu_per_sh,
 			adev->gfx.cu_info.number);
 
+	amdgpu_ctx_init_sched(adev);
+
 	adev->accel_working = true;
 
 	amdgpu_vm_check_compute_bug(adev);
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h

@@ -269,8 +269,12 @@ struct amdgpu_gfx {
 	bool				me_fw_write_wait;
 	bool				cp_fw_write_wait;
 	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
+	struct drm_gpu_scheduler	*gfx_sched[AMDGPU_MAX_GFX_RINGS];
+	uint32_t			num_gfx_sched;
 	unsigned			num_gfx_rings;
 	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+	struct drm_gpu_scheduler	*compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
+	uint32_t			num_compute_sched;
 	unsigned			num_compute_rings;
 	struct amdgpu_irq_src		eop_irq;
 	struct amdgpu_irq_src		priv_reg_irq;
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h

@@ -43,6 +43,8 @@ struct amdgpu_jpeg {
 	uint8_t	num_jpeg_inst;
 	struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
 	struct amdgpu_jpeg_reg internal;
+	struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES];
+	uint32_t num_jpeg_sched;
 	unsigned harvest_config;
 	struct delayed_work idle_work;
 	enum amd_powergating_state cur_state;
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h

@@ -52,6 +52,8 @@ struct amdgpu_sdma_instance {
 struct amdgpu_sdma {
 	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+	struct drm_gpu_scheduler *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES];
+	uint32_t num_sdma_sched;
 	struct amdgpu_irq_src trap_irq;
 	struct amdgpu_irq_src illegal_inst_irq;
 	struct amdgpu_irq_src ecc_irq;
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h

@@ -31,6 +31,7 @@
 #define AMDGPU_VCN_MAX_ENC_RINGS	3
 
 #define AMDGPU_MAX_VCN_INSTANCES	2
+#define AMDGPU_MAX_VCN_ENC_RINGS  AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES
 
 #define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
 #define AMDGPU_VCN_HARVEST_VCN1 (1 << 1)
@@ -189,8 +190,12 @@ struct amdgpu_vcn {
 	uint32_t		*dpg_sram_curr_addr;
 
 	uint8_t	num_vcn_inst;
 	struct amdgpu_vcn_inst	 inst[AMDGPU_MAX_VCN_INSTANCES];
 	struct amdgpu_vcn_reg	 internal;
+	struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
+	struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
+	uint32_t		 num_vcn_enc_sched;
+	uint32_t		 num_vcn_dec_sched;
 	unsigned	harvest_config;
 	int (*pause_dpg_mode)(struct amdgpu_device *adev,