Commit 2029b3d7 authored by Jack Xiao, committed by Alex Deucher

drm/amdgpu/mes: add multiple mes ring instances support

Add multiple mes ring instances to the mes structure to support
multiple mes pipes.
Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit c7d43556)
parent 278e1865
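The pattern of the change, for readers skimming the diff below: the single `ring`/`ring_lock` pair in `struct amdgpu_mes` becomes a per-pipe array sized by `AMDGPU_MAX_MES_PIPES`, and every existing caller is redirected to index 0 (the scheduler pipe), leaving the other slots free for additional pipes. A minimal standalone sketch of that pattern, using hypothetical stand-in types (`fake_ring`, `fake_mes`, `MAX_PIPES`) rather than the real driver structures:

```c
#include <stdio.h>

#define MAX_PIPES 2 /* stands in for AMDGPU_MAX_MES_PIPES */

/* hypothetical stand-in for struct amdgpu_ring */
struct fake_ring {
	int ready; /* stands in for ring.sched.ready */
};

/* hypothetical stand-in for struct amdgpu_mes: before the patch it
 * held one ring and one ring_lock; after, one of each per pipe */
struct fake_mes {
	struct fake_ring ring[MAX_PIPES];
	int ring_lock[MAX_PIPES]; /* stands in for spinlock_t */
};

int main(void)
{
	struct fake_mes mes = { 0 };
	int i;

	/* per-pipe lock init, mirroring the amdgpu_mes_init() hunk */
	for (i = 0; i < MAX_PIPES; i++)
		mes.ring_lock[i] = 0;

	/* existing single-pipe users are rewritten to index pipe 0,
	 * e.g. adev->mes.ring.sched.ready -> adev->mes.ring[0].sched.ready */
	mes.ring[0].ready = 1;
	printf("sched pipe ready: %d\n", mes.ring[0].ready);
	return 0;
}
```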
@@ -995,7 +995,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_
 	if (amdgpu_device_skip_hw_access(adev))
 		return 0;
 
-	if (adev->mes.ring.sched.ready)
+	if (adev->mes.ring[0].sched.ready)
 		return amdgpu_mes_rreg(adev, reg);
 
 	BUG_ON(!ring->funcs->emit_rreg);
@@ -1065,7 +1065,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
 	if (amdgpu_device_skip_hw_access(adev))
 		return;
 
-	if (adev->mes.ring.sched.ready) {
+	if (adev->mes.ring[0].sched.ready) {
 		amdgpu_mes_wreg(adev, reg, v);
 		return;
 	}
@@ -589,7 +589,8 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
 		ring = adev->rings[i];
 		vmhub = ring->vm_hub;
 
-		if (ring == &adev->mes.ring ||
+		if (ring == &adev->mes.ring[0] ||
+		    ring == &adev->mes.ring[1] ||
 		    ring == &adev->umsch_mm.ring)
 			continue;
 
@@ -761,7 +762,7 @@ void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
 	unsigned long flags;
 	uint32_t seq;
 
-	if (adev->mes.ring.sched.ready) {
+	if (adev->mes.ring[0].sched.ready) {
 		amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
 					      ref, mask);
 		return;
@@ -135,9 +135,11 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
 	idr_init(&adev->mes.queue_id_idr);
 	ida_init(&adev->mes.doorbell_ida);
 	spin_lock_init(&adev->mes.queue_id_lock);
-	spin_lock_init(&adev->mes.ring_lock);
 	mutex_init(&adev->mes.mutex_hidden);
 
+	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
+		spin_lock_init(&adev->mes.ring_lock[i]);
+
 	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
 	adev->mes.vmid_mask_mmhub = 0xffffff00;
 	adev->mes.vmid_mask_gfxhub = 0xffffff00;
@@ -82,8 +82,8 @@ struct amdgpu_mes {
 	uint64_t default_process_quantum;
 	uint64_t default_gang_quantum;
 
-	struct amdgpu_ring ring;
-	spinlock_t ring_lock;
+	struct amdgpu_ring ring[AMDGPU_MAX_MES_PIPES];
+	spinlock_t ring_lock[AMDGPU_MAX_MES_PIPES];
 
 	const struct firmware *fw[AMDGPU_MAX_MES_PIPES];
 
@@ -858,7 +858,7 @@ void amdgpu_virt_post_reset(struct amdgpu_device *adev)
 		adev->gfx.is_poweron = false;
 	}
 
-	adev->mes.ring.sched.ready = false;
+	adev->mes.ring[0].sched.ready = false;
 }
 
 bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
@@ -231,7 +231,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	/* This is necessary for SRIOV as well as for GFXOFF to function
 	 * properly under bare metal
 	 */
-	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
+	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
 		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
 						 1 << vmid, GET_INST(GC, 0));
@@ -299,7 +299,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	/* This is necessary for SRIOV as well as for GFXOFF to function
 	 * properly under bare metal
 	 */
-	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
+	if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
 		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
 		const unsigned eng = 17;
@@ -162,7 +162,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
 	union MESAPI__QUERY_MES_STATUS mes_status_pkt;
 	signed long timeout = 3000000; /* 3000 ms */
 	struct amdgpu_device *adev = mes->adev;
-	struct amdgpu_ring *ring = &mes->ring;
+	struct amdgpu_ring *ring = &mes->ring[0];
 	struct MES_API_STATUS *api_status;
 	union MESAPI__MISC *x_pkt = pkt;
 	const char *op_str, *misc_op_str;
@@ -191,7 +191,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
 	status_ptr = (u64 *)&adev->wb.wb[status_offset];
 	*status_ptr = 0;
 
-	spin_lock_irqsave(&mes->ring_lock, flags);
+	spin_lock_irqsave(&mes->ring_lock[0], flags);
 	r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
 	if (r)
 		goto error_unlock_free;
@@ -221,7 +221,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
 					     sizeof(mes_status_pkt) / 4);
 
 	amdgpu_ring_commit(ring);
-	spin_unlock_irqrestore(&mes->ring_lock, flags);
+	spin_unlock_irqrestore(&mes->ring_lock[0], flags);
 
 	op_str = mes_v11_0_get_op_string(x_pkt);
 	misc_op_str = mes_v11_0_get_misc_op_string(x_pkt);
@@ -263,7 +263,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
 	amdgpu_ring_undo(ring);
 
 error_unlock_free:
-	spin_unlock_irqrestore(&mes->ring_lock, flags);
+	spin_unlock_irqrestore(&mes->ring_lock[0], flags);
 
 error_wb_free:
 	amdgpu_device_wb_free(adev, status_offset);
@@ -1025,7 +1025,7 @@ static int mes_v11_0_kiq_enable_queue(struct amdgpu_device *adev)
 		return r;
 	}
 
-	kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
+	kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]);
 
 	return amdgpu_ring_test_helper(kiq_ring);
 }
@@ -1039,7 +1039,7 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev,
 	if (pipe == AMDGPU_MES_KIQ_PIPE)
 		ring = &adev->gfx.kiq[0].ring;
 	else if (pipe == AMDGPU_MES_SCHED_PIPE)
-		ring = &adev->mes.ring;
+		ring = &adev->mes.ring[0];
 	else
 		BUG();
 
@@ -1081,7 +1081,7 @@ static int mes_v11_0_ring_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring;
 
-	ring = &adev->mes.ring;
+	ring = &adev->mes.ring[0];
 
 	ring->funcs = &mes_v11_0_ring_funcs;
 
@@ -1134,7 +1134,7 @@ static int mes_v11_0_mqd_sw_init(struct amdgpu_device *adev,
 	if (pipe == AMDGPU_MES_KIQ_PIPE)
 		ring = &adev->gfx.kiq[0].ring;
 	else if (pipe == AMDGPU_MES_SCHED_PIPE)
-		ring = &adev->mes.ring;
+		ring = &adev->mes.ring[0];
 	else
 		BUG();
 
@@ -1226,12 +1226,12 @@ static int mes_v11_0_sw_fini(void *handle)
 			      &adev->gfx.kiq[0].ring.mqd_gpu_addr,
 			      &adev->gfx.kiq[0].ring.mqd_ptr);
 
-	amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
-			      &adev->mes.ring.mqd_gpu_addr,
-			      &adev->mes.ring.mqd_ptr);
+	amdgpu_bo_free_kernel(&adev->mes.ring[0].mqd_obj,
+			      &adev->mes.ring[0].mqd_gpu_addr,
+			      &adev->mes.ring[0].mqd_ptr);
 
 	amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
-	amdgpu_ring_fini(&adev->mes.ring);
+	amdgpu_ring_fini(&adev->mes.ring[0]);
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
 		mes_v11_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
@@ -1342,9 +1342,9 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
 
 static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev)
 {
-	if (adev->mes.ring.sched.ready) {
-		mes_v11_0_kiq_dequeue(&adev->mes.ring);
-		adev->mes.ring.sched.ready = false;
+	if (adev->mes.ring[0].sched.ready) {
+		mes_v11_0_kiq_dequeue(&adev->mes.ring[0]);
+		adev->mes.ring[0].sched.ready = false;
 	}
 
 	if (amdgpu_sriov_vf(adev)) {
@@ -1362,7 +1362,7 @@ static int mes_v11_0_hw_init(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->mes.ring.sched.ready)
+	if (adev->mes.ring[0].sched.ready)
 		goto out;
 
 	if (!adev->enable_mes_kiq) {
@@ -1407,7 +1407,7 @@ static int mes_v11_0_hw_init(void *handle)
 	 * with MES enabled.
 	 */
 	adev->gfx.kiq[0].ring.sched.ready = false;
-	adev->mes.ring.sched.ready = true;
+	adev->mes.ring[0].sched.ready = true;
 
 	return 0;
 
@@ -148,7 +148,7 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
 	union MESAPI__QUERY_MES_STATUS mes_status_pkt;
 	signed long timeout = 3000000; /* 3000 ms */
 	struct amdgpu_device *adev = mes->adev;
-	struct amdgpu_ring *ring = &mes->ring;
+	struct amdgpu_ring *ring = &mes->ring[0];
 	struct MES_API_STATUS *api_status;
 	union MESAPI__MISC *x_pkt = pkt;
 	const char *op_str, *misc_op_str;
@@ -177,7 +177,7 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
 	status_ptr = (u64 *)&adev->wb.wb[status_offset];
 	*status_ptr = 0;
 
-	spin_lock_irqsave(&mes->ring_lock, flags);
+	spin_lock_irqsave(&mes->ring_lock[0], flags);
 	r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
 	if (r)
 		goto error_unlock_free;
@@ -207,7 +207,7 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
 					     sizeof(mes_status_pkt) / 4);
 
 	amdgpu_ring_commit(ring);
-	spin_unlock_irqrestore(&mes->ring_lock, flags);
+	spin_unlock_irqrestore(&mes->ring_lock[0], flags);
 
 	op_str = mes_v12_0_get_op_string(x_pkt);
 	misc_op_str = mes_v12_0_get_misc_op_string(x_pkt);
@@ -249,7 +249,7 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
 	amdgpu_ring_undo(ring);
 
 error_unlock_free:
-	spin_unlock_irqrestore(&mes->ring_lock, flags);
+	spin_unlock_irqrestore(&mes->ring_lock[0], flags);
 
 error_wb_free:
 	amdgpu_device_wb_free(adev, status_offset);
@@ -1095,7 +1095,7 @@ static int mes_v12_0_kiq_enable_queue(struct amdgpu_device *adev)
 		return r;
 	}
 
-	kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring);
+	kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]);
 
 	r = amdgpu_ring_test_ring(kiq_ring);
 	if (r) {
@@ -1114,7 +1114,7 @@ static int mes_v12_0_queue_init(struct amdgpu_device *adev,
 	if (pipe == AMDGPU_MES_KIQ_PIPE)
 		ring = &adev->gfx.kiq[0].ring;
 	else if (pipe == AMDGPU_MES_SCHED_PIPE)
-		ring = &adev->mes.ring;
+		ring = &adev->mes.ring[0];
 	else
 		BUG();
 
@@ -1160,7 +1160,7 @@ static int mes_v12_0_ring_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring;
 
-	ring = &adev->mes.ring;
+	ring = &adev->mes.ring[0];
 
 	ring->funcs = &mes_v12_0_ring_funcs;
 
@@ -1213,7 +1213,7 @@ static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev,
 	if (pipe == AMDGPU_MES_KIQ_PIPE)
 		ring = &adev->gfx.kiq[0].ring;
 	else if (pipe == AMDGPU_MES_SCHED_PIPE)
-		ring = &adev->mes.ring;
+		ring = &adev->mes.ring[0];
 	else
 		BUG();
 
@@ -1302,12 +1302,12 @@ static int mes_v12_0_sw_fini(void *handle)
 			      &adev->gfx.kiq[0].ring.mqd_gpu_addr,
 			      &adev->gfx.kiq[0].ring.mqd_ptr);
 
-	amdgpu_bo_free_kernel(&adev->mes.ring.mqd_obj,
-			      &adev->mes.ring.mqd_gpu_addr,
-			      &adev->mes.ring.mqd_ptr);
+	amdgpu_bo_free_kernel(&adev->mes.ring[0].mqd_obj,
+			      &adev->mes.ring[0].mqd_gpu_addr,
+			      &adev->mes.ring[0].mqd_ptr);
 
 	amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
-	amdgpu_ring_fini(&adev->mes.ring);
+	amdgpu_ring_fini(&adev->mes.ring[0]);
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
 		mes_v12_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
@@ -1351,7 +1351,7 @@ static void mes_v12_0_kiq_dequeue_sched(struct amdgpu_device *adev)
 	soc21_grbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 
-	adev->mes.ring.sched.ready = false;
+	adev->mes.ring[0].sched.ready = false;
 }
 
 static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring)
@@ -1415,9 +1415,9 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
 
 static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev)
 {
-	if (adev->mes.ring.sched.ready) {
+	if (adev->mes.ring[0].sched.ready) {
 		mes_v12_0_kiq_dequeue_sched(adev);
-		adev->mes.ring.sched.ready = false;
+		adev->mes.ring[0].sched.ready = false;
 	}
 
 	mes_v12_0_enable(adev, false);
@@ -1430,7 +1430,7 @@ static int mes_v12_0_hw_init(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->mes.ring.sched.ready)
+	if (adev->mes.ring[0].sched.ready)
 		goto out;
 
 	if (!adev->enable_mes_kiq || adev->enable_uni_mes) {
@@ -1482,7 +1482,7 @@ static int mes_v12_0_hw_init(void *handle)
 	 * with MES enabled.
 	 */
 	adev->gfx.kiq[0].ring.sched.ready = false;
-	adev->mes.ring.sched.ready = true;
+	adev->mes.ring[0].sched.ready = true;
 
 	return 0;
 