Commit 17e137f2 authored by Yintian Tao, committed by Alex Deucher

drm/amdgpu: skip access sdma_v5_0 registers under SRIOV (v2)

Due to the new L1.0b0c011b policy, many SDMA registers are blocked, which
raises violation warnings. In total there are six register pairs that need
to be skipped during driver init and de-init:
mmSDMA0/1_CNTL
mmSDMA0/1_F32_CNTL
mmSDMA0/1_UTCL1_PAGE
mmSDMA0/1_UTCL1_CNTL
mmSDMA0/1_CHICKEN_BITS
mmSDMA0/1_SEM_WAIT_FAIL_TIMER_CNTL

v2: squash in warning fix
Signed-off-by: Yintian Tao <yttao@amd.com>
Reviewed-by: Emily Deng <Emily.Deng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 1675c3a2
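
Every hunk below follows the same pattern: each access to one of the blocked
registers is wrapped in an amdgpu_sriov_vf() check so a virtual function never
touches host-owned state. A minimal standalone sketch of that pattern, using
hypothetical stand-ins (sriov_vf, rreg32, wreg32, a fake register) rather than
the driver's real accessors:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for illustration only; on real hardware a VF
 * touching a blocked register raises an L1 violation warning. */
static uint32_t fake_reg;
static bool sriov_vf = true;

static uint32_t rreg32(void)       { return fake_reg; }
static void     wreg32(uint32_t v) { fake_reg = v; }

static void engine_enable(bool enable)
{
    /* Mirrors the early return added to sdma_v5_0_enable() below: a VF
     * skips the whole HALT read-modify-write. */
    if (sriov_vf)
        return;

    wreg32(enable ? (rreg32() & ~1u) : (rreg32() | 1u));
}

int main(void)
{
    engine_enable(true);
    printf("reg = 0x%x (untouched for a VF)\n", fake_reg);
    return 0;
}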
@@ -88,6 +88,29 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
 };
 
+static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+};
+
 static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
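
For context, soc15_program_register_sequence() applies each
SOC15_REG_GOLDEN_VALUE entry as a masked read-modify-write. A simplified
sketch of those semantics (the struct layout and accessors here are
illustrative assumptions, not the soc15 definitions):

#include <stdint.h>

/* Simplified golden-register entry: and_mask selects which bits are
 * rewritten, or_mask supplies their new values (assumed semantics). */
struct golden {
    uint32_t reg;
    uint32_t and_mask;
    uint32_t or_mask;
};

extern uint32_t reg_read(uint32_t reg);           /* hypothetical accessor */
extern void reg_write(uint32_t reg, uint32_t v);  /* hypothetical accessor */

void program_sequence(const struct golden *g, unsigned int n)
{
    for (unsigned int i = 0; i < n; i++) {
        uint32_t v = reg_read(g[i].reg);

        v &= ~g[i].and_mask;                /* clear the masked field */
        v |= g[i].or_mask & g[i].and_mask;  /* install the new bits */
        reg_write(g[i].reg, v);
    }
}

Under this reading, the 0xfffffff7 mask on each *_RB_WPTR_POLL_CNTL entry
above rewrites every bit except bit 3 and installs 0x00403000.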
@@ -141,6 +164,11 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
 						(const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
 		break;
 	case CHIP_NAVI12:
+		if (amdgpu_sriov_vf(adev))
+			soc15_program_register_sequence(adev,
+							golden_settings_sdma_5_sriov,
+							(const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
+		else
 		soc15_program_register_sequence(adev,
 						golden_settings_sdma_5,
 						(const u32)ARRAY_SIZE(golden_settings_sdma_5));
@@ -526,7 +554,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
  */
 static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 {
-	u32 f32_cntl, phase_quantum = 0;
+	u32 f32_cntl = 0, phase_quantum = 0;
 	int i;
 
 	if (amdgpu_sdma_phase_quantum) {
@@ -554,9 +582,12 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 	}
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (!amdgpu_sriov_vf(adev)) {
 		f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
 		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
 				AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+		}
+
 		if (enable && amdgpu_sdma_phase_quantum) {
 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
 				phase_quantum);
@@ -565,6 +596,7 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 			WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
 				phase_quantum);
 		}
+		if (!amdgpu_sriov_vf(adev))
 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
 	}
 }
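
The "u32 f32_cntl = 0" change above is the warning fix squashed in for v2:
with the RREG32 read now conditional, the equally conditional WREG32 looks
like a use of an uninitialized variable to the compiler. A minimal sketch of
the shape gcc's -Wmaybe-uninitialized may key on (hypothetical helpers):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helpers standing in for amdgpu_sriov_vf()/RREG32/WREG32. */
extern bool is_vf(void);
extern uint32_t reg_read(void);
extern void reg_write(uint32_t v);

void ctx_switch_set(bool enable)
{
    uint32_t cntl = 0;  /* the v2 fix: without "= 0" the write below may
                         * be flagged as maybe-uninitialized */

    if (!is_vf())
        cntl = reg_read() | (enable ? 1u : 0u);

    if (!is_vf())       /* the compiler cannot prove this matches the
                         * guard on the read above */
        reg_write(cntl);
}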
@@ -588,6 +620,9 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
 		sdma_v5_0_rlc_stop(adev);
 	}
 
+	if (amdgpu_sriov_vf(adev))
+		return;
+
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
 		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
@@ -620,6 +655,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
 		ring = &adev->sdma.instance[i].ring;
 		wb_offset = (ring->rptr_offs * 4);
 
+		if (!amdgpu_sriov_vf(adev))
 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
 
 		/* Set ring buffer size in dwords */
@@ -696,6 +732,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
 		/* set minor_ptr_update to 0 after wptr programed */
 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
 
+		if (!amdgpu_sriov_vf(adev)) {
 		/* set utc l1 enable flag always to 1 */
 		temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
 		temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
@@ -716,6 +753,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
 		temp &= 0xFF0FFF;
 		temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
 		WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+		}
 
 		if (!amdgpu_sriov_vf(adev)) {
 			/* unhalt engine */
@@ -1385,6 +1423,7 @@ static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
 {
 	u32 sdma_cntl;
 
+	if (!amdgpu_sriov_vf(adev)) {
 	u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
 		sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
 		sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
@@ -1393,6 +1432,7 @@ static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
 	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
 			state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
 	WREG32(reg_offset, sdma_cntl);
+	}
 
 	return 0;
 }