Commit 68a9fb4d authored by Boyuan Zhang, committed by Alex Deucher

drm/amdgpu: change HWIP from UVD to VCN for VCN2.5

Rename the hardware IP name from UVD to VCN to reduce confusion.
The hardware IP names UVD and VCN are equivalent on VCN 2.5 ASICs;
use the name VCN for future VCN-based ASICs.

V2: update description
V3: rebase
Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
Reviewed-by: James Zhu <james.zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6e696ae3
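Why the two names are interchangeable on these ASICs: in amdgpu, the VCN hardware-IP index is an alias of the UVD one, and the SOC15 register accessors use the ip argument only to token-paste an _HWIP index that selects a row of the per-IP register-offset table. Below is a minimal, self-contained sketch of that mechanism; the enum neighbours, base offsets, and register number are illustrative stand-ins, not the real driver definitions (see amdgpu.h and soc15_common.h for those):

/* Sketch only: mimics the aliasing pattern, not the actual amdgpu headers. */
#include <stdio.h>

enum amd_hw_ip_block_type {
	GC_HWIP = 1,
	UVD_HWIP,
	VCN_HWIP = UVD_HWIP,	/* VCN shares UVD's register-offset row */
};

static const unsigned int reg_offset[4][2] = {
	[UVD_HWIP] = { 0x7800, 0x7e00 },	/* made-up per-instance bases */
};

/* Same shape as the real SOC15_REG_OFFSET(): ip is token-pasted with _HWIP,
 * so the UVD and VCN spellings index the same row. */
#define SOC15_REG_OFFSET(ip, inst, reg) \
	(reg_offset[ip##_HWIP][inst] + (reg))

int main(void)
{
	unsigned int mmUVD_STATUS = 0x05af;	/* illustrative register number */

	/* Both spellings resolve to the identical MMIO offset. */
	printf("UVD: 0x%x\n", SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS));
	printf("VCN: 0x%x\n", SOC15_REG_OFFSET(VCN, 0, mmUVD_STATUS));
	return 0;
}

Because VCN_HWIP aliases UVD_HWIP, every RREG32_SOC15()/WREG32_SOC15()/SOC15_REG_OFFSET() call below expands to the same offset lookup before and after this patch; only the spelling at the call sites changes.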
@@ -86,7 +86,7 @@ static int vcn_v2_5_early_init(void *handle)
 	adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-		harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
+		harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
 		if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
 			adev->vcn.harvest_config |= 1 << i;
 	}
@@ -177,15 +177,15 @@ static int vcn_v2_5_sw_init(void *handle)
 		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
 		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
-		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
+		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
 		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
-		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
+		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
 		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
-		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
+		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
 		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
-		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
+		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
 		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
-		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);
+		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);

 		ring = &adev->vcn.inst[j].ring_dec;
 		ring->use_doorbell = true;
@@ -399,46 +399,46 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
 			continue;
 		/* cache window 0: fw */
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
-			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
-			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
+			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
 			offset = 0;
 		} else {
-			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 				lower_32_bits(adev->vcn.inst[i].gpu_addr));
-			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 				upper_32_bits(adev->vcn.inst[i].gpu_addr));
 			offset = size;
-			WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
+			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
 				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 		}
-		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
+		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);

 		/* cache window 1: stack */
-		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
-		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
-		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
+		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
-		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

 		/* cache window 2: context */
-		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
-		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
-		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
+		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
-		WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

 		/* non-cache window */
-		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
 			lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
-		WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
 			upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
-		WREG32_SOC15(UVD, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
-		WREG32_SOC15(UVD, i, mmUVD_VCPU_NONCACHE_SIZE0,
+		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
 			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
 	}
 }
@@ -452,91 +452,91 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 		if (!indirect) {
 			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
 			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
 			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
 		} else {
 			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
 			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-				UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
 			WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-				UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
 		}
 		offset = 0;
 	} else {
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
 		offset = size;
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+			VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
 			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
 	}

 	if (!indirect)
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
 	else
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

 	/* cache window 1: stack */
 	if (!indirect) {
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
 			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
 			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
 	} else {
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
 		WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-			UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
 	}
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+		VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

 	/* cache window 2: context */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
 		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
 		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+		VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+		VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

 	/* non-cache window */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
 		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
 		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+		VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
+		VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
 		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

 	/* VCN global tiling registers */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+		VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
 }

 /**
@@ -690,19 +690,19 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
 		UVD_CGC_CTRL__VCPU_MODE_MASK |
 		UVD_CGC_CTRL__MMSCH_MODE_MASK);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+		VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

 	/* turn off clock gating */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
+		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

 	/* turn on SUVD clock gating */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

 	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
 }

 /**
@@ -774,13 +774,13 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 	uint32_t rb_bufsz, tmp;

 	/* disable register anti-hang mechanism */
-	WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
+	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
 		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 	/* enable dynamic power gating mode */
-	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
+	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
 	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
 	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
-	WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);
+	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

 	if (indirect)
 		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
@@ -793,11 +793,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
 	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

 	/* disable master interupt */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
+		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

 	/* setup mmUVD_LMI_CTRL */
 	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
@@ -809,28 +809,28 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 		0x00100000L);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
+		VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_MPC_CNTL),
+		VCN, 0, mmUVD_MPC_CNTL),
 		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_MPC_SET_MUXA0),
+		VCN, 0, mmUVD_MPC_SET_MUXA0),
 		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
 		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
 		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_MPC_SET_MUXB0),
+		VCN, 0, mmUVD_MPC_SET_MUXB0),
 		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
 		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
 		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_MPC_SET_MUX),
+		VCN, 0, mmUVD_MPC_SET_MUX),
 		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
@@ -838,26 +838,26 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);

 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
+		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
+		VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

 	/* enable LMI MC and UMC channels */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
+		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);

 	/* unblock VCPU register access */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
+		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

 	/* enable master interrupt */
 	WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
-		UVD, 0, mmUVD_MASTINT_EN),
+		VCN, 0, mmUVD_MASTINT_EN),
 		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

 	if (indirect)
@@ -873,39 +873,39 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
 	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
-	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

 	/* Stall DPG before WPTR/RPTR reset */
-	WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS),
+	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
 		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
 		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
 	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

 	/* set the write pointer delay */
-	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
+	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

 	/* set the wb address */
-	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
+	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
 		(upper_32_bits(ring->gpu_addr) >> 2));

 	/* programm the RB_BASE for ring buffer */
-	WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
 		lower_32_bits(ring->gpu_addr));
-	WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
 		upper_32_bits(ring->gpu_addr));

 	/* Initialize the ring buffer's read and write pointers */
-	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);
+	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

-	WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);
+	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

-	ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
+	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
-	WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
 		lower_32_bits(ring->wptr));

 	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
 	/* Unstall DPG */
-	WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS),
+	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
 		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

 	return 0;
@@ -929,12 +929,12 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
 		}

 		/* disable register anti-hang mechanism */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
+		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
 			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

 		/* set uvd status busy */
-		tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
-		WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
+		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
 	}

 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
@@ -947,44 +947,44 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
 		/* enable VCPU clock */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
 			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

 		/* disable master interrupt */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
+		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
 			~UVD_MASTINT_EN__VCPU_EN_MASK);

 		/* setup mmUVD_LMI_CTRL */
-		tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
+		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
 		tmp &= ~0xff;
-		WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8|
+		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
 			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
 			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
 			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

 		/* setup mmUVD_MPC_CNTL */
-		tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
+		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
 		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
 		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
 		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

 		/* setup UVD_MPC_SET_MUXA0 */
-		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
+		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
 			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
 			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
 			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
 			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

 		/* setup UVD_MPC_SET_MUXB0 */
-		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
+		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
 			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
 			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
 			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
 			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

 		/* setup mmUVD_MPC_SET_MUX */
-		WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
+		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
 			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
 			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
 			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
@@ -997,27 +997,27 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
 		/* VCN global tiling registers */
-		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
 			adev->gfx.config.gb_addr_config);
-		WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
 			adev->gfx.config.gb_addr_config);

 		/* enable LMI MC and UMC channels */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
+		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
 			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

 		/* unblock VCPU register access */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
+		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
 			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
 			~UVD_VCPU_CNTL__BLK_RST_MASK);

 		for (k = 0; k < 10; ++k) {
 			uint32_t status;

 			for (j = 0; j < 100; ++j) {
-				status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
+				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
 				if (status & 2)
 					break;
 				if (amdgpu_emu_mode == 1)
@@ -1030,11 +1030,11 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
 				break;

 			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
-			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
 				UVD_VCPU_CNTL__BLK_RST_MASK,
 				~UVD_VCPU_CNTL__BLK_RST_MASK);
 			mdelay(10);
-			WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
 				~UVD_VCPU_CNTL__BLK_RST_MASK);

 			mdelay(10);
@@ -1047,15 +1047,15 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
 		}

 		/* enable master interrupt */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
+		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
 			UVD_MASTINT_EN__VCPU_EN_MASK,
 			~UVD_MASTINT_EN__VCPU_EN_MASK);

 		/* clear the busy bit of VCN_STATUS */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
+		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
 			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

-		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);
+		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

 		ring = &adev->vcn.inst[i].ring_dec;
 		/* force RBC into idle state */
@@ -1065,39 +1065,39 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
-		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);
+		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

 		fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
 		/* programm the RB_BASE for ring buffer */
-		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
 			lower_32_bits(ring->gpu_addr));
-		WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
 			upper_32_bits(ring->gpu_addr));

 		/* Initialize the ring buffer's read and write pointers */
-		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);
+		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

-		ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
+		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
-		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
+		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
 			lower_32_bits(ring->wptr));
 		fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

 		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
 		ring = &adev->vcn.inst[i].ring_enc[0];
-		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
-		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
-		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
 		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

 		fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
 		ring = &adev->vcn.inst[i].ring_enc[1];
-		WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
-		WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
-		WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-		WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
 		fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
 	}
@@ -1118,33 +1118,33 @@ static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
 	 * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
 	 * memory descriptor location
 	 */
-	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
-	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

 	/* 2, update vmid of descriptor */
-	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
 	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
 	/* use domain0 for MM scheduler */
 	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
-	WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);

 	/* 3, notify mmsch about the size of this descriptor */
-	WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

 	/* 4, set resp to zero */
-	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

 	/*
 	 * 5, kick off the initialization and wait until
 	 * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
 	 */
-	WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

-	data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
 	loop = 10;
 	while ((data & 0x10000002) != 0x10000002) {
 		udelay(100);
-		data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
 		loop--;
 		if (!loop)
 			break;
@@ -1187,93 +1187,93 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
 		table_size = 0;

 		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
-			SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
 			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

 		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
 		/* mc resume*/
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			MMSCH_V1_0_INSERT_DIRECT_WT(
-				SOC15_REG_OFFSET(UVD, i,
+				SOC15_REG_OFFSET(VCN, i,
 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
 			MMSCH_V1_0_INSERT_DIRECT_WT(
-				SOC15_REG_OFFSET(UVD, i,
+				SOC15_REG_OFFSET(VCN, i,
 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
 			offset = 0;
 			MMSCH_V1_0_INSERT_DIRECT_WT(
-				SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
+				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
 		} else {
 			MMSCH_V1_0_INSERT_DIRECT_WT(
-				SOC15_REG_OFFSET(UVD, i,
+				SOC15_REG_OFFSET(VCN, i,
 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
 				lower_32_bits(adev->vcn.inst[i].gpu_addr));
 			MMSCH_V1_0_INSERT_DIRECT_WT(
-				SOC15_REG_OFFSET(UVD, i,
+				SOC15_REG_OFFSET(VCN, i,
 					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
 				upper_32_bits(adev->vcn.inst[i].gpu_addr));
 			offset = size;
 			MMSCH_V1_0_INSERT_DIRECT_WT(
-				SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
 				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
 		}

 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
 			size);
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i,
+			SOC15_REG_OFFSET(VCN, i,
 				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i,
+			SOC15_REG_OFFSET(VCN, i,
 				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
 			0);
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
 			AMDGPU_VCN_STACK_SIZE);
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i,
+			SOC15_REG_OFFSET(VCN, i,
 				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
 			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
 				AMDGPU_VCN_STACK_SIZE));
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i,
+			SOC15_REG_OFFSET(VCN, i,
 				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
 			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
 				AMDGPU_VCN_STACK_SIZE));
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
 			0);
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
 			AMDGPU_VCN_CONTEXT_SIZE);

 		ring = &adev->vcn.inst[i].ring_enc[0];
 		ring->wptr = 0;
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
 			lower_32_bits(ring->gpu_addr));
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
 			upper_32_bits(ring->gpu_addr));
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
 			ring->ring_size / 4);

 		ring = &adev->vcn.inst[i].ring_dec;
 		ring->wptr = 0;
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i,
+			SOC15_REG_OFFSET(VCN, i,
 				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
 			lower_32_bits(ring->gpu_addr));
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i,
+			SOC15_REG_OFFSET(VCN, i,
 				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
 			upper_32_bits(ring->gpu_addr));
@@ -1285,7 +1285,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
 		MMSCH_V1_0_INSERT_DIRECT_WT(
-			SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+			SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);

 	/* add end packet */
 	memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
@@ -1306,24 +1306,24 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
 	uint32_t tmp;

 	/* Wait for power status to be 1 */
-	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

 	/* wait for read ptr to be equal to write ptr */
-	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
+	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
-	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

-	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
+	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
-	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);

-	tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
-	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);

-	SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
 		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

 	/* disable dynamic power gating mode */
-	WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
+	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
 		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

 	return 0;
@@ -1367,17 +1367,17 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
 			return r;

 		/* block VCPU register access */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
+		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
 			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
 			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

 		/* reset VCPU */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
 			UVD_VCPU_CNTL__BLK_RST_MASK,
 			~UVD_VCPU_CNTL__BLK_RST_MASK);

 		/* disable VCPU clock */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
 			~(UVD_VCPU_CNTL__CLK_EN_MASK));

 		/* clear status */
@@ -1386,7 +1386,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
 		vcn_v2_5_enable_clock_gating(adev);

 		/* enable register anti-hang mechanism */
-		WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
+		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
 			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
 			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 	}
@@ -1408,11 +1408,11 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
 	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
 		DRM_DEBUG("dpg pause state changed %d -> %d",
 			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
-		reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
+		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
 			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

 		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
-			SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
+			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);

 			if (!ret_code) {
@@ -1420,15 +1420,15 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
 				/* pause DPG */
 				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
-				WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

 				/* wait for ACK */
-				SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
+				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);

 				/* Stall DPG before WPTR/RPTR reset */
-				WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS),
+				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
 					UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
 					~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
@@ -1436,39 +1436,39 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
 				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
 				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
 				ring->wptr = 0;
-				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
-				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
-				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
-				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

 				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
 				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
 				ring->wptr = 0;
-				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
-				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
-				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
-				WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

 				fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
-				WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
-					RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+				WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
+					RREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 				fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

 				/* Unstall DPG */
-				WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS),
+				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
 					0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

-				SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
+				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
 					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 			}
 		} else {
 			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
-			WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

-			SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
+			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
 				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 		}
 		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
@@ -1488,7 +1488,7 @@ static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;

-	return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
+	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
 }

 /**
@@ -1505,7 +1505,7 @@ static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
 	if (ring->use_doorbell)
 		return adev->wb.wb[ring->wptr_offs];
 	else
-		return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
+		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
 }

 /**
@@ -1520,14 +1520,14 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
 	struct amdgpu_device *adev = ring->adev;

 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-		WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
+		WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
 			lower_32_bits(ring->wptr) | 0x80000000);

 	if (ring->use_doorbell) {
 		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 	} else {
-		WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
+		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
 	}
 }
@@ -1573,9 +1573,9 @@ static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
 	struct amdgpu_device *adev = ring->adev;

 	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
-		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
+		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
 	else
-		return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
+		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
 }

 /**
@@ -1593,12 +1593,12 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
 		if (ring->use_doorbell)
 			return adev->wb.wb[ring->wptr_offs];
 		else
-			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
+			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
 	} else {
 		if (ring->use_doorbell)
 			return adev->wb.wb[ring->wptr_offs];
 		else
-			return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
+			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
 	}
 }
@@ -1618,14 +1618,14 @@ static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
 			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 		} else {
-			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 		}
 	} else {
 		if (ring->use_doorbell) {
 			adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
 			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 		} else {
-			WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 		}
 	}
 }