Commit 4661482b authored by Lang Yu, committed by Alex Deucher

drm/amdgpu: correct NBIO v7.11 programming

The NBIO v7.7 registers and definitions were used before; switch to the
NBIO v7.11 ones now and fix the incorrect programming.
Signed-off-by: Lang Yu <Lang.Yu@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Yifan Zhang <yifan1.zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 2c1fe3c4
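For reference when reading the hunks below: REG_SET_FIELD() is a read-modify-write helper that clears the named field of a register value with a mask and ORs in the new value at the field's shift, with the mask/shift pairs taken from the generated register headers. A minimal standalone sketch of that pattern follows; the DOORBELL_RANGE field layout, masks, and values here are made up for illustration and are not the real definitions.

```c
/*
 * Sketch of the read-modify-write pattern behind REG_SET_FIELD().
 * The field layout below is hypothetical; real masks/shifts come from
 * the generated *_sh_mask.h headers.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: OFFSET in bits [27:2], SIZE in bits [1:0]. */
#define DOORBELL_RANGE__OFFSET_MASK   0x0FFFFFFCu
#define DOORBELL_RANGE__OFFSET_SHIFT  2
#define DOORBELL_RANGE__SIZE_MASK     0x00000003u
#define DOORBELL_RANGE__SIZE_SHIFT    0

/* Clear the field, then OR in the new value at the field's position. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
        uint32_t doorbell_range = 0;    /* stands in for RREG32_PCIE_PORT(reg) */

        doorbell_range = set_field(doorbell_range, DOORBELL_RANGE__OFFSET_MASK,
                                   DOORBELL_RANGE__OFFSET_SHIFT, 0x180);
        doorbell_range = set_field(doorbell_range, DOORBELL_RANGE__SIZE_MASK,
                                   DOORBELL_RANGE__SIZE_SHIFT, 2);

        printf("doorbell_range = 0x%08x\n", doorbell_range);
        return 0;
}
```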
@@ -66,19 +66,19 @@ static void nbio_v7_11_sdma_doorbell_range(struct amdgpu_device *adev, int insta
 					 bool use_doorbell, int doorbell_index,
 					 int doorbell_size)
 {
-	u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_SDMA0_DOORBELL_RANGE);
+	u32 reg = SOC15_REG_OFFSET(NBIO, 0, regGDC0_BIF_CSDMA_DOORBELL_RANGE);
 	u32 doorbell_range = RREG32_PCIE_PORT(reg);
 
 	if (use_doorbell) {
 		doorbell_range = REG_SET_FIELD(doorbell_range,
-					       GDC0_BIF_SDMA0_DOORBELL_RANGE,
+					       GDC0_BIF_CSDMA_DOORBELL_RANGE,
 					       OFFSET, doorbell_index);
 		doorbell_range = REG_SET_FIELD(doorbell_range,
-					       GDC0_BIF_SDMA0_DOORBELL_RANGE,
+					       GDC0_BIF_CSDMA_DOORBELL_RANGE,
 					       SIZE, doorbell_size);
 	} else {
 		doorbell_range = REG_SET_FIELD(doorbell_range,
-					       GDC0_BIF_SDMA0_DOORBELL_RANGE,
+					       GDC0_BIF_CSDMA_DOORBELL_RANGE,
 					       SIZE, 0);
 	}
@@ -145,27 +145,25 @@ static void nbio_v7_11_enable_doorbell_aperture(struct amdgpu_device *adev,
 static void nbio_v7_11_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
 							  bool enable)
 {
-	/* u32 tmp = 0;
+	u32 tmp = 0;
 
 	if (enable) {
-		tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
+		tmp = REG_SET_FIELD(tmp, BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL,
 				    DOORBELL_SELFRING_GPA_APER_EN, 1) |
-		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
+		      REG_SET_FIELD(tmp, BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL,
 				    DOORBELL_SELFRING_GPA_APER_MODE, 1) |
-		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
+		      REG_SET_FIELD(tmp, BIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL,
 				    DOORBELL_SELFRING_GPA_APER_SIZE, 0);
 
 		WREG32_SOC15(NBIO, 0,
-			     regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
+			     regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_LOW,
 			     lower_32_bits(adev->doorbell.base));
 		WREG32_SOC15(NBIO, 0,
-			     regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
+			     regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_BASE_HIGH,
 			     upper_32_bits(adev->doorbell.base));
 	}
 
-	WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL,
-		     tmp);
-	*/
+	WREG32_SOC15(NBIO, 0, regBIF_BX_PF1_DOORBELL_SELFRING_GPA_APER_CNTL, tmp);
 }
@@ -216,12 +214,12 @@ static void nbio_v7_11_ih_control(struct amdgpu_device *adev)
 static u32 nbio_v7_11_get_hdp_flush_req_offset(struct amdgpu_device *adev)
 {
-	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
+	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF1_GPU_HDP_FLUSH_REQ);
 }
 
 static u32 nbio_v7_11_get_hdp_flush_done_offset(struct amdgpu_device *adev)
 {
-	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
+	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF1_GPU_HDP_FLUSH_DONE);
 }
 
 static u32 nbio_v7_11_get_pcie_index_offset(struct amdgpu_device *adev)
@@ -236,27 +234,27 @@ static u32 nbio_v7_11_get_pcie_data_offset(struct amdgpu_device *adev)
 static u32 nbio_v7_11_get_pcie_port_index_offset(struct amdgpu_device *adev)
 {
-	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_INDEX);
+	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF1_RSMU_INDEX);
 }
 
 static u32 nbio_v7_11_get_pcie_port_data_offset(struct amdgpu_device *adev)
 {
-	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_DATA);
+	return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF1_RSMU_DATA);
 }
 
 const struct nbio_hdp_flush_reg nbio_v7_11_hdp_flush_reg = {
-	.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
-	.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
-	.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
-	.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
-	.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
-	.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
-	.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
-	.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
-	.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
-	.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
-	.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
-	.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
+	.ref_and_mask_cp0 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP0_MASK,
+	.ref_and_mask_cp1 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP1_MASK,
+	.ref_and_mask_cp2 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP2_MASK,
+	.ref_and_mask_cp3 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP3_MASK,
+	.ref_and_mask_cp4 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP4_MASK,
+	.ref_and_mask_cp5 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP5_MASK,
+	.ref_and_mask_cp6 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP6_MASK,
+	.ref_and_mask_cp7 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP7_MASK,
+	.ref_and_mask_cp8 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP8_MASK,
+	.ref_and_mask_cp9 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__CP9_MASK,
+	.ref_and_mask_sdma0 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
+	.ref_and_mask_sdma1 = BIF_BX_PF1_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
 };
 
 static void nbio_v7_11_init_registers(struct amdgpu_device *adev)
...
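The ref_and_mask_* entries of nbio_v7_11_hdp_flush_reg above are per-client bits in the GPU_HDP_FLUSH_DONE register: after a flush is requested through the REQ register, the matching DONE bit is polled until it is set. A self-contained sketch of that poll-until-set pattern follows; simulate_read() and the mask value are stand-ins for a real MMIO read and the real bit definition, not the driver's code.

```c
/*
 * Sketch of the "wait for (reg & mask) == ref" pattern that the
 * ref_and_mask_* values feed into. Entirely simulated; no MMIO here.
 */
#include <stdint.h>
#include <stdio.h>

#define GPU_HDP_FLUSH_DONE__CP0_MASK 0x00000001u	/* example mask only */

static uint32_t simulate_read(int attempt)
{
        /* Pretend the flush completes on the third poll. */
        return attempt >= 3 ? GPU_HDP_FLUSH_DONE__CP0_MASK : 0;
}

int main(void)
{
        uint32_t ref  = GPU_HDP_FLUSH_DONE__CP0_MASK;
        uint32_t mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
        int attempt = 0;

        while ((simulate_read(attempt) & mask) != ref)
                attempt++;

        printf("HDP flush observed after %d polls\n", attempt);
        return 0;
}
```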
@@ -8187,9 +8187,9 @@
 #define regBIF_BX0_PCIE_INDEX_BASE_IDX                  5
 #define regBIF_BX0_PCIE_DATA                            0x800d
 #define regBIF_BX0_PCIE_DATA_BASE_IDX                   5
-#define regBIF_BX0_PCIE_INDEX2                          0xe
+#define regBIF_BX0_PCIE_INDEX2                          0x800e
 #define regBIF_BX0_PCIE_INDEX2_BASE_IDX                 0
-#define regBIF_BX0_PCIE_DATA2                           0xf
+#define regBIF_BX0_PCIE_DATA2                           0x800f
 #define regBIF_BX0_PCIE_DATA2_BASE_IDX                  0
 #define regBIF_BX0_SBIOS_SCRATCH_0                      0x8048
 #define regBIF_BX0_SBIOS_SCRATCH_0_BASE_IDX             5
@@ -8678,7 +8678,10 @@
 #define regBIF_BX_PF1_MM_DATA_BASE_IDX                  0
 #define regBIF_BX_PF1_MM_INDEX_HI                       0x0006
 #define regBIF_BX_PF1_MM_INDEX_HI_BASE_IDX              0
+#define regBIF_BX_PF1_RSMU_INDEX                        0x0000
+#define regBIF_BX_PF1_RSMU_INDEX_BASE_IDX               1
+#define regBIF_BX_PF1_RSMU_DATA                         0x0001
+#define regBIF_BX_PF1_RSMU_DATA_BASE_IDX                1
 
 // addressBlock: nbio_nbif0_bif_bx_BIFDEC1:1
 // base address: 0x0
...
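The corrected PCIE_INDEX2/DATA2 offsets and the newly added RSMU_INDEX/DATA pair above are index/data doorways for indirect register access: the target register's address is written to the index register and the value is then read or written through the data register, which is the access path helpers such as RREG32_PCIE_PORT() in the first hunk rely on via the offsets returned by nbio_v7_11_get_pcie_port_index_offset()/nbio_v7_11_get_pcie_port_data_offset(). A self-contained sketch of the scheme follows; the simulated MMIO arrays and helper names are illustrative only, not the driver's implementation.

```c
/*
 * Sketch of an index/data indirect register access scheme.
 * regs are simulated with plain arrays; offsets are taken from the hunk
 * above but everything else here is a made-up model.
 */
#include <stdint.h>
#include <stdio.h>

#define PCIE_INDEX2_OFFSET 0x800e
#define PCIE_DATA2_OFFSET  0x800f

static uint32_t mmio[0x10000];           /* fake directly addressed space */
static uint32_t indirect_space[0x1000];  /* fake indirectly addressed space */

static void wreg(uint32_t offset, uint32_t val)
{
        mmio[offset] = val;
        if (offset == PCIE_DATA2_OFFSET)
                indirect_space[mmio[PCIE_INDEX2_OFFSET]] = val;
}

static uint32_t rreg(uint32_t offset)
{
        if (offset == PCIE_DATA2_OFFSET)
                return indirect_space[mmio[PCIE_INDEX2_OFFSET]];
        return mmio[offset];
}

int main(void)
{
        /* Program indirect register 0x42 through the index/data pair. */
        wreg(PCIE_INDEX2_OFFSET, 0x42);
        wreg(PCIE_DATA2_OFFSET, 0xdeadbeef);

        /* Select the indirect register again before reading it back. */
        wreg(PCIE_INDEX2_OFFSET, 0x42);
        printf("indirect[0x42] = 0x%08x\n", rreg(PCIE_DATA2_OFFSET));
        return 0;
}
```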