Commit 3aa0115d authored by Monk Liu, committed by Alex Deucher

drm/amdgpu: cleanup all virtualization detection routine

we need to move virt detection much earlier because:

1) the HW team has confirmed that RCC_IOV_FUNC_IDENTIFIER will always
be at the 0xDE5 (dword) MMIO offset from vega10 onward; this way there
is no need to implement a detect_hw_virt() routine in each nbio/chip
file. For the VI SRIOV chips (tonga & fiji), BIF_IOV_FUNC_IDENTIFIER
is at 0x1503.

2) we need to acknowledge that we are an SRIOV VF before we do IP
discovery, because the IP discovery content will be updated by the
host every time it receives a new "REQ_GPU_INIT_DATA" request from
the guest (patches for this new handshake will follow soon).
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Emily Deng <Emily.Deng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b89659b7
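For context on the register this change centralizes: the IOV function-identifier register encodes its state in two bits, bit 0 (1 = the function is a VF, 0 = PF) and bit 31 (1 = IOV is enabled), and it reads back as 0 on parts without SRIOV support. Below is a minimal, self-contained sketch of that decode; the function name and sample values are illustrative only and not part of this patch:

#include <stdint.h>
#include <stdio.h>

/* Bit layout per this patch: the value is read from MMIO offset 0xDE5
 * (dword) on vega10 and later (RCC_IOV_FUNC_IDENTIFIER) or 0x1503 on
 * tonga/fiji (BIF_IOV_FUNC_IDENTIFIER). */
#define IOV_FUNC_IDENTIFIER__IS_VF	(1u << 0)	/* 0 = PF, 1 = VF */
#define IOV_FUNC_IDENTIFIER__IOV_EN	(1u << 31)	/* IOV enabled */

static void decode_iov_identifier(uint32_t reg)
{
	if (reg & IOV_FUNC_IDENTIFIER__IS_VF)
		printf("function is an SRIOV VF\n");
	if (reg & IOV_FUNC_IDENTIFIER__IOV_EN)
		printf("IOV is enabled on this device\n");
	if (!reg)
		printf("no SRIOV support; could still be VM passthrough\n");
}

int main(void)
{
	decode_iov_identifier(0x80000001);	/* VF with IOV enabled */
	decode_iov_identifier(0x00000000);	/* bare metal or passthrough */
	return 0;
}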
@@ -3055,6 +3055,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
		adev->enable_mes = true;

	/* detect hw virtualization here */
	amdgpu_detect_virtualization(adev);

	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
		r = amdgpu_discovery_init(adev);
		if (r) {
@@ -77,7 +77,6 @@ struct amdgpu_nbio_funcs {
			      u32 *flags);
	void (*ih_control)(struct amdgpu_device *adev);
	void (*init_registers)(struct amdgpu_device *adev);
	void (*detect_hw_virt)(struct amdgpu_device *adev);
	void (*remap_hdp_registers)(struct amdgpu_device *adev);
	void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
	void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
@@ -287,3 +287,36 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
		}
	}
}

void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_FIJI:
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_ARCTURUS:
		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
		break;
	default: /* other chips don't support SRIOV */
		reg = 0;
		break;
	}

	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		if (is_virtual_machine())	/* passthrough mode excludes sriov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}
@@ -30,6 +30,11 @@
#define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */

/* all asics after AI use this offset */
#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
/* tonga/fiji use this offset */
#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503

struct amdgpu_mm_table {
	struct amdgpu_bo *bo;
	uint32_t *cpu_addr;
@@ -305,4 +310,5 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
					unsigned int key,
					unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
void amdgpu_detect_virtualization(struct amdgpu_device *adev);
#endif
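The later hunks test these caps through helpers such as amdgpu_sriov_vf(adev). For orientation, a sketch of those caps-checking macros as they exist in amdgpu_virt.h follows; the exact spellings are reproduced from memory for illustration and are not part of this diff:

/* Sketch of the caps-checking helpers (assumed from amdgpu_virt.h). */
#define amdgpu_sriov_enabled(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
#define amdgpu_sriov_vf(adev) \
	((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)
#define amdgpu_passthrough(adev) \
	((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)

Because amdgpu_detect_virtualization() now runs during early init, these checks (for example the amdgpu_sriov_vf() tests in the nv.c, soc15.c, and vi.c hunks below) already give valid answers before IP discovery starts.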
@@ -1811,12 +1811,6 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
		>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}

static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
{
	if (is_virtual_machine()) /* passthrough mode */
		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}

static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
@@ -2179,8 +2173,6 @@ static const struct amdgpu_ip_block_version cik_common_ip_block =
int cik_set_ip_blocks(struct amdgpu_device *adev)
{
	cik_detect_hw_virtualization(adev);

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
@@ -290,23 +290,6 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
	.ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

static void nbio_v2_3_detect_hw_virt(struct amdgpu_device *adev)
{
	uint32_t reg;

	reg = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER);
	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		if (is_virtual_machine()) /* passthrough mode excludes sriov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
{
	uint32_t def, data;
@@ -338,6 +321,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
	.get_clockgating_state = nbio_v2_3_get_clockgating_state,
	.ih_control = nbio_v2_3_ih_control,
	.init_registers = nbio_v2_3_init_registers,
	.detect_hw_virt = nbio_v2_3_detect_hw_virt,
	.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
};
@@ -241,23 +241,6 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
	.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
};

static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
{
	uint32_t reg;

	reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER);
	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		if (is_virtual_machine()) /* passthrough mode excludes sriov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
{
	uint32_t def, data;
@@ -294,5 +277,4 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
	.get_clockgating_state = nbio_v6_1_get_clockgating_state,
	.ih_control = nbio_v6_1_ih_control,
	.init_registers = nbio_v6_1_init_registers,
	.detect_hw_virt = nbio_v6_1_detect_hw_virt,
};
@@ -280,12 +280,6 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev)
{
	if (is_virtual_machine()) /* passthrough mode excludes sriov mode */
		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}

static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
{
@@ -310,6 +304,5 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
	.get_clockgating_state = nbio_v7_0_get_clockgating_state,
	.ih_control = nbio_v7_0_ih_control,
	.init_registers = nbio_v7_0_init_registers,
	.detect_hw_virt = nbio_v7_0_detect_hw_virt,
	.remap_hdp_registers = nbio_v7_0_remap_hdp_registers,
};
@@ -292,23 +292,6 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
	.ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
};

static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
{
	uint32_t reg;

	reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);
	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		if (is_virtual_machine()) /* passthrough mode excludes sriov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
@@ -561,7 +544,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
	.get_clockgating_state = nbio_v7_4_get_clockgating_state,
	.ih_control = nbio_v7_4_ih_control,
	.init_registers = nbio_v7_4_init_registers,
	.detect_hw_virt = nbio_v7_4_detect_hw_virt,
	.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
	.handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
	.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
@@ -465,8 +465,6 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
	adev->nbio.funcs = &nbio_v2_3_funcs;
	adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;

	adev->nbio.funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_nv_virt_ops;
@@ -1249,12 +1249,6 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
	return 0;
}

static void si_detect_hw_virtualization(struct amdgpu_device *adev)
{
	if (is_virtual_machine()) /* passthrough mode */
		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}

static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
@@ -2165,8 +2159,6 @@ static const struct amdgpu_ip_block_version si_common_ip_block =
int si_set_ip_blocks(struct amdgpu_device *adev)
{
	si_detect_hw_virtualization(adev);

	switch (adev->asic_type) {
	case CHIP_VERDE:
	case CHIP_TAHITI:
@@ -712,7 +712,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
		adev->df.funcs = &df_v1_7_funcs;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->nbio.funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;
@@ -448,27 +448,6 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
	return true;
}

static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
@@ -1730,9 +1709,6 @@ static const struct amdgpu_ip_block_version vi_common_ip_block =
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;
@@ -1162,8 +1162,10 @@
#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 0
#define mmRCC_CONFIG_RESERVED 0x0de4 // duplicate
#define mmRCC_CONFIG_RESERVED_BASE_IDX 0
#ifndef mmRCC_IOV_FUNC_IDENTIFIER
#define mmRCC_IOV_FUNC_IDENTIFIER 0x0de5 // duplicate
#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 0
#endif
// addressBlock: syshub_mmreg_ind_syshubdec
@@ -4251,8 +4251,10 @@
#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2
#define mmRCC_CONFIG_RESERVED 0x00c4
#define mmRCC_CONFIG_RESERVED_BASE_IDX 2
#ifndef mmRCC_IOV_FUNC_IDENTIFIER
#define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5
#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
#endif
// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
@@ -2687,8 +2687,10 @@
#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2
#define mmRCC_CONFIG_RESERVED 0x00c4
#define mmRCC_CONFIG_RESERVED_BASE_IDX 2
#ifndef mmRCC_IOV_FUNC_IDENTIFIER
#define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5
#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
#endif
// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
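A note on the #ifndef guards added in the three register-header hunks above: amdgpu_virt.h now defines mmRCC_IOV_FUNC_IDENTIFIER itself, so a translation unit that includes both amdgpu_virt.h and one of these IP register headers would otherwise trip a macro redefinition warning. A minimal standalone illustration of the pattern (the values are taken from the first header hunk; the file attributions are for the example only):

/* in amdgpu_virt.h: the common offset, defined unconditionally */
#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5

/* in an IP register header: define only if no definition is already
 * visible, so including both headers in one file compiles cleanly */
#ifndef mmRCC_IOV_FUNC_IDENTIFIER
#define mmRCC_IOV_FUNC_IDENTIFIER 0x0de5
#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 0
#endif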