Commit 4cd4c5c0 authored by Monk Liu's avatar Monk Liu Committed by Alex Deucher

drm/amdgpu: cleanup vega10 SRIOV code path

We can simplify all those unnecessary functions under
SRIOV for Vega10 since:
1) PSP L1 policy is by force enabled in SRIOV
2) the original logic always sets all flags, which makes it
   a dummy step

besides,
1) the ih_doorbell_range set should also be skipped
for VEGA10 SRIOV.
2) the gfx_common registers should also be skipped
for VEGA10 SRIOV.
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Emily Deng <Emily.Deng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 67194518
...@@ -1643,9 +1643,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) ...@@ -1643,9 +1643,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
r = amdgpu_virt_request_full_gpu(adev, true); r = amdgpu_virt_request_full_gpu(adev, true);
if (r) if (r)
return -EAGAIN; return -EAGAIN;
/* query the reg access mode at the very beginning */
amdgpu_virt_init_reg_access_mode(adev);
} }
adev->pm.pp_feature = amdgpu_pp_feature_mask; adev->pm.pp_feature = amdgpu_pp_feature_mask;
......
...@@ -430,48 +430,3 @@ uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest) ...@@ -430,48 +430,3 @@ uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
return clk; return clk;
} }
void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev)
{
struct amdgpu_virt *virt = &adev->virt;
if (virt->ops && virt->ops->init_reg_access_mode)
virt->ops->init_reg_access_mode(adev);
}
/**
 * amdgpu_virt_support_psp_prg_ih_reg() - check for PSP-programmed IH regs
 * @adev: amdgpu device pointer
 *
 * Return: true when running as an SRIOV VF whose register access mode has
 * the AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH flag set, false otherwise.
 */
bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev)
{
	return amdgpu_sriov_vf(adev) &&
	       (adev->virt.reg_access_mode & AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH);
}
/**
 * amdgpu_virt_support_rlc_prg_reg() - check for RLC-programmed registers
 * @adev: amdgpu device pointer
 *
 * Return: true when running as an SRIOV VF with the
 * AMDGPU_VIRT_REG_ACCESS_RLC mode flag set and not currently in SRIOV
 * runtime (exclusive access) mode, false otherwise.
 */
bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_sriov_runtime(adev))
		return false;

	return !!(adev->virt.reg_access_mode & AMDGPU_VIRT_REG_ACCESS_RLC);
}
/**
 * amdgpu_virt_support_skip_setting() - check if register programming is
 * skipped because the hypervisor already programs the registers
 * @adev: amdgpu device pointer
 *
 * NOTE(review): the flag name AMDGPU_VIRT_REG_SKIP_SEETING ("SEETING")
 * is a typo carried over from its declaration; renaming it would have to
 * happen at the definition site as well.
 *
 * Return: true when running as an SRIOV VF with the skip-setting mode
 * flag set, false otherwise.
 */
bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev)
{
	return amdgpu_sriov_vf(adev) &&
	       (adev->virt.reg_access_mode & AMDGPU_VIRT_REG_SKIP_SEETING);
}
...@@ -48,12 +48,6 @@ struct amdgpu_vf_error_buffer { ...@@ -48,12 +48,6 @@ struct amdgpu_vf_error_buffer {
uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE]; uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
}; };
/* According to the fw feature, some new reg access modes are supported */
#define AMDGPU_VIRT_REG_ACCESS_LEGACY (1 << 0) /* directly mmio */
#define AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH (1 << 1) /* by PSP */
#define AMDGPU_VIRT_REG_ACCESS_RLC (1 << 2) /* by RLC */
#define AMDGPU_VIRT_REG_SKIP_SEETING (1 << 3) /* Skip setting reg */
/** /**
* struct amdgpu_virt_ops - amdgpu device virt operations * struct amdgpu_virt_ops - amdgpu device virt operations
*/ */
...@@ -65,7 +59,6 @@ struct amdgpu_virt_ops { ...@@ -65,7 +59,6 @@ struct amdgpu_virt_ops {
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf); int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
int (*force_dpm_level)(struct amdgpu_device *adev, u32 level); int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
void (*init_reg_access_mode)(struct amdgpu_device *adev);
}; };
/* /*
...@@ -315,10 +308,4 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size, ...@@ -315,10 +308,4 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest); uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest); uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev);
bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev);
bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev);
bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev);
#endif #endif
...@@ -715,14 +715,12 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) ...@@ -715,14 +715,12 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{ {
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
if (!amdgpu_virt_support_skip_setting(adev)) { soc15_program_register_sequence(adev,
soc15_program_register_sequence(adev, golden_settings_gc_9_0,
golden_settings_gc_9_0, ARRAY_SIZE(golden_settings_gc_9_0));
ARRAY_SIZE(golden_settings_gc_9_0)); soc15_program_register_sequence(adev,
soc15_program_register_sequence(adev, golden_settings_gc_9_0_vg10,
golden_settings_gc_9_0_vg10, ARRAY_SIZE(golden_settings_gc_9_0_vg10));
ARRAY_SIZE(golden_settings_gc_9_0_vg10));
}
break; break;
case CHIP_VEGA12: case CHIP_VEGA12:
soc15_program_register_sequence(adev, soc15_program_register_sequence(adev,
...@@ -3801,7 +3799,8 @@ static int gfx_v9_0_hw_init(void *handle) ...@@ -3801,7 +3799,8 @@ static int gfx_v9_0_hw_init(void *handle)
int r; int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfx_v9_0_init_golden_registers(adev); if (!amdgpu_sriov_vf(adev))
gfx_v9_0_init_golden_registers(adev);
gfx_v9_0_constants_init(adev); gfx_v9_0_constants_init(adev);
......
...@@ -1201,7 +1201,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) ...@@ -1201,7 +1201,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
if (amdgpu_virt_support_skip_setting(adev)) if (amdgpu_sriov_vf(adev))
break; break;
/* fall through */ /* fall through */
case CHIP_VEGA20: case CHIP_VEGA20:
......
...@@ -111,7 +111,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) ...@@ -111,7 +111,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
if (amdgpu_virt_support_skip_setting(adev)) if (amdgpu_sriov_vf(adev))
return; return;
/* Set default page address. */ /* Set default page address. */
...@@ -159,7 +159,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) ...@@ -159,7 +159,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{ {
uint32_t tmp; uint32_t tmp;
if (amdgpu_virt_support_skip_setting(adev)) if (amdgpu_sriov_vf(adev))
return; return;
/* Setup L2 cache */ /* Setup L2 cache */
...@@ -208,7 +208,7 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev) ...@@ -208,7 +208,7 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
{ {
if (amdgpu_virt_support_skip_setting(adev)) if (amdgpu_sriov_vf(adev))
return; return;
WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
...@@ -348,7 +348,7 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev) ...@@ -348,7 +348,7 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev)
0); 0);
WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp); WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp);
if (!amdgpu_virt_support_skip_setting(adev)) { if (!amdgpu_sriov_vf(adev)) {
/* Setup L2 cache */ /* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL); tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
...@@ -367,7 +367,7 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) ...@@ -367,7 +367,7 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{ {
u32 tmp; u32 tmp;
if (amdgpu_virt_support_skip_setting(adev)) if (amdgpu_sriov_vf(adev))
return; return;
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL); tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
......
...@@ -449,20 +449,6 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev) ...@@ -449,20 +449,6 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
} }
/**
 * xgpu_ai_init_reg_access_mode() - set the AI (Vega10) VF reg access mode
 * @adev: amdgpu device pointer
 *
 * Enables the L1-security register access modes by default, as
 * non-security VFs will no longer be supported: legacy MMIO plus RLC and
 * PSP programmed access, and hypervisor-programmed register skipping.
 */
static void xgpu_ai_init_reg_access_mode(struct amdgpu_device *adev)
{
	adev->virt.reg_access_mode = AMDGPU_VIRT_REG_ACCESS_LEGACY |
				     AMDGPU_VIRT_REG_ACCESS_RLC |
				     AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH |
				     AMDGPU_VIRT_REG_SKIP_SEETING;
}
const struct amdgpu_virt_ops xgpu_ai_virt_ops = { const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access, .req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access, .rel_full_gpu = xgpu_ai_release_full_gpu_access,
...@@ -471,5 +457,4 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = { ...@@ -471,5 +457,4 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.trans_msg = xgpu_ai_mailbox_trans_msg, .trans_msg = xgpu_ai_mailbox_trans_msg,
.get_pp_clk = xgpu_ai_get_pp_clk, .get_pp_clk = xgpu_ai_get_pp_clk,
.force_dpm_level = xgpu_ai_force_dpm_level, .force_dpm_level = xgpu_ai_force_dpm_level,
.init_reg_access_mode = xgpu_ai_init_reg_access_mode,
}; };
...@@ -330,14 +330,12 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) ...@@ -330,14 +330,12 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{ {
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
if (!amdgpu_virt_support_skip_setting(adev)) { soc15_program_register_sequence(adev,
soc15_program_register_sequence(adev, golden_settings_sdma_4,
golden_settings_sdma_4, ARRAY_SIZE(golden_settings_sdma_4));
ARRAY_SIZE(golden_settings_sdma_4)); soc15_program_register_sequence(adev,
soc15_program_register_sequence(adev, golden_settings_sdma_vg10,
golden_settings_sdma_vg10, ARRAY_SIZE(golden_settings_sdma_vg10));
ARRAY_SIZE(golden_settings_sdma_vg10));
}
break; break;
case CHIP_VEGA12: case CHIP_VEGA12:
soc15_program_register_sequence(adev, soc15_program_register_sequence(adev,
...@@ -1833,7 +1831,8 @@ static int sdma_v4_0_hw_init(void *handle) ...@@ -1833,7 +1831,8 @@ static int sdma_v4_0_hw_init(void *handle)
adev->powerplay.pp_funcs->set_powergating_by_smu) adev->powerplay.pp_funcs->set_powergating_by_smu)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
sdma_v4_0_init_golden_registers(adev); if (!amdgpu_sriov_vf(adev))
sdma_v4_0_init_golden_registers(adev);
r = sdma_v4_0_start(adev); r = sdma_v4_0_start(adev);
......
...@@ -1123,21 +1123,18 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev) ...@@ -1123,21 +1123,18 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev)
int i; int i;
struct amdgpu_ring *ring; struct amdgpu_ring *ring;
/* Two reasons to skip /* sdma/ih doorbell range are programed by hypervisor */
* 1, Host driver already programmed them if (!amdgpu_sriov_vf(adev)) {
* 2, To avoid registers program violations in SR-IOV
*/
if (!amdgpu_virt_support_skip_setting(adev)) {
for (i = 0; i < adev->sdma.num_instances; i++) { for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring; ring = &adev->sdma.instance[i].ring;
adev->nbio_funcs->sdma_doorbell_range(adev, i, adev->nbio_funcs->sdma_doorbell_range(adev, i,
ring->use_doorbell, ring->doorbell_index, ring->use_doorbell, ring->doorbell_index,
adev->doorbell_index.sdma_doorbell_range); adev->doorbell_index.sdma_doorbell_range);
} }
}
adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
adev->irq.ih.doorbell_index); adev->irq.ih.doorbell_index);
}
} }
static int soc15_common_hw_init(void *handle) static int soc15_common_hw_init(void *handle)
......
...@@ -69,9 +69,10 @@ ...@@ -69,9 +69,10 @@
} \ } \
} while (0) } while (0)
#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a)))
#define WREG32_RLC(reg, value) \ #define WREG32_RLC(reg, value) \
do { \ do { \
if (amdgpu_virt_support_rlc_prg_reg(adev)) { \ if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
uint32_t i = 0; \ uint32_t i = 0; \
uint32_t retries = 50000; \ uint32_t retries = 50000; \
uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \ uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \
...@@ -96,7 +97,7 @@ ...@@ -96,7 +97,7 @@
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \ #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
do { \ do { \
uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\ uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
if (amdgpu_virt_support_rlc_prg_reg(adev)) { \ if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \ uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \
uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \ uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \
uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \ uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \
......
...@@ -50,7 +50,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev) ...@@ -50,7 +50,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return; return;
...@@ -64,7 +64,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev) ...@@ -64,7 +64,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
RB_ENABLE, 1); RB_ENABLE, 1);
if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
ih_rb_cntl)) { ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
...@@ -80,7 +80,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev) ...@@ -80,7 +80,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
RB_ENABLE, 1); RB_ENABLE, 1);
if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
ih_rb_cntl)) { ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
...@@ -106,7 +106,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev) ...@@ -106,7 +106,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return; return;
...@@ -125,7 +125,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev) ...@@ -125,7 +125,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
RB_ENABLE, 0); RB_ENABLE, 0);
if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
ih_rb_cntl)) { ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
...@@ -145,7 +145,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev) ...@@ -145,7 +145,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
RB_ENABLE, 0); RB_ENABLE, 0);
if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
ih_rb_cntl)) { ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
...@@ -238,7 +238,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) ...@@ -238,7 +238,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
!!adev->irq.msi_enabled); !!adev->irq.msi_enabled);
if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
return -ETIMEDOUT; return -ETIMEDOUT;
...@@ -281,7 +281,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) ...@@ -281,7 +281,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
WPTR_OVERFLOW_ENABLE, 0); WPTR_OVERFLOW_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
RB_FULL_DRAIN_ENABLE, 1); RB_FULL_DRAIN_ENABLE, 1);
if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
ih_rb_cntl)) { ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
...@@ -308,7 +308,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) ...@@ -308,7 +308,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl); ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
ih_rb_cntl)) { ih_rb_cntl)) {
DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment