Commit 017634a6 authored by Bokun Zhang, committed by Alex Deucher

drm/amd/amdgpu/vcn: Add RB decouple feature under SRIOV - P4

- In the VCN 4 SRIOV code path, add code to enable the RB decouple feature
Signed-off-by: Bokun Zhang <bokun.zhang@amd.com>
Reviewed-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent eb9d6256
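For orientation, a rough sketch of the layout this patch implies; it is not part of the change itself. The per-ring metadata that vcn_v4_0_init_ring_metadata() fills appears to live in the extra dwords reserved by the new .extra_dw entry, immediately after the ring contents. The struct name and padding below are assumptions for illustration; field names follow the accesses in the hunks, while the real struct amdgpu_vcn_rb_metadata definition lives elsewhere in amdgpu_vcn.h.

/* Illustrative sketch only -- assumed shape of the per-ring metadata block. */
struct vcn_rb_metadata_sketch {         /* hypothetical stand-in for struct amdgpu_vcn_rb_metadata */
	uint32_t size;                  /* sizeof(metadata), written by vcn_v4_0_init_ring_metadata() */
	uint32_t present_flag_0;        /* RB_SETUP | RB_DECOUPLE flags for the VF firmware */
	uint8_t  version;               /* set to 1 by this patch */
	uint8_t  ring_id;               /* VCN instance the unified ring belongs to */
	uint8_t  pad[26];               /* assumed padding */
};

/*
 * Assumed ring allocation after this patch (.extra_dw reserves the tail):
 *
 *   ring_enc->ring
 *   +-------------------------------------+---------------------------+
 *   | ring contents (ring_enc->ring_size) | rb metadata (.extra_dw)   |
 *   +-------------------------------------+---------------------------+
 */

This lines up with the interrupt hunk below, where vcn_v4_0_process_interrupt() derives the instance from entry->ring_id when decoupling is enabled instead of switching on the client id.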
@@ -176,9 +176,6 @@ static int vcn_v4_0_sw_init(void *handle)
 				AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING;
 		}
 
-		if (amdgpu_sriov_vf(adev))
-			fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
-
 		if (amdgpu_vcnfw_log)
 			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
 	}
@@ -1209,6 +1206,24 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int vcn_v4_0_init_ring_metadata(struct amdgpu_device *adev, uint32_t vcn_inst, struct amdgpu_ring *ring_enc)
+{
+	struct amdgpu_vcn_rb_metadata *rb_metadata = NULL;
+	uint8_t *rb_ptr = (uint8_t *)ring_enc->ring;
+
+	rb_ptr += ring_enc->ring_size;
+	rb_metadata = (struct amdgpu_vcn_rb_metadata *)rb_ptr;
+
+	memset(rb_metadata, 0, sizeof(struct amdgpu_vcn_rb_metadata));
+	rb_metadata->size = sizeof(struct amdgpu_vcn_rb_metadata);
+	rb_metadata->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
+	rb_metadata->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG);
+	rb_metadata->version = 1;
+	rb_metadata->ring_id = vcn_inst & 0xFF;
+
+	return 0;
+}
+
 static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
 {
 	int i;
@@ -1331,11 +1346,30 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
 			rb_enc_addr = ring_enc->gpu_addr;
 
 			rb_setup->is_rb_enabled_flags |= RB_ENABLED;
-			rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
-			rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
-			rb_setup->rb_size = ring_enc->ring_size / 4;
 			fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);
 
+			if (amdgpu_sriov_is_vcn_rb_decouple(adev)) {
+				vcn_v4_0_init_ring_metadata(adev, i, ring_enc);
+
+				memset((void *)&rb_setup->rb_info, 0, sizeof(struct amdgpu_vcn_rb_setup_info) * MAX_NUM_VCN_RB_SETUP);
+				if (!(adev->vcn.harvest_config & (1 << 0))) {
+					rb_setup->rb_info[0].rb_addr_lo = lower_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr);
+					rb_setup->rb_info[0].rb_addr_hi = upper_32_bits(adev->vcn.inst[0].ring_enc[0].gpu_addr);
+					rb_setup->rb_info[0].rb_size = adev->vcn.inst[0].ring_enc[0].ring_size / 4;
+				}
+				if (!(adev->vcn.harvest_config & (1 << 1))) {
+					rb_setup->rb_info[2].rb_addr_lo = lower_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr);
+					rb_setup->rb_info[2].rb_addr_hi = upper_32_bits(adev->vcn.inst[1].ring_enc[0].gpu_addr);
+					rb_setup->rb_info[2].rb_size = adev->vcn.inst[1].ring_enc[0].ring_size / 4;
+				}
+				fw_shared->decouple.is_enabled = 1;
+				fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_DECOUPLE_FLAG);
+			} else {
+				rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
+				rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
+				rb_setup->rb_size = ring_enc->ring_size / 4;
+			}
+
 			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
 				regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
 				lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
@@ -1807,6 +1841,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_ENC,
 	.align_mask = 0x3f,
 	.nop = VCN_ENC_CMD_NO_OP,
+	.extra_dw = sizeof(struct amdgpu_vcn_rb_metadata),
 	.get_rptr = vcn_v4_0_unified_ring_get_rptr,
 	.get_wptr = vcn_v4_0_unified_ring_get_wptr,
 	.set_wptr = vcn_v4_0_unified_ring_set_wptr,
@@ -2020,16 +2055,20 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
 {
 	uint32_t ip_instance;
 
-	switch (entry->client_id) {
-	case SOC15_IH_CLIENTID_VCN:
-		ip_instance = 0;
-		break;
-	case SOC15_IH_CLIENTID_VCN1:
-		ip_instance = 1;
-		break;
-	default:
-		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
-		return 0;
+	if (amdgpu_sriov_is_vcn_rb_decouple(adev)) {
+		ip_instance = entry->ring_id;
+	} else {
+		switch (entry->client_id) {
+		case SOC15_IH_CLIENTID_VCN:
+			ip_instance = 0;
+			break;
+		case SOC15_IH_CLIENTID_VCN1:
+			ip_instance = 1;
+			break;
+		default:
+			DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
+			return 0;
+		}
 	}
 
 	DRM_DEBUG("IH: VCN TRAP\n");