Commit f1e582eb authored by Alex Deucher

drm/amdgpu: implement harvesting support for UVD 7.2 (v3)

Properly handle cases where one or more instances of the IP
block may be harvested.

v2: make sure ip_num_rings is initialized in amdgpu_queue_mgr.c
v3: rebase on Christian's UVD changes, drop unused var
Reviewed-by: James Zhu <James.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent d04cc604
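The change applies one pattern throughout: at early init, read the per-instance harvesting fuse, record fused-off instances as bits in adev->uvd.harvest_config, and have every loop over UVD instances skip the ones whose bit is set. Below is a minimal, self-contained C sketch of that pattern, not driver code; the uvd_state struct and the read_harvest_fuse() helper are illustrative stand-ins, and only the bitmask and skip logic mirror the patch.

#include <stdio.h>

#define MAX_UVD_INSTANCES 2
#define HARVEST_UVD0 (1 << 0)	/* mirrors AMDGPU_UVD_HARVEST_UVD0 in the patch */
#define HARVEST_UVD1 (1 << 1)	/* mirrors AMDGPU_UVD_HARVEST_UVD1 in the patch */

struct uvd_state {
	unsigned num_inst;		/* instances physically present on the die */
	unsigned harvest_config;	/* bit i set => instance i is harvested    */
};

/* Illustrative stand-in for reading the UVD_PG0_CC_UVD_HARVESTING fuse. */
static unsigned read_harvest_fuse(unsigned inst)
{
	return inst == 1;	/* pretend instance 1 was fused off */
}

static void detect_harvesting(struct uvd_state *uvd)
{
	unsigned i;

	for (i = 0; i < uvd->num_inst; i++)
		if (read_harvest_fuse(i))
			uvd->harvest_config |= 1u << i;
}

int main(void)
{
	struct uvd_state uvd = { .num_inst = MAX_UVD_INSTANCES };
	unsigned i, usable = 0;

	detect_harvesting(&uvd);

	/* Every per-instance loop in the patch follows this shape. */
	for (i = 0; i < uvd.num_inst; i++) {
		if (uvd.harvest_config & (1u << i))
			continue;	/* skip harvested instance */
		usable++;
		printf("initializing UVD instance %u\n", i);
	}

	/* All instances harvested: report the whole IP block as absent. */
	if (uvd.harvest_config == (HARVEST_UVD0 | HARVEST_UVD1))
		printf("both instances harvested, block disabled\n");

	printf("%u usable UVD instance(s)\n", usable);
	return 0;
}

With both harvest bits set, the real driver returns -ENOENT from early init so the block is treated as absent, which is what uvd_v7_0_early_init() does in the diff below.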
@@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	struct drm_crtc *crtc;
 	uint32_t ui32 = 0;
 	uint64_t ui64 = 0;
-	int i, found;
+	int i, j, found;
 	int ui32_size = sizeof(ui32);

 	if (!info->return_size || !info->return_pointer)
@@ -348,7 +348,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		break;
 	case AMDGPU_HW_IP_UVD:
 		type = AMD_IP_BLOCK_TYPE_UVD;
-		ring_mask |= adev->uvd.inst[0].ring.ready;
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (adev->uvd.harvest_config & (1 << i))
+				continue;
+			ring_mask |= adev->uvd.inst[i].ring.ready;
+		}
 		ib_start_alignment = 64;
 		ib_size_alignment = 64;
 		break;
@@ -361,9 +365,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		break;
 	case AMDGPU_HW_IP_UVD_ENC:
 		type = AMD_IP_BLOCK_TYPE_UVD;
-		for (i = 0; i < adev->uvd.num_enc_rings; i++)
-			ring_mask |=
-			adev->uvd.inst[0].ring_enc[i].ready << i;
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (adev->uvd.harvest_config & (1 << i))
+				continue;
+			for (j = 0; j < adev->uvd.num_enc_rings; j++)
+				ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
+		}
 		ib_start_alignment = 64;
 		ib_size_alignment = 64;
 		break;
...
@@ -214,7 +214,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 		     u32 hw_ip, u32 instance, u32 ring,
 		     struct amdgpu_ring **out_ring)
 {
-	int r, ip_num_rings;
+	int i, r, ip_num_rings = 0;
 	struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];

 	if (!adev || !mgr || !out_ring)
@@ -243,14 +243,21 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 		ip_num_rings = adev->sdma.num_instances;
 		break;
 	case AMDGPU_HW_IP_UVD:
-		ip_num_rings = adev->uvd.num_uvd_inst;
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (!(adev->uvd.harvest_config & (1 << i)))
+				ip_num_rings++;
+		}
 		break;
 	case AMDGPU_HW_IP_VCE:
 		ip_num_rings = adev->vce.num_rings;
 		break;
 	case AMDGPU_HW_IP_UVD_ENC:
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (!(adev->uvd.harvest_config & (1 << i)))
+				ip_num_rings++;
+		}
 		ip_num_rings =
-			adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
+			adev->uvd.num_enc_rings * ip_num_rings;
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		ip_num_rings = 1;
...
@@ -255,7 +255,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
 					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
 					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
@@ -308,6 +309,8 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 	drm_sched_entity_destroy(&adev->uvd.entity);

 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		kfree(adev->uvd.inst[j].saved_bo);

 		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
@@ -343,6 +346,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	}

 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		if (adev->uvd.inst[j].vcpu_bo == NULL)
 			continue;
@@ -365,6 +370,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 	int i;

 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		if (adev->uvd.inst[i].vcpu_bo == NULL)
 			return -EINVAL;
@@ -1159,6 +1166,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 	unsigned fences = 0, i, j;

 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
 		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
 			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
...
@@ -48,6 +48,9 @@ struct amdgpu_uvd_inst {
 	uint32_t		srbm_soft_reset;
 };

+#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
+#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)
+
 struct amdgpu_uvd {
 	const struct firmware	*fw;	/* UVD firmware */
 	unsigned		fw_version;
@@ -61,6 +64,7 @@ struct amdgpu_uvd {
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_sched_entity	entity;
 	struct delayed_work	idle_work;
+	unsigned		harvest_config;
 };

 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
...
@@ -41,6 +41,12 @@
 #include "mmhub/mmhub_1_0_sh_mask.h"
 #include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"

+#define mmUVD_PG0_CC_UVD_HARVESTING				0x00c7
+#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX			1
+//UVD_PG0_CC_UVD_HARVESTING
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT		0x1
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK		0x00000002L
+
 #define UVD7_MAX_HW_INSTANCES_VEGA20			2

 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -370,10 +376,25 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 static int uvd_v7_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	if (adev->asic_type == CHIP_VEGA20)
+
+	if (adev->asic_type == CHIP_VEGA20) {
+		u32 harvest;
+		int i;
+
 		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
-	else
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
+			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
+				adev->uvd.harvest_config |= 1 << i;
+			}
+		}
+		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
+						 AMDGPU_UVD_HARVEST_UVD1))
+			/* both instances are harvested, disable the block */
+			return -ENOENT;
+	} else {
 		adev->uvd.num_uvd_inst = 1;
+	}

 	if (amdgpu_sriov_vf(adev))
 		adev->uvd.num_enc_rings = 1;
@@ -393,6 +414,8 @@ static int uvd_v7_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		/* UVD TRAP */
 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
 		if (r)
@@ -425,6 +448,8 @@ static int uvd_v7_0_sw_init(void *handle)
 		return r;

 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		if (!amdgpu_sriov_vf(adev)) {
 			ring = &adev->uvd.inst[j].ring;
 			sprintf(ring->name, "uvd<%d>", j);
@@ -472,6 +497,8 @@ static int uvd_v7_0_sw_fini(void *handle)
 		return r;

 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
 			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
 	}
@@ -500,6 +527,8 @@ static int uvd_v7_0_hw_init(void *handle)
 		goto done;

 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		ring = &adev->uvd.inst[j].ring;

 		if (!amdgpu_sriov_vf(adev)) {
@@ -579,8 +608,11 @@ static int uvd_v7_0_hw_fini(void *handle)
 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
 	}

-	for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
+	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		adev->uvd.inst[i].ring.ready = false;
+	}

 	return 0;
 }
@@ -623,6 +655,8 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
 	int i;

 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 				lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
@@ -695,6 +729,8 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
 	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);

 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
 		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
 		adev->uvd.inst[i].ring_enc[0].wptr = 0;
@@ -751,6 +787,8 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 	init_table += header->uvd_table_offset;

 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		ring = &adev->uvd.inst[i].ring;
 		ring->wptr = 0;
 		size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
@@ -890,6 +928,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
 	int i, j, k, r;

 	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+		if (adev->uvd.harvest_config & (1 << k))
+			continue;
 		/* disable DPG */
 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
 			 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
@@ -902,6 +942,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
 	uvd_v7_0_mc_resume(adev);

 	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+		if (adev->uvd.harvest_config & (1 << k))
+			continue;
 		ring = &adev->uvd.inst[k].ring;
 		/* disable clock gating */
 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
@@ -1069,6 +1111,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
 	uint8_t i = 0;

 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		/* force RBC into idle state */
 		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
@@ -1785,6 +1829,8 @@ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
 	int i;

 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
 		adev->uvd.inst[i].ring.me = i;
 		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
@@ -1796,6 +1842,8 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
 	int i, j;

 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
 			adev->uvd.inst[j].ring_enc[i].me = j;
@@ -1815,6 +1863,8 @@ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
 	int i;

 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
 		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
 	}
...