Commit da146d3b authored by Alex Deucher

drm/amdgpu: fix amdgpu_need_full_reset (v2)

IP types are not an index.  Each asic may have a different
number and type of IPs.  Properly check the type rather
than using the type id as an index.

v2: fix all the IPs to not use IP type as an idx as well.
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
parent aee3960a
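
In short: AMD_IP_BLOCK_TYPE_* constants describe what a block is, not where it sits in adev->ip_blocks[], so indexing ip_block_status[] with a type constant can read an unrelated block's state. A minimal sketch of the corrected lookup pattern, condensed from the diff below (the DRM_INFO message here is only a placeholder, not driver output):

int i;

for (i = 0; i < adev->num_ip_blocks; i++) {
        if (!adev->ip_block_status[i].valid)
                continue;
        /* match on the block's type field instead of using the type as an array index */
        if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC &&
            adev->ip_block_status[i].hang)
                DRM_INFO("GMC block is hung\n");        /* placeholder action */
}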
@@ -2075,7 +2075,8 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
 		if (!adev->ip_block_status[i].valid)
 			continue;
 		if (adev->ip_blocks[i].funcs->check_soft_reset)
-			adev->ip_blocks[i].funcs->check_soft_reset(adev);
+			adev->ip_block_status[i].hang =
+				adev->ip_blocks[i].funcs->check_soft_reset(adev);
 		if (adev->ip_block_status[i].hang) {
 			DRM_INFO("IP block:%d is hang!\n", i);
 			asic_hang = true;
@@ -2104,12 +2105,20 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
 
 static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
 {
-	if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
-	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
-	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
-	    adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
-		DRM_INFO("Some block need full reset!\n");
-		return true;
+	int i;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
+		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
+		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
+		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
+			if (adev->ip_block_status[i].hang) {
+				DRM_INFO("Some block need full reset!\n");
+				return true;
+			}
+		}
 	}
 	return false;
 }
@@ -3188,16 +3188,11 @@ static int dce_v10_0_wait_for_idle(void *handle)
 	return 0;
 }
 
-static int dce_v10_0_check_soft_reset(void *handle)
+static bool dce_v10_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (dce_v10_0_is_display_hung(adev))
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true;
-	else
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false;
-
-	return 0;
+	return dce_v10_0_is_display_hung(adev);
 }
 
 static int dce_v10_0_soft_reset(void *handle)
@@ -3205,9 +3200,6 @@ static int dce_v10_0_soft_reset(void *handle)
 	u32 srbm_soft_reset = 0, tmp;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
-		return 0;
-
 	if (dce_v10_0_is_display_hung(adev))
 		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
@@ -5144,7 +5144,7 @@ static int gfx_v8_0_wait_for_idle(void *handle)
 	return -ETIMEDOUT;
 }
 
-static int gfx_v8_0_check_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -5196,16 +5196,14 @@ static int gfx_v8_0_check_soft_reset(void *handle)
 						 SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
 
 	if (grbm_soft_reset || srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true;
 		adev->gfx.grbm_soft_reset = grbm_soft_reset;
 		adev->gfx.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false;
 		adev->gfx.grbm_soft_reset = 0;
 		adev->gfx.srbm_soft_reset = 0;
+		return false;
 	}
-
-	return 0;
 }
 
 static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
@@ -5233,7 +5231,8 @@ static int gfx_v8_0_pre_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+	if ((!adev->gfx.grbm_soft_reset) &&
+	    (!adev->gfx.srbm_soft_reset))
 		return 0;
 
 	grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5271,7 +5270,8 @@ static int gfx_v8_0_soft_reset(void *handle)
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 	u32 tmp;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+	if ((!adev->gfx.grbm_soft_reset) &&
+	    (!adev->gfx.srbm_soft_reset))
 		return 0;
 
 	grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5341,7 +5341,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+	if ((!adev->gfx.grbm_soft_reset) &&
+	    (!adev->gfx.srbm_soft_reset))
 		return 0;
 
 	grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -1099,7 +1099,7 @@ static int gmc_v8_0_wait_for_idle(void *handle)
 }
 
-static int gmc_v8_0_check_soft_reset(void *handle)
+static bool gmc_v8_0_check_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1116,20 +1116,19 @@ static int gmc_v8_0_check_soft_reset(void *handle)
 						SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
 
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true;
 		adev->mc.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false;
 		adev->mc.srbm_soft_reset = 0;
+		return false;
 	}
-	return 0;
 }
 
 static int gmc_v8_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+	if (!adev->mc.srbm_soft_reset)
 		return 0;
 
 	gmc_v8_0_mc_stop(adev, &adev->mc.save);
@@ -1145,7 +1144,7 @@ static int gmc_v8_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+	if (!adev->mc.srbm_soft_reset)
 		return 0;
 
 	srbm_soft_reset = adev->mc.srbm_soft_reset;
@@ -1175,7 +1174,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+	if (!adev->mc.srbm_soft_reset)
 		return 0;
 
 	gmc_v8_0_mc_resume(adev, &adev->mc.save);
@@ -1268,7 +1268,7 @@ static int sdma_v3_0_wait_for_idle(void *handle)
 	return -ETIMEDOUT;
 }
 
-static int sdma_v3_0_check_soft_reset(void *handle)
+static bool sdma_v3_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
@@ -1281,14 +1281,12 @@ static int sdma_v3_0_check_soft_reset(void *handle)
 	}
 
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true;
 		adev->sdma.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false;
 		adev->sdma.srbm_soft_reset = 0;
+		return false;
 	}
-
-	return 0;
 }
 
 static int sdma_v3_0_pre_soft_reset(void *handle)
@@ -1296,7 +1294,7 @@ static int sdma_v3_0_pre_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+	if (!adev->sdma.srbm_soft_reset)
 		return 0;
 
 	srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1315,7 +1313,7 @@ static int sdma_v3_0_post_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+	if (!adev->sdma.srbm_soft_reset)
 		return 0;
 
 	srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1335,7 +1333,7 @@ static int sdma_v3_0_soft_reset(void *handle)
 	u32 srbm_soft_reset = 0;
 	u32 tmp;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+	if (!adev->sdma.srbm_soft_reset)
 		return 0;
 
 	srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -373,7 +373,7 @@ static int tonga_ih_wait_for_idle(void *handle)
 	return -ETIMEDOUT;
 }
 
-static int tonga_ih_check_soft_reset(void *handle)
+static bool tonga_ih_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
@@ -384,21 +384,19 @@ static int tonga_ih_check_soft_reset(void *handle)
 						SOFT_RESET_IH, 1);
 
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true;
 		adev->irq.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false;
 		adev->irq.srbm_soft_reset = 0;
+		return false;
 	}
-
-	return 0;
 }
 
 static int tonga_ih_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+	if (!adev->irq.srbm_soft_reset)
 		return 0;
 
 	return tonga_ih_hw_fini(adev);
@@ -408,7 +406,7 @@ static int tonga_ih_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+	if (!adev->irq.srbm_soft_reset)
 		return 0;
 
 	return tonga_ih_hw_init(adev);
@@ -419,7 +417,7 @@ static int tonga_ih_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+	if (!adev->irq.srbm_soft_reset)
 		return 0;
 
 	srbm_soft_reset = adev->irq.srbm_soft_reset;
@@ -770,7 +770,7 @@ static int uvd_v6_0_wait_for_idle(void *handle)
 }
 
 #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
-static int uvd_v6_0_check_soft_reset(void *handle)
+static bool uvd_v6_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
@@ -782,19 +782,19 @@ static int uvd_v6_0_check_soft_reset(void *handle)
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
 
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true;
 		adev->uvd.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false;
 		adev->uvd.srbm_soft_reset = 0;
+		return false;
 	}
-	return 0;
 }
 
 static int uvd_v6_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+	if (!adev->uvd.srbm_soft_reset)
 		return 0;
 
 	uvd_v6_0_stop(adev);
@@ -806,7 +806,7 @@ static int uvd_v6_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+	if (!adev->uvd.srbm_soft_reset)
 		return 0;
 
 	srbm_soft_reset = adev->uvd.srbm_soft_reset;
@@ -836,7 +836,7 @@ static int uvd_v6_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+	if (!adev->uvd.srbm_soft_reset)
 		return 0;
 
 	mdelay(5);
@@ -561,7 +561,7 @@ static int vce_v3_0_wait_for_idle(void *handle)
 #define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
 				     VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
 
-static int vce_v3_0_check_soft_reset(void *handle)
+static bool vce_v3_0_check_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset = 0;
@@ -591,16 +591,15 @@ static int vce_v3_0_check_soft_reset(void *handle)
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
 	}
 	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+	mutex_unlock(&adev->grbm_idx_mutex);
 
 	if (srbm_soft_reset) {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
 		adev->vce.srbm_soft_reset = srbm_soft_reset;
+		return true;
 	} else {
-		adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
 		adev->vce.srbm_soft_reset = 0;
+		return false;
 	}
-	mutex_unlock(&adev->grbm_idx_mutex);
-	return 0;
 }
 
 static int vce_v3_0_soft_reset(void *handle)
@@ -608,7 +607,7 @@ static int vce_v3_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+	if (!adev->vce.srbm_soft_reset)
 		return 0;
 
 	srbm_soft_reset = adev->vce.srbm_soft_reset;
@@ -638,7 +637,7 @@ static int vce_v3_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+	if (!adev->vce.srbm_soft_reset)
 		return 0;
 
 	mdelay(5);
@@ -651,7 +650,7 @@ static int vce_v3_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+	if (!adev->vce.srbm_soft_reset)
 		return 0;
 
 	mdelay(5);
@@ -165,7 +165,7 @@ struct amd_ip_funcs {
 	/* poll for idle */
 	int (*wait_for_idle)(void *handle);
 	/* check soft reset the IP block */
-	int (*check_soft_reset)(void *handle);
+	bool (*check_soft_reset)(void *handle);
 	/* pre soft reset the IP block */
 	int (*pre_soft_reset)(void *handle);
 	/* soft reset the IP block */
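
After this change the check_soft_reset hook returns bool (true means the block is hung) and each IP caches the reset bits it needs in its own state, while the common amdgpu_check_soft_reset() loop records the returned value in ip_block_status[i].hang. A hedged sketch of the resulting per-IP implementation shape (the foo_* names, the adev->foo field, and the SOFT_RESET_FOO register field are hypothetical; only the calling convention follows the diff above):

static bool foo_v1_0_check_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;

        if (foo_v1_0_is_hung(adev))     /* hypothetical hang detection */
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
                                                SOFT_RESET_FOO, 1);

        adev->foo.srbm_soft_reset = srbm_soft_reset;    /* hypothetical per-IP field */
        return !!srbm_soft_reset;       /* common code stores this in ip_block_status[i].hang */
}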