Commit 1c534671 authored by Alex Deucher

drm/radeon: rework GPU reset on cayman/TN

Update the code to better match the recommended
programming sequence for soft reset.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 187e3593
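
The rework below drops the per-block helpers (si_gpu_soft_reset_gfx / si_gpu_soft_reset_dma) in favour of one flow inside si_gpu_soft_reset(): halt the CP and the DMA ring buffers, accumulate GRBM/SRBM soft-reset bits from reset_mask, then pulse each soft-reset register (set the bits, read back, wait ~50us, clear the bits, read back) before the MC is resumed. The stand-alone sketch below only illustrates that pulse pattern; the register stubs, dummy bit values and the helper name pulse_soft_reset are assumptions added for illustration and are not part of the driver.

/*
 * Minimal stand-alone sketch of the reworked soft-reset pulse
 * (set bits, read back, wait, clear bits, read back).  WREG32,
 * RREG32 and udelay below are illustrative stubs, not the radeon
 * driver's MMIO accessors, and the register indices/bits are dummies.
 */
#include <stdint.h>
#include <stdio.h>

enum { GRBM_SOFT_RESET, SRBM_SOFT_RESET, NUM_REGS };

static uint32_t fake_regs[NUM_REGS];

static uint32_t RREG32(int reg)              { return fake_regs[reg]; }
static void     WREG32(int reg, uint32_t v)  { fake_regs[reg] = v; }
static void     udelay(unsigned int us)      { (void)us; /* no-op stand-in */ }

/* Mirror of the set/clear pattern used for GRBM_SOFT_RESET and
 * SRBM_SOFT_RESET in the reworked si_gpu_soft_reset(). */
static void pulse_soft_reset(int reg, const char *name, uint32_t mask)
{
	uint32_t tmp;

	if (!mask)
		return;

	tmp = RREG32(reg);
	tmp |= mask;
	printf("%s=0x%08X\n", name, tmp);
	WREG32(reg, tmp);          /* assert the selected reset bits */
	tmp = RREG32(reg);         /* read back to post the write */

	udelay(50);                /* let the blocks enter reset */

	tmp &= ~mask;
	WREG32(reg, tmp);          /* release the reset bits */
	tmp = RREG32(reg);         /* read back again */
	(void)tmp;
}

int main(void)
{
	/* In the driver these masks are accumulated from reset_mask
	 * (GFX/COMPUTE/CP -> GRBM bits, CP/DMA -> SRBM bits); dummy
	 * bit values are used here purely for illustration. */
	uint32_t grbm_soft_reset = (1u << 0) | (1u << 2);
	uint32_t srbm_soft_reset = (1u << 20);

	pulse_soft_reset(GRBM_SOFT_RESET, "GRBM_SOFT_RESET", grbm_soft_reset);
	pulse_soft_reset(SRBM_SOFT_RESET, "SRBM_SOFT_RESET", srbm_soft_reset);
	return 0;
}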
@@ -61,6 +61,7 @@ extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
 extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
 extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
 extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
 
 /* get temperature in millidegrees */
 int si_get_temp(struct radeon_device *rdev)
@@ -2126,97 +2127,15 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
-static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
-{
-	u32 grbm_reset = 0;
-
-	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-		return;
-
-	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
-		RREG32(GRBM_STATUS));
-	dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
-		RREG32(GRBM_STATUS2));
-	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
-		RREG32(GRBM_STATUS_SE0));
-	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
-		RREG32(GRBM_STATUS_SE1));
-	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
-		RREG32(SRBM_STATUS));
-
-	/* Disable CP parsing/prefetching */
-	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
-
-	/* reset all the gfx blocks */
-	grbm_reset = (SOFT_RESET_CP |
-		      SOFT_RESET_CB |
-		      SOFT_RESET_DB |
-		      SOFT_RESET_GDS |
-		      SOFT_RESET_PA |
-		      SOFT_RESET_SC |
-		      SOFT_RESET_BCI |
-		      SOFT_RESET_SPI |
-		      SOFT_RESET_SX |
-		      SOFT_RESET_TC |
-		      SOFT_RESET_TA |
-		      SOFT_RESET_VGT |
-		      SOFT_RESET_IA);
-
-	dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
-	WREG32(GRBM_SOFT_RESET, grbm_reset);
-	(void)RREG32(GRBM_SOFT_RESET);
-	udelay(50);
-	WREG32(GRBM_SOFT_RESET, 0);
-	(void)RREG32(GRBM_SOFT_RESET);
-
-	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
-		RREG32(GRBM_STATUS));
-	dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
-		RREG32(GRBM_STATUS2));
-	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
-		RREG32(GRBM_STATUS_SE0));
-	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
-		RREG32(GRBM_STATUS_SE1));
-	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
-		RREG32(SRBM_STATUS));
-}
-
-static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
-{
-	u32 tmp;
-
-	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
-		return;
-
-	dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
-		RREG32(DMA_STATUS_REG));
-
-	/* dma0 */
-	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
-	tmp &= ~DMA_RB_ENABLE;
-	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
-
-	/* dma1 */
-	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
-	tmp &= ~DMA_RB_ENABLE;
-	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
-
-	/* Reset dma */
-	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
-	RREG32(SRBM_SOFT_RESET);
-	udelay(50);
-	WREG32(SRBM_SOFT_RESET, 0);
-
-	dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
-		RREG32(DMA_STATUS_REG));
-}
-
 static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 {
 	struct evergreen_mc_save save;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
+	int ret = 0;
 
 	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
+		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP);
 
 	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
 		reset_mask &= ~RADEON_RESET_DMA;
@@ -2226,6 +2145,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
 
+	evergreen_print_gpu_status_regs(rdev);
 	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
 		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
 	dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
 		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
@@ -2234,22 +2154,99 @@ static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 	r600_set_bios_scratch_engine_hung(rdev, true);
 
 	evergreen_mc_stop(rdev, &save);
-	if (radeon_mc_wait_for_idle(rdev)) {
+	if (evergreen_mc_wait_for_idle(rdev)) {
 		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
 	}
 
-	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
-		si_gpu_soft_reset_gfx(rdev);
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		/* dma0 */
+		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+		/* dma1 */
+		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
+		grbm_soft_reset = SOFT_RESET_CB |
+			SOFT_RESET_DB |
+			SOFT_RESET_GDS |
+			SOFT_RESET_PA |
+			SOFT_RESET_SC |
+			SOFT_RESET_BCI |
+			SOFT_RESET_SPI |
+			SOFT_RESET_SX |
+			SOFT_RESET_TC |
+			SOFT_RESET_TA |
+			SOFT_RESET_VGT |
+			SOFT_RESET_IA;
+	}
+
+	if (reset_mask & RADEON_RESET_CP) {
+		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+	}
 
 	if (reset_mask & RADEON_RESET_DMA)
-		si_gpu_soft_reset_dma(rdev);
+		srbm_soft_reset |= SOFT_RESET_DMA | SOFT_RESET_DMA1;
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(GRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+	}
 
 	/* Wait a little for things to settle down */
 	udelay(50);
 
 	evergreen_mc_resume(rdev, &save);
+	udelay(50);
 
-	r600_set_bios_scratch_engine_hung(rdev, false);
+#if 0
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
+		if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
+			ret = -EAGAIN;
+	}
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		if (!(RREG32(DMA_STATUS_REG) & DMA_IDLE))
+			ret = -EAGAIN;
+	}
+#endif
+
+	if (!ret)
+		r600_set_bios_scratch_engine_hung(rdev, false);
+
+	evergreen_print_gpu_status_regs(rdev);
 
 	return 0;
 }
@@ -2258,7 +2255,8 @@ int si_asic_reset(struct radeon_device *rdev)
 {
 	return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
 					RADEON_RESET_COMPUTE |
-					RADEON_RESET_DMA));
+					RADEON_RESET_DMA |
+					RADEON_RESET_CP));
 }
 
 /* MC */