Commit 9bb39ff4 authored by Maarten Lankhorst, committed by Alex Deucher

drm/radeon: take exclusive_lock in read mode during ring tests, v5

This is needed for the next commit, because the lockup detection
will need the read lock to run.

v4 (chk): split out forced fence completion, remove unrelated changes,
          add and handle in_reset flag
v5 (agd5f): rebase fix
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent eb98c709
...@@ -2326,7 +2326,7 @@ struct radeon_device { ...@@ -2326,7 +2326,7 @@ struct radeon_device {
bool need_dma32; bool need_dma32;
bool accel_working; bool accel_working;
bool fastfb_working; /* IGP feature*/ bool fastfb_working; /* IGP feature*/
bool needs_reset; bool needs_reset, in_reset;
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */ const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */ const struct firmware *pfp_fw; /* r6/700 PFP firmware */
......
...@@ -653,6 +653,13 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ...@@ -653,6 +653,13 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
up_read(&rdev->exclusive_lock); up_read(&rdev->exclusive_lock);
return -EBUSY; return -EBUSY;
} }
if (rdev->in_reset) {
up_read(&rdev->exclusive_lock);
r = radeon_gpu_reset(rdev);
if (!r)
r = -EAGAIN;
return r;
}
/* initialize parser */ /* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser)); memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp; parser.filp = filp;
......
...@@ -1673,6 +1673,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) ...@@ -1673,6 +1673,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
return 0; return 0;
} }
rdev->in_reset = true;
rdev->needs_reset = false; rdev->needs_reset = false;
radeon_save_bios_scratch_regs(rdev); radeon_save_bios_scratch_regs(rdev);
...@@ -1691,7 +1692,6 @@ int radeon_gpu_reset(struct radeon_device *rdev) ...@@ -1691,7 +1692,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
} }
} }
retry:
r = radeon_asic_reset(rdev); r = radeon_asic_reset(rdev);
if (!r) { if (!r) {
dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n"); dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
...@@ -1700,25 +1700,11 @@ int radeon_gpu_reset(struct radeon_device *rdev) ...@@ -1700,25 +1700,11 @@ int radeon_gpu_reset(struct radeon_device *rdev)
radeon_restore_bios_scratch_regs(rdev); radeon_restore_bios_scratch_regs(rdev);
if (!r) { for (i = 0; i < RADEON_NUM_RINGS; ++i) {
for (i = 0; i < RADEON_NUM_RINGS; ++i) { if (!r && ring_data[i]) {
radeon_ring_restore(rdev, &rdev->ring[i], radeon_ring_restore(rdev, &rdev->ring[i],
ring_sizes[i], ring_data[i]); ring_sizes[i], ring_data[i]);
ring_sizes[i] = 0; } else {
ring_data[i] = NULL;
}
r = radeon_ib_ring_tests(rdev);
if (r) {
dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
if (saved) {
saved = false;
radeon_suspend(rdev);
goto retry;
}
}
} else {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
radeon_fence_driver_force_completion(rdev, i); radeon_fence_driver_force_completion(rdev, i);
kfree(ring_data[i]); kfree(ring_data[i]);
} }
...@@ -1751,19 +1737,28 @@ int radeon_gpu_reset(struct radeon_device *rdev) ...@@ -1751,19 +1737,28 @@ int radeon_gpu_reset(struct radeon_device *rdev)
/* reset hpd state */ /* reset hpd state */
radeon_hpd_init(rdev); radeon_hpd_init(rdev);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
downgrade_write(&rdev->exclusive_lock);
drm_helper_resume_force_mode(rdev->ddev); drm_helper_resume_force_mode(rdev->ddev);
/* set the power state here in case we are a PX system or headless */ /* set the power state here in case we are a PX system or headless */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
radeon_pm_compute_clocks(rdev); radeon_pm_compute_clocks(rdev);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); if (!r) {
if (r) { r = radeon_ib_ring_tests(rdev);
if (r && saved)
r = -EAGAIN;
} else {
/* bad news, how to tell it to userspace ? */ /* bad news, how to tell it to userspace ? */
dev_info(rdev->dev, "GPU reset failed\n"); dev_info(rdev->dev, "GPU reset failed\n");
} }
up_write(&rdev->exclusive_lock); rdev->needs_reset = r == -EAGAIN;
rdev->in_reset = false;
up_read(&rdev->exclusive_lock);
return r; return r;
} }
......
...@@ -405,7 +405,9 @@ static void radeon_flip_work_func(struct work_struct *__work) ...@@ -405,7 +405,9 @@ static void radeon_flip_work_func(struct work_struct *__work)
r = radeon_fence_wait(work->fence, false); r = radeon_fence_wait(work->fence, false);
if (r == -EDEADLK) { if (r == -EDEADLK) {
up_read(&rdev->exclusive_lock); up_read(&rdev->exclusive_lock);
r = radeon_gpu_reset(rdev); do {
r = radeon_gpu_reset(rdev);
} while (r == -EAGAIN);
down_read(&rdev->exclusive_lock); down_read(&rdev->exclusive_lock);
} }
if (r) if (r)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment