Commit 8a47cc9e authored by Christian König, committed by Dave Airlie

drm/radeon: rework locking ring emission mutex in fence deadlock detection v2

Some callers illegally called fence_wait_next/empty
while holding the ring emission mutex. So don't
relock the mutex in those cases, and move the actual
locking into the fence code.

v2: Don't try to unlock the mutex if it isn't locked.
Signed-off-by: Christian König <deathsimple@vodafone.de>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 3b7a2b24
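The core of the change is a conditional-locking pattern: radeon_fence_wait_seq() grows a lock_ring flag, so the new *_locked entry points (whose callers already hold the ring emission mutex) skip the re-lock, while a normal fence wait still takes the mutex around the lockup handling. Below is a minimal user-space sketch of that pattern only; the names wait_seq, fence_wait and fence_wait_next_locked are illustrative stand-ins rather than the driver's API, and a pthread mutex stands in for rdev->ring_lock.

/*
 * Minimal sketch of the lock_ring pattern (not the driver code).
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

static int wait_seq(bool lock_ring)
{
	/* ... block until the fence sequence signals or times out ... */

	/* only take the ring mutex if the caller doesn't already hold it */
	if (lock_ring)
		pthread_mutex_lock(&ring_lock);

	/* lockup detection / marking the ring not ready happens here,
	 * always under the mutex */

	if (lock_ring)
		pthread_mutex_unlock(&ring_lock);
	return 0;
}

/* external caller: does not hold the mutex, so the wait takes it */
int fence_wait(void)
{
	return wait_seq(true);
}

/* caller that already holds ring_lock, e.g. while allocating ring space */
int fence_wait_next_locked(void)
{
	return wait_seq(false);
}

The point of the flag is that the wait path, not each caller, owns the lock around the lockup handling, which is why the unlock/relock dance disappears from radeon_ring_alloc() in the diff below.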
@@ -284,8 +284,8 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
 void radeon_fence_unref(struct radeon_fence **fence);
 unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
@@ -912,9 +912,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 	}
	
 	/* evict vram memory */
 	radeon_bo_evict_vram(rdev);
	
+	mutex_lock(&rdev->ring_lock);
 	/* wait for gpu to finish processing current batch */
 	for (i = 0; i < RADEON_NUM_RINGS; i++)
-		radeon_fence_wait_empty(rdev, i);
+		radeon_fence_wait_empty_locked(rdev, i);
+	mutex_unlock(&rdev->ring_lock);
+
 	radeon_save_bios_scratch_regs(rdev);
@@ -194,7 +194,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 }
 
 static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
-				 unsigned ring, bool intr)
+				 unsigned ring, bool intr, bool lock_ring)
 {
 	unsigned long timeout, last_activity;
 	uint64_t seq;
@@ -249,8 +249,16 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
 			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
 				continue;
 			}
+
+			if (lock_ring) {
+				mutex_lock(&rdev->ring_lock);
+			}
+
 			/* test if somebody else has already decided that this is a lockup */
 			if (last_activity != rdev->fence_drv[ring].last_activity) {
+				if (lock_ring) {
+					mutex_unlock(&rdev->ring_lock);
+				}
 				continue;
 			}
 
@@ -264,15 +272,17 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
 					rdev->fence_drv[i].last_activity = jiffies;
 				}
 
-				/* change last activity so nobody else think there is a lockup */
-				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-					rdev->fence_drv[i].last_activity = jiffies;
-				}
-
 				/* mark the ring as not ready any more */
 				rdev->ring[ring].ready = false;
+
+				if (lock_ring) {
+					mutex_unlock(&rdev->ring_lock);
+				}
 				return -EDEADLK;
 			}
+			if (lock_ring) {
+				mutex_unlock(&rdev->ring_lock);
+			}
 		}
 	}
 	return 0;
@@ -287,7 +297,8 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 		return -EINVAL;
 	}
 
-	r = radeon_fence_wait_seq(fence->rdev, fence->seq, fence->ring, intr);
+	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
+				  fence->ring, intr, true);
 	if (r) {
 		return r;
 	}
@@ -295,7 +306,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 	return 0;
 }
 
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq;
 
@@ -305,20 +316,22 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 	 */
 	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
 	if (seq >= rdev->fence_drv[ring].seq) {
-		/* nothing to wait for, last_seq is already the last emited fence */
-		return 0;
+		/* nothing to wait for, last_seq is
+		   already the last emited fence */
+		return -ENOENT;
 	}
-	return radeon_fence_wait_seq(rdev, seq, ring, false);
+	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
 }
 
-int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 {
 	/* We are not protected by ring lock when reading current seq
 	 * but it's ok as wait empty is call from place where no more
 	 * activity can be scheduled so there won't be concurrent access
 	 * to seq value.
 	 */
-	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq, ring, false);
+	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq,
+				     ring, false, false);
 }
 
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
@@ -410,14 +423,16 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
 {
 	int ring;
 
+	mutex_lock(&rdev->ring_lock);
 	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 		if (!rdev->fence_drv[ring].initialized)
 			continue;
-		radeon_fence_wait_empty(rdev, ring);
+		radeon_fence_wait_empty_locked(rdev, ring);
 		wake_up_all(&rdev->fence_drv[ring].queue);
 		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
 		rdev->fence_drv[ring].initialized = false;
 	}
+	mutex_unlock(&rdev->ring_lock);
 }
@@ -270,13 +270,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 	} else {
 		struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 		if (ring->ready) {
-			struct radeon_fence *fence;
-			radeon_ring_alloc(rdev, ring, 64);
-			radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
-			radeon_fence_emit(rdev, fence);
-			radeon_ring_commit(rdev, ring);
-			radeon_fence_wait(fence, false);
-			radeon_fence_unref(&fence);
+			radeon_fence_wait_empty_locked(rdev, RADEON_RING_TYPE_GFX_INDEX);
 		}
 	}
 	radeon_unmap_vram_bos(rdev);
@@ -347,9 +347,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
 		if (ndw < ring->ring_free_dw) {
 			break;
 		}
-		mutex_unlock(&rdev->ring_lock);
-		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
-		mutex_lock(&rdev->ring_lock);
+		r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
 		if (r)
 			return r;
 	}
@@ -408,7 +406,6 @@ void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *
 {
 	int r;
 
-	mutex_lock(&rdev->ring_lock);
 	radeon_ring_free_size(rdev, ring);
 	if (ring->rptr == ring->wptr) {
 		r = radeon_ring_alloc(rdev, ring, 1);
@@ -417,7 +414,6 @@ void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *
 			radeon_ring_commit(rdev, ring);
 		}
 	}
-	mutex_unlock(&rdev->ring_lock);
 }
 
 void radeon_ring_lockup_update(struct radeon_ring *ring)