Commit 7f06c236 authored by monk.liu, committed by Alex Deucher

drm/amdgpu: move wait_queue_head from adev to ring (v2)

This avoids unnecessary wake-ups between rings.

v2: move the wait_queue_head from the ring itself into fence_drv
Signed-off-by: monk.liu <monk.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
parent e2955155
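
The effect of the change is easiest to see in the first two hunks below: the single device-wide wait queue becomes a per-ring one. A minimal sketch of the before/after layout (field names taken from this patch, all other members elided):

```c
/* before: one wait queue on the device, shared by every ring, so a
 * fence signaling on any ring woke the waiters of all of them */
struct amdgpu_device {
	/* ... */
	wait_queue_head_t fence_queue;
};

/* after (v2): one wait queue per ring, kept in its fence driver, so
 * processing fences on one ring only wakes that ring's own waiters */
struct amdgpu_fence_driver {
	/* ... */
	wait_queue_head_t fence_queue;
};
```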
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -391,6 +391,7 @@ struct amdgpu_fence_driver {
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
 	struct delayed_work		lockup_work;
+	wait_queue_head_t		fence_queue;
 };
 
 /* some special values for the owner field */
@@ -2036,7 +2037,6 @@ struct amdgpu_device {
 	struct amdgpu_irq_src		hpd_irq;
 
 	/* rings */
-	wait_queue_head_t		fence_queue;
 	unsigned			fence_context;
 	struct mutex			ring_lock;
 	unsigned			num_rings;
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -126,7 +126,8 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 	(*fence)->ring = ring;
 	(*fence)->owner = owner;
 	fence_init(&(*fence)->base, &amdgpu_fence_ops,
-		&adev->fence_queue.lock, adev->fence_context + ring->idx,
+		&ring->fence_drv.fence_queue.lock,
+		adev->fence_context + ring->idx,
 		(*fence)->seq);
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       (*fence)->seq,
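
A subtlety worth noting in the hunk above: fence_init() takes a spinlock that protects the fence, and the driver reuses the wait queue's built-in lock for it, so fence state and the waiter list are serialized by a single lock; the patch only switches which queue's lock is borrowed. A hedged illustration of the same pattern with the fence API of this kernel generation (the my_* names are illustrative, not from the patch):

```c
#include <linux/wait.h>
#include <linux/fence.h>

struct my_obj {
	struct fence base;
	wait_queue_head_t queue;
};

/* the fence borrows queue.lock, so fence_signal_locked() and
 * __add_wait_queue()/__remove_wait_queue() serialize on one lock,
 * mirroring what amdgpu_fence_emit() does above */
static void my_obj_init(struct my_obj *obj, const struct fence_ops *ops,
			unsigned context, unsigned seqno)
{
	init_waitqueue_head(&obj->queue);
	fence_init(&obj->base, ops, &obj->queue.lock, context, seqno);
}
```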
@@ -164,7 +165,7 @@ static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int fl
 	else
 		FENCE_TRACE(&fence->base, "was already signaled\n");
 
-	__remove_wait_queue(&adev->fence_queue, &fence->fence_wake);
+	__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
 	fence_put(&fence->base);
 } else
 	FENCE_TRACE(&fence->base, "pending\n");
@@ -265,8 +266,9 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
 		return;
 	}
 
-	if (amdgpu_fence_activity(ring))
-		wake_up_all(&ring->adev->fence_queue);
+	if (amdgpu_fence_activity(ring)) {
+		wake_up_all(&ring->fence_drv.fence_queue);
+	}
 	else if (amdgpu_ring_is_lockup(ring)) {
 		/* good news we believe it's a lockup */
 		dev_warn(ring->adev->dev, "GPU lockup (current fence id "
@@ -276,7 +278,7 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
 
 		/* remember that we need an reset */
 		ring->adev->needs_reset = true;
-		wake_up_all(&ring->adev->fence_queue);
+		wake_up_all(&ring->fence_drv.fence_queue);
 	}
 	up_read(&ring->adev->exclusive_lock);
 }
@@ -364,7 +366,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 		} while (amd_sched_get_handled_seq(ring->scheduler) < latest_seq);
 	}
 
-	wake_up_all(&ring->adev->fence_queue);
+	wake_up_all(&ring->fence_drv.fence_queue);
 }
 exit:
 	spin_unlock_irqrestore(&ring->fence_lock, irqflags);
@@ -427,7 +429,6 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_ring *ring = fence->ring;
-	struct amdgpu_device *adev = ring->adev;
 
 	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
 		return false;
@@ -435,7 +436,7 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 	fence->fence_wake.flags = 0;
 	fence->fence_wake.private = NULL;
 	fence->fence_wake.func = amdgpu_fence_check_signaled;
-	__add_wait_queue(&adev->fence_queue, &fence->fence_wake);
+	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
 	fence_get(f);
 	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 	return true;
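
Rather than sleeping, enable_signaling arms a wait-queue entry whose .func callback is invoked directly from wake_up_all(). A hedged sketch of that pattern with the wait_queue_t API of this kernel generation (my_* names are illustrative, not from the patch):

```c
#include <linux/wait.h>

/* called by the wake-up path with the queue's lock held; in this
 * patch the equivalent is amdgpu_fence_check_signaled(), which tests
 * the ring's last_seq and dequeues itself once the fence signaled */
static int my_wake_func(wait_queue_t *wait, unsigned mode,
			int flags, void *key)
{
	/* the return value only matters for exclusive waiters */
	return 0;
}

/* mirrors the arming done in amdgpu_fence_enable_signaling() above;
 * the caller must hold q->lock, which fence_init() reused as the
 * fence lock, so arming is already serialized against signaling */
static void my_arm_waiter(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags = 0;
	wait->private = NULL;
	wait->func = my_wake_func;
	__add_wait_queue(q, wait);
}
```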
@@ -463,152 +464,79 @@ bool amdgpu_fence_signaled(struct amdgpu_fence *fence)
 	return false;
 }
 
-/**
- * amdgpu_fence_any_seq_signaled - check if any sequence number is signaled
- *
- * @adev: amdgpu device pointer
- * @seq: sequence numbers
- *
- * Check if the last signaled fence sequnce number is >= the requested
- * sequence number (all asics).
- * Returns true if any has signaled (current value is >= requested value)
- * or false if it has not. Helper function for amdgpu_fence_wait_seq.
- */
-static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
-{
-	unsigned i;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		if (!adev->rings[i] || !seq[i])
-			continue;
-
-		if (amdgpu_fence_seq_signaled(adev->rings[i], seq[i]))
-			return true;
-	}
-
-	return false;
-}
-
-/**
- * amdgpu_fence_wait_seq_timeout - wait for a specific sequence numbers
- *
- * @adev: amdgpu device pointer
- * @target_seq: sequence number(s) we want to wait for
- * @intr: use interruptable sleep
- * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
+/*
+ * amdgpu_fence_ring_wait_seq_timeout - wait for a seq number on a specific
+ * ring to signal
+ * @ring: ring to wait on for the seq number
+ * @seq: seq number to wait for
+ * @intr: if interruptible
+ * @timeout: jiffies before timing out
  *
- * Wait for the requested sequence number(s) to be written by any ring
- * (all asics).  Sequnce number array is indexed by ring id.
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the sequence number.  Helper function
- * for amdgpu_fence_wait_*().
- * Returns remaining time if the sequence number has passed, 0 when
- * the wait timeout, or an error for all other cases.
- * -EDEADLK is returned when a GPU lockup has been detected.
+ * return value:
+ * 0: timed out, seq not signaled, and GPU not hung
+ * X (X > 0): seq signaled, with X jiffies remaining before the timeout
+ * -EDEADLK: GPU hang detected before the timeout
+ * -ERESTARTSYS: interrupted before seq signaled
+ * -EINVAL: a parameter is not valid
  */
-static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
-					  u64 *target_seq, bool intr,
-					  long timeout)
+static long amdgpu_fence_ring_wait_seq_timeout(struct amdgpu_ring *ring, uint64_t seq,
+					       bool intr, long timeout)
 {
-	uint64_t last_seq[AMDGPU_MAX_RINGS];
-	bool signaled;
-	int i;
-	long r;
-
-	if (timeout == 0) {
-		return amdgpu_fence_any_seq_signaled(adev, target_seq);
-	}
-
-	while (!amdgpu_fence_any_seq_signaled(adev, target_seq)) {
-
-		/* Save current sequence values, used to check for GPU lockups */
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			struct amdgpu_ring *ring = adev->rings[i];
-
-			if (!ring || !target_seq[i])
-				continue;
-
-			last_seq[i] = atomic64_read(&ring->fence_drv.last_seq);
-			trace_amdgpu_fence_wait_begin(adev->ddev, i, target_seq[i]);
-		}
+	struct amdgpu_device *adev = ring->adev;
+	long r = 0;
+	bool signaled = false;
+
+	BUG_ON(!ring);
+	if (seq > ring->fence_drv.sync_seq[ring->idx])
+		return -EINVAL;
 
+	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
+		return timeout;
+
+	while (1) {
 		if (intr) {
-			r = wait_event_interruptible_timeout(adev->fence_queue, (
-				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
-				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
+			r = wait_event_interruptible_timeout(ring->fence_drv.fence_queue, (
+					(signaled = amdgpu_fence_seq_signaled(ring, seq))
+					|| adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
+
+			if (r == -ERESTARTSYS) /* interrupted */
+				return r;
 		} else {
-			r = wait_event_timeout(adev->fence_queue, (
-				(signaled = amdgpu_fence_any_seq_signaled(adev, target_seq))
-				 || adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
+			r = wait_event_timeout(ring->fence_drv.fence_queue, (
+					(signaled = amdgpu_fence_seq_signaled(ring, seq))
+					|| adev->needs_reset), AMDGPU_FENCE_JIFFIES_TIMEOUT);
 		}
 
-		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			struct amdgpu_ring *ring = adev->rings[i];
-
-			if (!ring || !target_seq[i])
-				continue;
-
-			trace_amdgpu_fence_wait_end(adev->ddev, i, target_seq[i]);
+		if (signaled) {
+			/* seq signaled */
+			if (timeout == MAX_SCHEDULE_TIMEOUT)
+				return timeout;
+			return (timeout - AMDGPU_FENCE_JIFFIES_TIMEOUT - r);
+		} else if (adev->needs_reset) {
+			return -EDEADLK;
 		}
 
-		if (unlikely(r < 0))
-			return r;
-
-		if (unlikely(!signaled)) {
-
-			if (adev->needs_reset)
-				return -EDEADLK;
-
-			/* we were interrupted for some reason and fence
-			 * isn't signaled yet, resume waiting */
-			if (r)
-				continue;
-
-			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-				struct amdgpu_ring *ring = adev->rings[i];
-
-				if (!ring || !target_seq[i])
-					continue;
-
-				if (last_seq[i] != atomic64_read(&ring->fence_drv.last_seq))
-					break;
-			}
-
-			if (i != AMDGPU_MAX_RINGS)
-				continue;
-
-			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-				if (!adev->rings[i] || !target_seq[i])
-					continue;
-
-				if (amdgpu_ring_is_lockup(adev->rings[i]))
-					break;
-			}
-
-			if (i < AMDGPU_MAX_RINGS) {
-				/* good news we believe it's a lockup */
-				dev_warn(adev->dev, "GPU lockup (waiting for "
-					 "0x%016llx last fence id 0x%016llx on"
-					 " ring %d)\n",
-					 target_seq[i], last_seq[i], i);
-
-				/* remember that we need an reset */
-				adev->needs_reset = true;
-				wake_up_all(&adev->fence_queue);
-				return -EDEADLK;
-			}
+		/* check if it's a lockup */
+		if (amdgpu_ring_is_lockup(ring)) {
+			uint64_t last_seq = atomic64_read(&ring->fence_drv.last_seq);
+
+			dev_warn(adev->dev, "GPU lockup (waiting for "
+				 "0x%016llx last fence id 0x%016llx on"
+				 " ring %d)\n",
+				 seq, last_seq, ring->idx);
+			wake_up_all(&ring->fence_drv.fence_queue);
+			return -EDEADLK;
+		}
 
-			if (timeout < MAX_SCHEDULE_TIMEOUT) {
-				timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
-				if (timeout <= 0) {
-					return 0;
-				}
-			}
+		if (timeout < MAX_SCHEDULE_TIMEOUT) {
+			timeout -= AMDGPU_FENCE_JIFFIES_TIMEOUT;
+			if (timeout < 1)
+				return 0;
 		}
 	}
-
-	return timeout;
 }
 
 /**
  * amdgpu_fence_wait - wait for a fence to signal
  *
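
The rewritten helper above documents a per-ring return contract in its comment block. A hedged sketch of how a caller would decode it (hypothetical wrapper, not part of the patch):

```c
/* hypothetical caller decoding amdgpu_fence_ring_wait_seq_timeout() */
static int my_wait_for_seq(struct amdgpu_ring *ring, uint64_t seq)
{
	long r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, true,
						    msecs_to_jiffies(500));

	if (r > 0)
		return 0;		/* signaled, r jiffies to spare */
	if (r == 0)
		return -ETIMEDOUT;	/* timed out, GPU not hung */
	return r;	/* -EDEADLK (hang), -ERESTARTSYS or -EINVAL */
}
```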
@@ -642,18 +570,15 @@ int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
  */
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
 {
-	uint64_t seq[AMDGPU_MAX_RINGS] = {};
 	long r;
+	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
 
-	seq[ring->idx] = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;
-	if (seq[ring->idx] >= ring->fence_drv.sync_seq[ring->idx]) {
-		/* nothing to wait for, last_seq is
-		   already the last emited fence */
+	if (seq >= ring->fence_drv.sync_seq[ring->idx])
 		return -ENOENT;
-	}
-	r = amdgpu_fence_wait_seq_timeout(ring->adev, seq, false, MAX_SCHEDULE_TIMEOUT);
+
+	r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, false, MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
 
 	return 0;
 }
@@ -669,21 +594,20 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
  */
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
 {
-	struct amdgpu_device *adev = ring->adev;
-	uint64_t seq[AMDGPU_MAX_RINGS] = {};
 	long r;
+	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];
 
-	seq[ring->idx] = ring->fence_drv.sync_seq[ring->idx];
-	if (!seq[ring->idx])
+	if (!seq)
 		return 0;
 
-	r = amdgpu_fence_wait_seq_timeout(adev, seq, false, MAX_SCHEDULE_TIMEOUT);
+	r = amdgpu_fence_ring_wait_seq_timeout(ring, seq, false, MAX_SCHEDULE_TIMEOUT);
+
 	if (r < 0) {
 		if (r == -EDEADLK)
 			return -EDEADLK;
 
-		dev_err(adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
+		dev_err(ring->adev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
 			ring->idx, r);
 	}
 
 	return 0;
 }
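
The two wrappers above differ only in the seq they pass: amdgpu_fence_wait_next() waits for last_seq + 1, the oldest still-outstanding fence, while amdgpu_fence_wait_empty() waits for sync_seq[idx], the newest emitted one. A hedged sketch of the usual drain-everything loop built on wait_empty (loop shape assumed, not quoted from the driver):

```c
/* hedged sketch: block until every active ring is idle */
static void my_drain_all_rings(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->ready)
			continue;
		/* waits for sync_seq[i], i.e. the last emitted fence */
		amdgpu_fence_wait_empty(ring);
	}
}
```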
@@ -898,7 +822,6 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
  */
 int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 {
-	init_waitqueue_head(&adev->fence_queue);
 	if (amdgpu_debugfs_fence_init(adev))
 		dev_err(adev->dev, "fence debugfs file creation failed\n");
@@ -927,7 +850,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 			/* no need to trigger GPU reset as we are unloading */
 			amdgpu_fence_driver_force_completion(adev);
 		}
-		wake_up_all(&adev->fence_queue);
+		wake_up_all(&ring->fence_drv.fence_queue);
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
 		if (ring->scheduler)
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -342,6 +342,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		amdgpu_fence_driver_init_ring(ring);
 	}
 
+	init_waitqueue_head(&ring->fence_drv.fence_queue);
+
 	r = amdgpu_wb_get(adev, &ring->rptr_offs);
 	if (r) {
 		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);