Commit f9d5355f authored by Rob Clark's avatar Rob Clark

drm/msm/gpu: Drop duplicate fence counter

The ring seqno counter duplicates the fence-context last_fence counter.
They end up getting incremented in lock-step, on the same scheduler
thread, but the split just makes things less obvious.
Signed-off-by: Rob Clark <robdclark@chromium.org>
Link: https://lore.kernel.org/r/20220411215849.297838-3-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 695383a1
...@@ -1235,7 +1235,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu) ...@@ -1235,7 +1235,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
return; return;
DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->seqno : 0, ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS), gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR), gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
gpu_read(gpu, REG_A5XX_CP_RB_WPTR), gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
......
...@@ -1390,7 +1390,7 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu) ...@@ -1390,7 +1390,7 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
DRM_DEV_ERROR(&gpu->pdev->dev, DRM_DEV_ERROR(&gpu->pdev->dev,
"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->seqno : 0, ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
gpu_read(gpu, REG_A6XX_RBBM_STATUS), gpu_read(gpu, REG_A6XX_RBBM_STATUS),
gpu_read(gpu, REG_A6XX_CP_RB_RPTR), gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
gpu_read(gpu, REG_A6XX_CP_RB_WPTR), gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
......
...@@ -578,7 +578,7 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state) ...@@ -578,7 +578,7 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
state->ring[i].fence = gpu->rb[i]->memptrs->fence; state->ring[i].fence = gpu->rb[i]->memptrs->fence;
state->ring[i].iova = gpu->rb[i]->iova; state->ring[i].iova = gpu->rb[i]->iova;
state->ring[i].seqno = gpu->rb[i]->seqno; state->ring[i].seqno = gpu->rb[i]->fctx->last_fence;
state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]); state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
state->ring[i].wptr = get_wptr(gpu->rb[i]); state->ring[i].wptr = get_wptr(gpu->rb[i]);
...@@ -828,7 +828,7 @@ void adreno_dump_info(struct msm_gpu *gpu) ...@@ -828,7 +828,7 @@ void adreno_dump_info(struct msm_gpu *gpu)
printk("rb %d: fence: %d/%d\n", i, printk("rb %d: fence: %d/%d\n", i,
ring->memptrs->fence, ring->memptrs->fence,
ring->seqno); ring->fctx->last_fence);
printk("rptr: %d\n", get_rptr(adreno_gpu, ring)); printk("rptr: %d\n", get_rptr(adreno_gpu, ring));
printk("rb wptr: %d\n", get_wptr(ring)); printk("rb wptr: %d\n", get_wptr(ring));
......
...@@ -523,7 +523,7 @@ static void hangcheck_handler(struct timer_list *t) ...@@ -523,7 +523,7 @@ static void hangcheck_handler(struct timer_list *t)
if (fence != ring->hangcheck_fence) { if (fence != ring->hangcheck_fence) {
/* some progress has been made.. ya! */ /* some progress has been made.. ya! */
ring->hangcheck_fence = fence; ring->hangcheck_fence = fence;
} else if (fence_before(fence, ring->seqno)) { } else if (fence_before(fence, ring->fctx->last_fence)) {
/* no progress and not done.. hung! */ /* no progress and not done.. hung! */
ring->hangcheck_fence = fence; ring->hangcheck_fence = fence;
DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n", DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
...@@ -531,13 +531,13 @@ static void hangcheck_handler(struct timer_list *t) ...@@ -531,13 +531,13 @@ static void hangcheck_handler(struct timer_list *t)
DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n", DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence); gpu->name, fence);
DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n", DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
gpu->name, ring->seqno); gpu->name, ring->fctx->last_fence);
kthread_queue_work(gpu->worker, &gpu->recover_work); kthread_queue_work(gpu->worker, &gpu->recover_work);
} }
/* if still more pending work, reset the hangcheck timer: */ /* if still more pending work, reset the hangcheck timer: */
if (fence_after(ring->seqno, ring->hangcheck_fence)) if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
hangcheck_timer_reset(gpu); hangcheck_timer_reset(gpu);
/* workaround for missing irq: */ /* workaround for missing irq: */
...@@ -754,7 +754,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) ...@@ -754,7 +754,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
msm_gpu_hw_init(gpu); msm_gpu_hw_init(gpu);
submit->seqno = ++ring->seqno; submit->seqno = submit->hw_fence->seqno;
msm_rd_dump_submit(priv->rd, submit, NULL); msm_rd_dump_submit(priv->rd, submit, NULL);
......
...@@ -291,7 +291,7 @@ static inline bool msm_gpu_active(struct msm_gpu *gpu) ...@@ -291,7 +291,7 @@ static inline bool msm_gpu_active(struct msm_gpu *gpu)
for (i = 0; i < gpu->nr_rings; i++) { for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i]; struct msm_ringbuffer *ring = gpu->rb[i];
if (fence_after(ring->seqno, ring->memptrs->fence)) if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
return true; return true;
} }
......
...@@ -59,7 +59,6 @@ struct msm_ringbuffer { ...@@ -59,7 +59,6 @@ struct msm_ringbuffer {
spinlock_t submit_lock; spinlock_t submit_lock;
uint64_t iova; uint64_t iova;
uint32_t seqno;
uint32_t hangcheck_fence; uint32_t hangcheck_fence;
struct msm_rbmemptrs *memptrs; struct msm_rbmemptrs *memptrs;
uint64_t memptrs_iova; uint64_t memptrs_iova;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment