Commit 50248a3e authored by Lucas Stach

drm/etnaviv: always start/stop scheduler in timeout processing

The drm scheduler currently expects that the stop/start sequence is always
executed in the timeout handling, as the job at the head of the hardware
execution list is always removed from the ring mirror before the driver
function is called and only inserted back into the list when starting the
scheduler.

This adds some unnecessary overhead if the timeout handler determines
that the GPU is still executing jobs normally and just wishes to extend
the timeout, but a better solution requires a major rearchitecture of the
scheduler, which is not applicable as a fix.

Fixes: 135517d3 ("drm/scheduler: Avoid accessing freed bad job.")
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Tested-by: Russell King <rmk+kernel@armlinux.org.uk>
parent 2c5bf028
...@@ -89,12 +89,15 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) ...@@ -89,12 +89,15 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
u32 dma_addr; u32 dma_addr;
int change; int change;
/* block scheduler */
drm_sched_stop(&gpu->sched, sched_job);
/* /*
* If the GPU managed to complete this jobs fence, the timout is * If the GPU managed to complete this jobs fence, the timout is
* spurious. Bail out. * spurious. Bail out.
*/ */
if (dma_fence_is_signaled(submit->out_fence)) if (dma_fence_is_signaled(submit->out_fence))
return; goto out_no_timeout;
/* /*
* If the GPU is still making forward progress on the front-end (which * If the GPU is still making forward progress on the front-end (which
...@@ -105,12 +108,9 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) ...@@ -105,12 +108,9 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
change = dma_addr - gpu->hangcheck_dma_addr; change = dma_addr - gpu->hangcheck_dma_addr;
if (change < 0 || change > 16) { if (change < 0 || change > 16) {
gpu->hangcheck_dma_addr = dma_addr; gpu->hangcheck_dma_addr = dma_addr;
return; goto out_no_timeout;
} }
/* block scheduler */
drm_sched_stop(&gpu->sched, sched_job);
if(sched_job) if(sched_job)
drm_sched_increase_karma(sched_job); drm_sched_increase_karma(sched_job);
...@@ -120,6 +120,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) ...@@ -120,6 +120,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
drm_sched_resubmit_jobs(&gpu->sched); drm_sched_resubmit_jobs(&gpu->sched);
out_no_timeout:
/* restart scheduler after GPU is usable again */ /* restart scheduler after GPU is usable again */
drm_sched_start(&gpu->sched, true); drm_sched_start(&gpu->sched, true);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment