Commit a11c4711 authored by Boris Brezillon

drm/panfrost: Simplify the reset serialization logic

Now that we can pass our own workqueue to drm_sched_init(), we can use
a single ordered workqueue for both the scheduler timeout handler (tdr)
and our own reset work (which we use when the reset is not caused by a
fault/timeout on a specific job, e.g. when the AS_ACTIVE bit is stuck).
This guarantees that the timeout handlers and the reset handler can't
run concurrently, which drastically simplifies the locking.
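
The serialization comes entirely from that ordered workqueue: the
scheduler's timeout work and the driver's reset work are queued on the
same single-threaded queue, so at most one of them runs at a time. A
minimal sketch of the pattern, using only the calls added by this patch
(error handling and the rest of the init path omitted):

    /* One ordered workqueue serializes everything reset-related. */
    pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
    INIT_WORK(&pfdev->reset.work, panfrost_reset_work);

    /* The scheduler timeout handler (tdr) runs on that queue... */
    drm_sched_init(&js->queue[j].sched, &panfrost_sched_ops,
                   1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
                   pfdev->reset.wq,    /* timeout_wq */
                   NULL, "pan_js");

    /* ...and so does the driver-triggered reset, queued from
     * panfrost_device_schedule_reset(). */
    atomic_set(&pfdev->reset.pending, 1);
    queue_work(pfdev->reset.wq, &pfdev->reset.work);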

v5:
* Don't call cancel_delayed_work_sync() on the timeout works in the reset
  path (those works are canceled in drm_sched_stop())

v4:
* Actually pass the reset workqueue to drm_sched_init()
* Don't call cancel_work_sync() in panfrost_reset(). It would deadlock
  since panfrost_reset() might be called from the reset work, which is
  still executing, and cancel_work_sync() would wait for that handler to
  return. Checking the reset pending status should avoid spurious resets
  (see the sketch after these notes)

v3:
* New patch
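
The pending-flag handshake mentioned in the v4 note is condensed below
(a sketch of the relevant lines from panfrost_job_timedout() and
panfrost_reset(); the full bodies are in the diff):

    /* panfrost_job_timedout(): request and perform the reset directly,
     * the handler already runs on the ordered reset workqueue. */
    atomic_set(&pfdev->reset.pending, 1);
    panfrost_reset(pfdev, sched_job);

    /* panfrost_reset(): bail out when no reset is actually pending
     * (e.g. a previous run on the ordered queue already handled it),
     * instead of calling cancel_work_sync() on the reset work. */
    if (!atomic_read(&pfdev->reset.pending))
        return;
    ...
    panfrost_device_reset(pfdev);
    /* GPU has been reset, we can clear the reset pending bit. */
    atomic_set(&pfdev->reset.pending, 0);
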
Suggested-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210630062751.2832545-10-boris.brezillon@collabora.com
parent 070ce765
@@ -109,6 +109,7 @@ struct panfrost_device {
         struct mutex sched_lock;
 
         struct {
+                struct workqueue_struct *wq;
                 struct work_struct work;
                 atomic_t pending;
         } reset;
@@ -247,9 +248,8 @@ const char *panfrost_exception_name(u32 exception_code);
 static inline void
 panfrost_device_schedule_reset(struct panfrost_device *pfdev)
 {
-        /* Schedule a reset if there's no reset in progress. */
-        if (!atomic_xchg(&pfdev->reset.pending, 1))
-                schedule_work(&pfdev->reset.work);
+        atomic_set(&pfdev->reset.pending, 1);
+        queue_work(pfdev->reset.wq, &pfdev->reset.work);
 }
 
 #endif
@@ -25,17 +25,8 @@
 #define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
 #define job_read(dev, reg) readl(dev->iomem + (reg))
 
-enum panfrost_queue_status {
-        PANFROST_QUEUE_STATUS_ACTIVE,
-        PANFROST_QUEUE_STATUS_STOPPED,
-        PANFROST_QUEUE_STATUS_STARTING,
-        PANFROST_QUEUE_STATUS_FAULT_PENDING,
-};
-
 struct panfrost_queue_state {
         struct drm_gpu_scheduler sched;
-        atomic_t status;
-        struct mutex lock;
         u64 fence_context;
         u64 emit_seqno;
 };
@@ -368,57 +359,67 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
         job_write(pfdev, JOB_INT_MASK, irq_mask);
 }
 
-static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue,
-                                    struct drm_sched_job *bad)
+static void panfrost_reset(struct panfrost_device *pfdev,
+                           struct drm_sched_job *bad)
 {
-        enum panfrost_queue_status old_status;
-        bool stopped = false;
+        unsigned int i;
+        bool cookie;
 
-        mutex_lock(&queue->lock);
-        old_status = atomic_xchg(&queue->status,
-                                 PANFROST_QUEUE_STATUS_STOPPED);
-        if (old_status == PANFROST_QUEUE_STATUS_STOPPED)
-                goto out;
+        if (!atomic_read(&pfdev->reset.pending))
+                return;
+
+        /* Stop the schedulers.
+         *
+         * FIXME: We temporarily get out of the dma_fence_signalling section
+         * because the cleanup path generate lockdep splats when taking locks
+         * to release job resources. We should rework the code to follow this
+         * pattern:
+         *
+         *      try_lock
+         *      if (locked)
+         *              release
+         *      else
+         *              schedule_work_to_release_later
+         */
+        for (i = 0; i < NUM_JOB_SLOTS; i++)
+                drm_sched_stop(&pfdev->js->queue[i].sched, bad);
+
+        cookie = dma_fence_begin_signalling();
 
-        WARN_ON(old_status != PANFROST_QUEUE_STATUS_ACTIVE);
-        drm_sched_stop(&queue->sched, bad);
         if (bad)
                 drm_sched_increase_karma(bad);
 
-        stopped = true;
-
-        /*
-         * Set the timeout to max so the timer doesn't get started
-         * when we return from the timeout handler (restored in
-         * panfrost_scheduler_start()).
-         */
-        queue->sched.timeout = MAX_SCHEDULE_TIMEOUT;
-
-out:
-        mutex_unlock(&queue->lock);
-
-        return stopped;
-}
-
-static void panfrost_scheduler_start(struct panfrost_queue_state *queue)
-{
-        enum panfrost_queue_status old_status;
-
-        mutex_lock(&queue->lock);
-        old_status = atomic_xchg(&queue->status,
-                                 PANFROST_QUEUE_STATUS_STARTING);
-        WARN_ON(old_status != PANFROST_QUEUE_STATUS_STOPPED);
-
-        /* Restore the original timeout before starting the scheduler. */
-        queue->sched.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS);
-        drm_sched_resubmit_jobs(&queue->sched);
-        drm_sched_start(&queue->sched, true);
-        old_status = atomic_xchg(&queue->status,
-                                 PANFROST_QUEUE_STATUS_ACTIVE);
-        if (old_status == PANFROST_QUEUE_STATUS_FAULT_PENDING)
-                drm_sched_fault(&queue->sched);
-        mutex_unlock(&queue->lock);
+        spin_lock(&pfdev->js->job_lock);
+        for (i = 0; i < NUM_JOB_SLOTS; i++) {
+                if (pfdev->jobs[i]) {
+                        pm_runtime_put_noidle(pfdev->dev);
+                        panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
+                        pfdev->jobs[i] = NULL;
+                }
+        }
+        spin_unlock(&pfdev->js->job_lock);
+
+        panfrost_device_reset(pfdev);
+
+        /* GPU has been reset, we can clear the reset pending bit. */
+        atomic_set(&pfdev->reset.pending, 0);
+
+        /* Now resubmit jobs that were previously queued but didn't have a
+         * chance to finish.
+         * FIXME: We temporarily get out of the DMA fence signalling section
+         * while resubmitting jobs because the job submission logic will
+         * allocate memory with the GFP_KERNEL flag which can trigger memory
+         * reclaim and exposes a lock ordering issue.
+         */
+        dma_fence_end_signalling(cookie);
+        for (i = 0; i < NUM_JOB_SLOTS; i++)
+                drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
+        cookie = dma_fence_begin_signalling();
+
+        for (i = 0; i < NUM_JOB_SLOTS; i++)
+                drm_sched_start(&pfdev->js->queue[i].sched, true);
+
+        dma_fence_end_signalling(cookie);
 }
 
 static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
@@ -443,11 +444,8 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
                 job_read(pfdev, JS_TAIL_LO(js)),
                 sched_job);
 
-        /* Scheduler is already stopped, nothing to do. */
-        if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
-                return DRM_GPU_SCHED_STAT_NOMINAL;
-
-        panfrost_device_schedule_reset(pfdev);
+        atomic_set(&pfdev->reset.pending, 1);
+        panfrost_reset(pfdev, sched_job);
 
         return DRM_GPU_SCHED_STAT_NOMINAL;
 }
@@ -474,8 +472,6 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
                 job_write(pfdev, JOB_INT_CLEAR, mask);
 
                 if (status & JOB_INT_MASK_ERR(j)) {
-                        enum panfrost_queue_status old_status;
-
                         job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);
 
                         dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
@@ -483,19 +479,7 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
                                 panfrost_exception_name(job_read(pfdev, JS_STATUS(j))),
                                 job_read(pfdev, JS_HEAD_LO(j)),
                                 job_read(pfdev, JS_TAIL_LO(j)));
-
-                        /*
-                         * When the queue is being restarted we don't report
-                         * faults directly to avoid races between the timeout
-                         * and reset handlers. panfrost_scheduler_start() will
-                         * call drm_sched_fault() after the queue has been
-                         * started if status == FAULT_PENDING.
-                         */
-                        old_status = atomic_cmpxchg(&pfdev->js->queue[j].status,
-                                                    PANFROST_QUEUE_STATUS_STARTING,
-                                                    PANFROST_QUEUE_STATUS_FAULT_PENDING);
-                        if (old_status == PANFROST_QUEUE_STATUS_ACTIVE)
-                                drm_sched_fault(&pfdev->js->queue[j].sched);
+                        drm_sched_fault(&pfdev->js->queue[j].sched);
                 }
 
                 if (status & JOB_INT_MASK_DONE(j)) {
@@ -550,56 +534,13 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
         return IRQ_WAKE_THREAD;
 }
 
-static void panfrost_reset(struct work_struct *work)
+static void panfrost_reset_work(struct work_struct *work)
 {
         struct panfrost_device *pfdev = container_of(work,
                                                      struct panfrost_device,
                                                      reset.work);
-        unsigned int i;
-        bool cookie;
 
-        cookie = dma_fence_begin_signalling();
-        for (i = 0; i < NUM_JOB_SLOTS; i++) {
-                /*
-                 * We want pending timeouts to be handled before we attempt
-                 * to stop the scheduler. If we don't do that and the timeout
-                 * handler is in flight, it might have removed the bad job
-                 * from the list, and we'll lose this job if the reset handler
-                 * enters the critical section in panfrost_scheduler_stop()
-                 * before the timeout handler.
-                 *
-                 * Timeout is set to MAX_SCHEDULE_TIMEOUT - 1 because we need
-                 * something big enough to make sure the timer will not expire
-                 * before we manage to stop the scheduler, but we can't use
-                 * MAX_SCHEDULE_TIMEOUT because drm_sched_get_cleanup_job()
-                 * considers that as 'timer is not running' and will dequeue
-                 * the job without making sure the timeout handler is not
-                 * running.
-                 */
-                pfdev->js->queue[i].sched.timeout = MAX_SCHEDULE_TIMEOUT - 1;
-                cancel_delayed_work_sync(&pfdev->js->queue[i].sched.work_tdr);
-                panfrost_scheduler_stop(&pfdev->js->queue[i], NULL);
-        }
-
-        /* All timers have been stopped, we can safely reset the pending state. */
-        atomic_set(&pfdev->reset.pending, 0);
-
-        spin_lock(&pfdev->js->job_lock);
-        for (i = 0; i < NUM_JOB_SLOTS; i++) {
-                if (pfdev->jobs[i]) {
-                        pm_runtime_put_noidle(pfdev->dev);
-                        panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
-                        pfdev->jobs[i] = NULL;
-                }
-        }
-        spin_unlock(&pfdev->js->job_lock);
-
-        panfrost_device_reset(pfdev);
-
-        for (i = 0; i < NUM_JOB_SLOTS; i++)
-                panfrost_scheduler_start(&pfdev->js->queue[i]);
-
-        dma_fence_end_signalling(cookie);
+        panfrost_reset(pfdev, NULL);
 }
 
 int panfrost_job_init(struct panfrost_device *pfdev)
@@ -607,7 +548,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
         struct panfrost_job_slot *js;
         int ret, j, irq;
 
-        INIT_WORK(&pfdev->reset.work, panfrost_reset);
+        INIT_WORK(&pfdev->reset.work, panfrost_reset_work);
 
         pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
         if (!js)
@@ -629,15 +570,18 @@ int panfrost_job_init(struct panfrost_device *pfdev)
                 return ret;
         }
 
-        for (j = 0; j < NUM_JOB_SLOTS; j++) {
-                mutex_init(&js->queue[j].lock);
+        pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
+        if (!pfdev->reset.wq)
+                return -ENOMEM;
 
+        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                 js->queue[j].fence_context = dma_fence_context_alloc(1);
 
                 ret = drm_sched_init(&js->queue[j].sched,
                                      &panfrost_sched_ops,
                                      1, 0,
-                                     msecs_to_jiffies(JOB_TIMEOUT_MS), NULL,
+                                     msecs_to_jiffies(JOB_TIMEOUT_MS),
+                                     pfdev->reset.wq,
                                      NULL, "pan_js");
                 if (ret) {
                         dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
@@ -653,6 +597,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
         for (j--; j >= 0; j--)
                 drm_sched_fini(&js->queue[j].sched);
 
+        destroy_workqueue(pfdev->reset.wq);
         return ret;
 }
@@ -665,9 +610,10 @@ void panfrost_job_fini(struct panfrost_device *pfdev)
         for (j = 0; j < NUM_JOB_SLOTS; j++) {
                 drm_sched_fini(&js->queue[j].sched);
-                mutex_destroy(&js->queue[j].lock);
         }
 
+        cancel_work_sync(&pfdev->reset.work);
+        destroy_workqueue(pfdev->reset.wq);
 }
 
 int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)