Commit 475be514 authored by Dave Airlie

Merge tag 'drm-misc-fixes-2024-10-02' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

Short summary of fixes pull:

panthor:
- Set FOP_UNSIGNED_OFFSET in fops instance
- Acquire lock in panthor_vm_prepare_map_op_ctx()
- Avoid uninitialized variable in tick_ctx_cleanup()
- Do not block scheduler queue if work is pending
- Do not add write fences to the shared BOs

scheduler:
- Fix locking in drm_sched_entity_modify_sched()
- Fix pointer deref if entity queue changes
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20241002151528.GA300287@linux.fritz.box
parents 156cc376 f9e7ac6e
@@ -1383,6 +1383,7 @@ static const struct file_operations panthor_drm_driver_fops = {
         .read = drm_read,
         .llseek = noop_llseek,
         .mmap = panthor_mmap,
+        .fop_flags = FOP_UNSIGNED_OFFSET,
 };
 
 #ifdef CONFIG_DEBUG_FS
...
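
A note on the FOP_UNSIGNED_OFFSET hunk above: DRM places the "fake" mmap offsets of GEM objects high in the file offset space, beyond what a signed offset can express, and the VFS/mm offset sanity checks only allow that for files whose fops advertise unsigned offsets. Because panthor declares its own file_operations (with .mmap = panthor_mmap) instead of reusing a generic GEM fops helper, it has to set the flag itself. A minimal sketch of the pattern, with hypothetical names (my_driver_fops, my_mmap) standing in for a driver's own handlers:

#include <linux/fs.h>
#include <linux/module.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>

static int my_mmap(struct file *filp, struct vm_area_struct *vma); /* hypothetical driver mmap handler */

static const struct file_operations my_driver_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .compat_ioctl   = drm_compat_ioctl,
        .poll           = drm_poll,
        .read           = drm_read,
        .llseek         = noop_llseek,
        .mmap           = my_mmap,
        /* GEM mmap offsets may not fit in a signed loff_t; tell the VFS
         * to treat offsets on this file as unsigned. */
        .fop_flags      = FOP_UNSIGNED_OFFSET,
};
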
@@ -1251,9 +1251,17 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
                 goto err_cleanup;
         }
 
+        /* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
+         * pre-allocated BO if the <BO,VM> association exists. Given we
+         * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
+         * be called immediately, and we have to hold the VM resv lock when
+         * calling this function.
+         */
+        dma_resv_lock(panthor_vm_resv(vm), NULL);
         mutex_lock(&bo->gpuva_list_lock);
         op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
         mutex_unlock(&bo->gpuva_list_lock);
+        dma_resv_unlock(panthor_vm_resv(vm));
 
         /* If the a vm_bo for this <VM,BO> combination exists, it already
          * retains a pin ref, and we can release the one we took earlier.
...
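
The new comment above is the heart of this fix: drm_gpuvm_bo_obtain_prealloc() may drop the last reference on the pre-allocated vm_bo, which makes drm_gpuvm_bo_destroy() run right away, and that destructor expects the VM's reservation lock to be held, hence the dma_resv_lock()/dma_resv_unlock() pair around the call. A rough userspace analogy of the pattern (invented names, not kernel code): any put() that might free the object has to be called with the lock the destructor relies on.

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t resv_lock = PTHREAD_MUTEX_INITIALIZER;
static int resv_held; /* poor man's lockdep_assert_held() */

struct vm_bo {
        int refcount;
};

static void vm_bo_destroy(struct vm_bo *bo)
{
        /* Like drm_gpuvm_bo_destroy(): teardown is only safe while the
         * reservation lock is held. */
        assert(resv_held);
        free(bo);
}

static void vm_bo_put(struct vm_bo *bo)
{
        if (--bo->refcount == 0)
                vm_bo_destroy(bo); /* may run immediately, as in the fix above */
}

int main(void)
{
        struct vm_bo *bo = calloc(1, sizeof(*bo));

        bo->refcount = 1;

        pthread_mutex_lock(&resv_lock);   /* cf. dma_resv_lock(panthor_vm_resv(vm), NULL) */
        resv_held = 1;
        vm_bo_put(bo);                    /* last ref: destructor runs under the lock */
        resv_held = 0;
        pthread_mutex_unlock(&resv_lock);

        return 0;
}
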
@@ -1103,7 +1103,13 @@ cs_slot_sync_queue_state_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs
                 list_move_tail(&group->wait_node,
                                &group->ptdev->scheduler->groups.waiting);
         }
-        group->blocked_queues |= BIT(cs_id);
+
+        /* The queue is only blocked if there's no deferred operation
+         * pending, which can be checked through the scoreboard status.
+         */
+        if (!cs_iface->output->status_scoreboards)
+                group->blocked_queues |= BIT(cs_id);
+
         queue->syncwait.gpu_va = cs_iface->output->status_wait_sync_ptr;
         queue->syncwait.ref = cs_iface->output->status_wait_sync_value;
         status_wait_cond = cs_iface->output->status_wait & CS_STATUS_WAIT_SYNC_COND_MASK;
@@ -2046,6 +2052,7 @@ static void
 tick_ctx_cleanup(struct panthor_scheduler *sched,
                  struct panthor_sched_tick_ctx *ctx)
 {
+        struct panthor_device *ptdev = sched->ptdev;
         struct panthor_group *group, *tmp;
         u32 i;
@@ -2054,7 +2061,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
                 /* If everything went fine, we should only have groups
                  * to be terminated in the old_groups lists.
                  */
-                drm_WARN_ON(&group->ptdev->base, !ctx->csg_upd_failed_mask &&
+                drm_WARN_ON(&ptdev->base, !ctx->csg_upd_failed_mask &&
                             group_can_run(group));
 
                 if (!group_can_run(group)) {
@@ -2077,7 +2084,7 @@ tick_ctx_cleanup(struct panthor_scheduler *sched,
                 /* If everything went fine, the groups to schedule lists should
                  * be empty.
                  */
-                drm_WARN_ON(&group->ptdev->base,
+                drm_WARN_ON(&ptdev->base,
                             !ctx->csg_upd_failed_mask && !list_empty(&ctx->groups[i]));
 
                 list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) {
@@ -3436,13 +3443,8 @@ void panthor_job_update_resvs(struct drm_exec *exec, struct drm_sched_job *sched
 {
         struct panthor_job *job = container_of(sched_job, struct panthor_job, base);
 
-        /* Still not sure why we want USAGE_WRITE for external objects, since I
-         * was assuming this would be handled through explicit syncs being imported
-         * to external BOs with DMA_BUF_IOCTL_IMPORT_SYNC_FILE, but other drivers
-         * seem to pass DMA_RESV_USAGE_WRITE, so there must be a good reason.
-         */
         panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,
-                                DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
+                                DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
 }
 
 void panthor_sched_unplug(struct panthor_device *ptdev)
...
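
On the last hunk: the job's finished fence is now attached to shared (external) BOs with DMA_RESV_USAGE_BOOKKEEP instead of DMA_RESV_USAGE_WRITE. As the removed comment hints, panthor relies on explicit synchronization, and BOOKKEEP fences are invisible to implicit-sync consumers. An illustrative fragment only (kernel context, not buildable standalone; wait_for_implicit_fences is a hypothetical helper and obj stands for any imported GEM object):

#include <linux/dma-resv.h>
#include <linux/sched.h>
#include <drm/drm_gem.h>

/* Implicit-sync consumers wait with DMA_RESV_USAGE_READ or
 * DMA_RESV_USAGE_WRITE depending on their access; either way,
 * BOOKKEEP fences are skipped, so panthor's job fences no longer
 * stall importers that rely on implicit synchronization. */
static long wait_for_implicit_fences(struct drm_gem_object *obj)
{
        return dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
                                     true, MAX_SCHEDULE_TIMEOUT);
}
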
@@ -133,8 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
 {
         WARN_ON(!num_sched_list || !sched_list);
 
+        spin_lock(&entity->rq_lock);
         entity->sched_list = sched_list;
         entity->num_sched_list = num_sched_list;
+        spin_unlock(&entity->rq_lock);
 }
 EXPORT_SYMBOL(drm_sched_entity_modify_sched);
@@ -597,6 +599,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
         /* first job wakes up scheduler */
         if (first) {
+                struct drm_gpu_scheduler *sched;
+                struct drm_sched_rq *rq;
+
                 /* Add the entity to the run queue */
                 spin_lock(&entity->rq_lock);
                 if (entity->stopped) {
@@ -606,13 +611,16 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
                         return;
                 }
 
-                drm_sched_rq_add_entity(entity->rq, entity);
+                rq = entity->rq;
+                sched = rq->sched;
+                drm_sched_rq_add_entity(rq, entity);
                 spin_unlock(&entity->rq_lock);
 
                 if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
                         drm_sched_rq_update_fifo(entity, submit_ts);
 
-                drm_sched_wakeup(entity->rq->sched);
+                drm_sched_wakeup(sched);
         }
 }
 EXPORT_SYMBOL(drm_sched_entity_push_job);
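
Both scheduler fixes close the same kind of race: entity->rq (and, via drm_sched_entity_modify_sched(), the entity's scheduler list) can be retargeted by another thread, so the push path now snapshots rq and rq->sched while entity->rq_lock is held and only uses those snapshots after unlocking. A small self-contained userspace analogy of that snapshot-under-lock pattern (names invented for the demo, not scheduler code):

#include <pthread.h>
#include <stdio.h>

struct runqueue { const char *name; };

static struct runqueue rq_a = { "rq_a" }, rq_b = { "rq_b" };
static struct runqueue *current_rq = &rq_a;
static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

static void push_job(void)
{
        struct runqueue *rq;

        pthread_mutex_lock(&rq_lock);
        rq = current_rq;                /* snapshot under the lock, like rq/sched in the fix */
        pthread_mutex_unlock(&rq_lock);

        /* Safe: only the snapshot is used after unlocking. Dereferencing
         * current_rq here instead would race with modify_sched(). */
        printf("woke up scheduler of %s\n", rq->name);
}

static void *modify_sched(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&rq_lock);
        current_rq = &rq_b;             /* entity->rq being retargeted concurrently */
        pthread_mutex_unlock(&rq_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, modify_sched, NULL);
        push_job();
        pthread_join(t, NULL);
        return 0;
}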