Commit b634acb2 authored by Dave Airlie


Merge tag 'drm-misc-fixes-2024-10-10' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

Short summary of fixes pull:

fbdev-dma:
- Only clean up deferred I/O if instantiated

nouveau:
- dmem: Fix privileged error in copy engine channel; Fix possible
data leak in migrate_to_ram()
- gsp: Fix coding style

sched:
- Avoid leaking lockdep map

v3d:
- Stop active perfmon before destroying it

vc4:
- Stop active perfmon before destroying it

xe:
- Drop GuC submit_wq pool
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20241010133708.GA461532@localhost.localdomain
parents fe4a435b fcddc71e

@@ -50,7 +50,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
 	if (!fb_helper->dev)
 		return;
 
-	fb_deferred_io_cleanup(info);
+	if (info->fbdefio)
+		fb_deferred_io_cleanup(info);
 	drm_fb_helper_fini(fb_helper);
 
 	drm_client_buffer_vunmap(fb_helper->buffer);
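
The fbdev-dma change above only adds a guard: fb_deferred_io_cleanup() must not run unless deferred I/O was actually instantiated (info->fbdefio non-NULL). Below is a minimal standalone C sketch of that "clean up only if instantiated" pattern; it is an illustration only, and the names (fake_info, fake_defio, defio_cleanup) are made up rather than taken from the kernel.

/*
 * Minimal userspace sketch (not kernel code) of the guard pattern used in
 * the fbdev-dma fix: tear down an optional sub-object only if it was ever
 * set up. All names here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_defio {
        int pending;
};

struct fake_info {
        struct fake_defio *fbdefio;     /* NULL unless deferred I/O was instantiated */
};

static void defio_cleanup(struct fake_info *info)
{
        /* Guard: running cleanup on a never-initialized defio would be a bug. */
        if (info->fbdefio) {
                free(info->fbdefio);
                info->fbdefio = NULL;
        }
}

int main(void)
{
        struct fake_info a = { .fbdefio = calloc(1, sizeof(struct fake_defio)) };
        struct fake_info b = { .fbdefio = NULL };       /* deferred I/O never set up */

        defio_cleanup(&a);      /* frees the instantiated defio */
        defio_cleanup(&b);      /* safely does nothing */
        printf("cleanup done\n");
        return 0;
}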

@@ -210,7 +210,7 @@ struct nvkm_gsp {
 	} *rm;
 
 	struct {
-		struct mutex mutex;;
+		struct mutex mutex;
 		struct idr idr;
 	} client_id;

@@ -193,7 +193,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
 		goto done;
 
-	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
+	dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
 	if (!dpage)
 		goto done;
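
The migrate_to_ram fix adds __GFP_ZERO so the destination page starts out zeroed: if the copy back from device memory is partial or fails, the untouched remainder cannot expose stale data. A rough userspace analogue, using calloc() in place of a zeroed page allocation (sizes and names illustrative):

/*
 * If a destination buffer may only be partially filled, allocate it zeroed
 * so the unfilled tail cannot leak whatever the allocator handed back.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096

static unsigned char *alloc_dst_page(void)
{
        /* calloc() plays the role of GFP_HIGHUSER | __GFP_ZERO here. */
        return calloc(1, PAGE_SZ);
}

int main(void)
{
        unsigned char src[PAGE_SZ / 2];
        unsigned char *dst = alloc_dst_page();

        if (!dst)
                return 1;

        memset(src, 0xab, sizeof(src));
        /* Partial copy: only half the destination page is written. */
        memcpy(dst, src, sizeof(src));

        /* The tail is guaranteed zero, not stale allocator contents. */
        printf("dst[PAGE_SZ - 1] = %d\n", dst[PAGE_SZ - 1]);
        free(dst);
        return 0;
}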

@@ -331,7 +331,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm)
 		return;
 	}
 
-	ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
+	ret = nouveau_channel_new(&drm->client, true, runm, NvDmaFB, NvDmaTT, &drm->cechan);
 	if (ret)
 		NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 }

@@ -87,6 +87,12 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_scheduler_trace.h"
 
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map drm_sched_lockdep_map = {
+	.name = "drm_sched_lockdep_map"
+};
+#endif
+
 #define to_drm_sched_job(sched_job) \
 	container_of((sched_job), struct drm_sched_job, queue_node)

@@ -1269,7 +1275,12 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		sched->submit_wq = submit_wq;
 		sched->own_submit_wq = false;
 	} else {
+#ifdef CONFIG_LOCKDEP
+		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
+								       &drm_sched_lockdep_map);
+#else
 		sched->submit_wq = alloc_ordered_workqueue(name, 0);
+#endif
 		if (!sched->submit_wq)
 			return -ENOMEM;
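
For context on the drm_sched_init() hunk: when the caller passes its own submit_wq, the scheduler merely borrows it (own_submit_wq = false); when it passes NULL, the scheduler allocates an ordered workqueue it owns, and that is the branch where the fix switches to alloc_ordered_workqueue_lockdep_map() with one shared static map so repeated scheduler create/destroy cycles stop leaking lockdep maps. A small standalone C sketch of the borrow-vs-own pattern follows; the types and names (toy_sched, toy_wq) are invented for illustration, not taken from the kernel.

/*
 * Borrow-vs-own: a constructor either borrows a caller-supplied resource or
 * creates one it owns, and teardown only frees what it created itself.
 * This mirrors why xe can now simply pass NULL for submit_wq.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct toy_wq { int unused; };

struct toy_sched {
        struct toy_wq *submit_wq;
        bool own_submit_wq;
};

static int toy_sched_init(struct toy_sched *s, struct toy_wq *submit_wq)
{
        if (submit_wq) {
                s->submit_wq = submit_wq;       /* borrowed from the caller */
                s->own_submit_wq = false;
        } else {
                s->submit_wq = calloc(1, sizeof(*s->submit_wq));        /* allocated, owned */
                if (!s->submit_wq)
                        return -1;
                s->own_submit_wq = true;
        }
        return 0;
}

static void toy_sched_fini(struct toy_sched *s)
{
        /* Only tear down what we created ourselves. */
        if (s->own_submit_wq)
                free(s->submit_wq);
        s->submit_wq = NULL;
}

int main(void)
{
        struct toy_wq shared = { 0 };
        struct toy_sched a, b;

        toy_sched_init(&a, &shared);    /* caller-provided queue */
        toy_sched_init(&b, NULL);       /* scheduler-owned queue */
        toy_sched_fini(&a);
        toy_sched_fini(&b);
        printf("ok\n");
        return 0;
}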

@@ -306,6 +306,11 @@ void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv)
 static int v3d_perfmon_idr_del(int id, void *elem, void *data)
 {
 	struct v3d_perfmon *perfmon = elem;
+	struct v3d_dev *v3d = (struct v3d_dev *)data;
+
+	/* If the active perfmon is being destroyed, stop it first */
+	if (perfmon == v3d->active_perfmon)
+		v3d_perfmon_stop(v3d, perfmon, false);
 
 	v3d_perfmon_put(perfmon);

@@ -314,8 +319,10 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data)
 void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv)
 {
+	struct v3d_dev *v3d = v3d_priv->v3d;
+
 	mutex_lock(&v3d_priv->perfmon.lock);
-	idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL);
+	idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d);
 	idr_destroy(&v3d_priv->perfmon.idr);
 	mutex_unlock(&v3d_priv->perfmon.lock);
 	mutex_destroy(&v3d_priv->perfmon.lock);

@@ -116,6 +116,11 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file)
 static int vc4_perfmon_idr_del(int id, void *elem, void *data)
 {
 	struct vc4_perfmon *perfmon = elem;
+	struct vc4_dev *vc4 = (struct vc4_dev *)data;
+
+	/* If the active perfmon is being destroyed, stop it first */
+	if (perfmon == vc4->active_perfmon)
+		vc4_perfmon_stop(vc4, perfmon, false);
 
 	vc4_perfmon_put(perfmon);

@@ -130,7 +135,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
 		return;
 
 	mutex_lock(&vc4file->perfmon.lock);
-	idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
+	idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4);
 	idr_destroy(&vc4file->perfmon.idr);
 	mutex_unlock(&vc4file->perfmon.lock);
 	mutex_destroy(&vc4file->perfmon.lock);
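
Both the v3d and vc4 fixes follow the same shape: pass the device through idr_for_each()'s data argument so the per-entry callback can spot the still-active perfmon and stop it before dropping the reference. Below is a self-contained userspace sketch of that callback-with-context pattern; the iterator and all names are hypothetical stand-ins, not the kernel IDR API.

/*
 * Callback-with-context iteration: the iterator forwards an opaque pointer
 * to each callback so the callback can compare entries against device state
 * (the active perfmon) and stop it before the entry is released.
 */
#include <stdio.h>
#include <stddef.h>

struct perfmon { int id; };

struct dev {
        struct perfmon *active_perfmon;
};

static void dev_perfmon_stop(struct dev *d, struct perfmon *pm)
{
        printf("stopping active perfmon %d before destroy\n", pm->id);
        d->active_perfmon = NULL;
}

/* Per-entry callback, shaped like the idr_for_each() callbacks in the diff. */
static int perfmon_del(int id, void *elem, void *data)
{
        struct perfmon *pm = elem;
        struct dev *d = data;

        /* If the active perfmon is being destroyed, stop it first. */
        if (pm == d->active_perfmon)
                dev_perfmon_stop(d, pm);

        printf("releasing perfmon %d\n", id);
        return 0;
}

static void for_each_entry(struct perfmon **table, size_t n,
                           int (*fn)(int, void *, void *), void *data)
{
        for (size_t i = 0; i < n; i++)
                if (table[i])
                        fn((int)i, table[i], data);     /* context reaches the callback */
}

int main(void)
{
        struct perfmon a = { .id = 0 }, b = { .id = 1 };
        struct perfmon *table[] = { &a, &b };
        struct dev d = { .active_perfmon = &b };

        /* Passing &d (instead of NULL) is the essence of the fix. */
        for_each_entry(table, 2, perfmon_del, &d);
        return 0;
}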

@@ -224,80 +224,11 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
 		   EXEC_QUEUE_STATE_BANNED));
 }
 
-#ifdef CONFIG_PROVE_LOCKING
-static int alloc_submit_wq(struct xe_guc *guc)
-{
-	int i;
-
-	for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
-		guc->submission_state.submit_wq_pool[i] =
-			alloc_ordered_workqueue("submit_wq", 0);
-		if (!guc->submission_state.submit_wq_pool[i])
-			goto err_free;
-	}
-
-	return 0;
-
-err_free:
-	while (i)
-		destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);
-
-	return -ENOMEM;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-	int i;
-
-	for (i = 0; i < NUM_SUBMIT_WQ; ++i)
-		destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
-	int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ;
-
-	return guc->submission_state.submit_wq_pool[idx];
-}
-#else
-static int alloc_submit_wq(struct xe_guc *guc)
-{
-	return 0;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
-	return NULL;
-}
-#endif
-
 static void xe_guc_submit_fini(struct xe_guc *guc)
 {
 	struct xe_device *xe = guc_to_xe(guc);
 	struct xe_gt *gt = guc_to_gt(guc);
 	int ret;
 
 	ret = wait_event_timeout(guc->submission_state.fini_wq,
				 xa_empty(&guc->submission_state.exec_queue_lookup),
				 HZ * 5);
 
 	drain_workqueue(xe->destroy_wq);
 
 	xe_gt_assert(gt, ret);
 }
 
 static void guc_submit_fini(struct drm_device *drm, void *arg)
 {
 	struct xe_guc *guc = arg;
 
 	xe_guc_submit_fini(guc);
 	xa_destroy(&guc->submission_state.exec_queue_lookup);
-	free_submit_wq(guc);
 }
 
 static void guc_submit_wedged_fini(void *arg)

@@ -359,10 +290,6 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
 	if (err)
 		return err;
 
-	err = alloc_submit_wq(guc);
-	if (err)
-		return err;
-
 	gt->exec_queue_ops = &guc_exec_queue_ops;
 
 	xa_init(&guc->submission_state.exec_queue_lookup);

@@ -1482,8 +1409,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
 	timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
		  msecs_to_jiffies(q->sched_props.job_timeout_ms);
 	err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
-			    get_submit_wq(guc),
-			    q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
+			    NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
			    timeout, guc_to_gt(guc)->ordered_wq, NULL,
			    q->name, gt_to_xe(q->gt)->drm.dev);
 	if (err)

@@ -72,13 +72,6 @@ struct xe_guc {
 		atomic_t stopped;
 		/** @submission_state.lock: protects submission state */
 		struct mutex lock;
-#ifdef CONFIG_PROVE_LOCKING
-#define NUM_SUBMIT_WQ	256
-		/** @submission_state.submit_wq_pool: submission ordered workqueues pool */
-		struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
-		/** @submission_state.submit_wq_idx: submission ordered workqueue index */
-		int submit_wq_idx;
-#endif
 		/** @submission_state.enabled: submission is enabled */
 		bool enabled;
 		/** @submission_state.fini_wq: submit fini wait queue */