Commit 3acec63a authored by Ben Skeggs's avatar Ben Skeggs

drm/nouveau/core: protect engine context list with hardirq-safe spinlock

IRQ handlers will need access to engine contexts.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 6fa8e629
...@@ -30,6 +30,25 @@ ...@@ -30,6 +30,25 @@
#include <subdev/vm.h> #include <subdev/vm.h>
/* Find an existing engine context on @engine owned by @parent.
 *
 * Walks engine->contexts looking for an engctx whose NV_PARENT_CLASS
 * ancestor is @parent.  On a match, takes a reference on the object,
 * stores it in *pobject and returns 1; returns 0 when no such context
 * exists.
 *
 * NOTE(review): callers appear to hold engine->lock around this walk
 * (spin_lock_irqsave) — confirm before adding new call sites.
 */
static inline int
nouveau_engctx_exists(struct nouveau_object *parent,
		      struct nouveau_engine *engine, void **pobject)
{
	struct nouveau_engctx *ctx;

	list_for_each_entry(ctx, &engine->contexts, head) {
		struct nouveau_object *owner =
			nv_pclass(nv_object(ctx), NV_PARENT_CLASS);

		if (owner != parent)
			continue;

		atomic_inc(&nv_object(ctx)->refcount);
		*pobject = ctx;
		return 1;
	}

	return 0;
}
int int
nouveau_engctx_create_(struct nouveau_object *parent, nouveau_engctx_create_(struct nouveau_object *parent,
struct nouveau_object *engobj, struct nouveau_object *engobj,
...@@ -41,23 +60,22 @@ nouveau_engctx_create_(struct nouveau_object *parent, ...@@ -41,23 +60,22 @@ nouveau_engctx_create_(struct nouveau_object *parent,
struct nouveau_client *client = nouveau_client(parent); struct nouveau_client *client = nouveau_client(parent);
struct nouveau_engine *engine = nv_engine(engobj); struct nouveau_engine *engine = nv_engine(engobj);
struct nouveau_subdev *subdev = nv_subdev(engine); struct nouveau_subdev *subdev = nv_subdev(engine);
struct nouveau_engctx *engctx; struct nouveau_object *engctx;
struct nouveau_object *ctxpar; unsigned long save;
int ret; int ret;
/* use existing context for the engine if one is available */ /* check if this engine already has a context for the parent object,
mutex_lock(&subdev->mutex); * and reference it instead of creating a new one
list_for_each_entry(engctx, &engine->contexts, head) { */
ctxpar = nv_pclass(nv_object(engctx), NV_PARENT_CLASS); spin_lock_irqsave(&engine->lock, save);
if (ctxpar == parent) { ret = nouveau_engctx_exists(parent, engine, pobject);
atomic_inc(&nv_object(engctx)->refcount); spin_unlock_irqrestore(&engine->lock, save);
*pobject = engctx; if (ret)
mutex_unlock(&subdev->mutex); return ret;
return 1;
}
}
mutex_unlock(&subdev->mutex);
/* create the new context, supports creating both raw objects and
* objects backed by instance memory
*/
if (size) { if (size) {
ret = nouveau_gpuobj_create_(parent, engobj, oclass, ret = nouveau_gpuobj_create_(parent, engobj, oclass,
NV_ENGCTX_CLASS, NV_ENGCTX_CLASS,
...@@ -69,25 +87,43 @@ nouveau_engctx_create_(struct nouveau_object *parent, ...@@ -69,25 +87,43 @@ nouveau_engctx_create_(struct nouveau_object *parent,
} }
engctx = *pobject; engctx = *pobject;
if (engctx && client->vm)
atomic_inc(&client->vm->engref[nv_engidx(engobj)]);
if (ret) if (ret)
return ret; return ret;
list_add(&engctx->head, &engine->contexts); /* must take the lock again and re-check a context doesn't already
* exist (in case of a race) - the lock had to be dropped before as
* it's not possible to allocate the object with it held.
*/
spin_lock_irqsave(&engine->lock, save);
ret = nouveau_engctx_exists(parent, engine, pobject);
if (ret) {
spin_unlock_irqrestore(&engine->lock, save);
nouveau_object_ref(NULL, &engctx);
return ret;
}
if (client->vm)
atomic_inc(&client->vm->engref[nv_engidx(engobj)]);
list_add(&nv_engctx(engctx)->head, &engine->contexts);
spin_unlock_irqrestore(&engine->lock, save);
return 0; return 0;
} }
void void
nouveau_engctx_destroy(struct nouveau_engctx *engctx) nouveau_engctx_destroy(struct nouveau_engctx *engctx)
{ {
struct nouveau_object *engine = nv_object(engctx)->engine; struct nouveau_object *engobj = nv_object(engctx)->engine;
struct nouveau_engine *engine = nv_engine(engobj);
struct nouveau_client *client = nouveau_client(engctx); struct nouveau_client *client = nouveau_client(engctx);
unsigned long save;
nouveau_gpuobj_unmap(&engctx->vma); nouveau_gpuobj_unmap(&engctx->vma);
spin_lock_irqsave(&engine->lock, save);
list_del(&engctx->head); list_del(&engctx->head);
spin_unlock_irqrestore(&engine->lock, save);
if (client->vm) if (client->vm)
atomic_dec(&client->vm->engref[nv_engidx(engine)]); atomic_dec(&client->vm->engref[nv_engidx(engobj)]);
if (engctx->base.size) if (engctx->base.size)
nouveau_gpuobj_destroy(&engctx->base); nouveau_gpuobj_destroy(&engctx->base);
......
...@@ -50,5 +50,6 @@ nouveau_engine_create_(struct nouveau_object *parent, ...@@ -50,5 +50,6 @@ nouveau_engine_create_(struct nouveau_object *parent,
} }
INIT_LIST_HEAD(&engine->contexts); INIT_LIST_HEAD(&engine->contexts);
spin_lock_init(&engine->lock);
return 0; return 0;
} }
...@@ -15,7 +15,7 @@ struct nouveau_engctx { ...@@ -15,7 +15,7 @@ struct nouveau_engctx {
struct list_head head; struct list_head head;
}; };
static inline void * static inline struct nouveau_engctx *
nv_engctx(void *obj) nv_engctx(void *obj)
{ {
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA #if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
......
...@@ -11,7 +11,10 @@ struct nouveau_engine { ...@@ -11,7 +11,10 @@ struct nouveau_engine {
struct nouveau_subdev base; struct nouveau_subdev base;
struct nouveau_oclass *cclass; struct nouveau_oclass *cclass;
struct nouveau_oclass *sclass; struct nouveau_oclass *sclass;
struct list_head contexts; struct list_head contexts;
spinlock_t lock;
void (*tile_prog)(struct nouveau_engine *, int region); void (*tile_prog)(struct nouveau_engine *, int region);
int (*tlb_flush)(struct nouveau_engine *); int (*tlb_flush)(struct nouveau_engine *);
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment