Commit a6419360 authored by Ben Skeggs's avatar Ben Skeggs

drm/nouveau/fifo: private mutex

nvkm_subdev.mutex is going away.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent bfa7f6a6
...@@ -40,6 +40,7 @@ struct nvkm_fifo { ...@@ -40,6 +40,7 @@ struct nvkm_fifo {
int nr; int nr;
struct list_head chan; struct list_head chan;
spinlock_t lock; spinlock_t lock;
struct mutex mutex;
struct nvkm_event uevent; /* async user trigger */ struct nvkm_event uevent; /* async user trigger */
struct nvkm_event cevent; /* channel creation event */ struct nvkm_event cevent; /* channel creation event */
......
...@@ -334,6 +334,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine) ...@@ -334,6 +334,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
nvkm_event_fini(&fifo->kevent); nvkm_event_fini(&fifo->kevent);
nvkm_event_fini(&fifo->cevent); nvkm_event_fini(&fifo->cevent);
nvkm_event_fini(&fifo->uevent); nvkm_event_fini(&fifo->uevent);
mutex_destroy(&fifo->mutex);
return data; return data;
} }
...@@ -358,6 +359,7 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device, ...@@ -358,6 +359,7 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
fifo->func = func; fifo->func = func;
INIT_LIST_HEAD(&fifo->chan); INIT_LIST_HEAD(&fifo->chan);
spin_lock_init(&fifo->lock); spin_lock_init(&fifo->lock);
mutex_init(&fifo->mutex);
if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR)) if (WARN_ON(fifo->nr > NVKM_FIFO_CHID_NR))
fifo->nr = NVKM_FIFO_CHID_NR; fifo->nr = NVKM_FIFO_CHID_NR;
......
...@@ -38,9 +38,9 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie) ...@@ -38,9 +38,9 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
struct nv04_fifo_chan *chan = nv04_fifo_chan(base); struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem; struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
mutex_lock(&chan->fifo->base.engine.subdev.mutex); mutex_lock(&chan->fifo->base.mutex);
nvkm_ramht_remove(imem->ramht, cookie); nvkm_ramht_remove(imem->ramht, cookie);
mutex_unlock(&chan->fifo->base.engine.subdev.mutex); mutex_unlock(&chan->fifo->base.mutex);
} }
static int static int
...@@ -63,10 +63,10 @@ nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base, ...@@ -63,10 +63,10 @@ nv04_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
return -EINVAL; return -EINVAL;
} }
mutex_lock(&chan->fifo->base.engine.subdev.mutex); mutex_lock(&chan->fifo->base.mutex);
hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4, hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
handle, context); handle, context);
mutex_unlock(&chan->fifo->base.engine.subdev.mutex); mutex_unlock(&chan->fifo->base.mutex);
return hash; return hash;
} }
......
...@@ -159,10 +159,10 @@ nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base, ...@@ -159,10 +159,10 @@ nv40_fifo_dma_object_ctor(struct nvkm_fifo_chan *base,
return -EINVAL; return -EINVAL;
} }
mutex_lock(&chan->fifo->base.engine.subdev.mutex); mutex_lock(&chan->fifo->base.mutex);
hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4, hash = nvkm_ramht_insert(imem->ramht, object, chan->base.chid, 4,
handle, context); handle, context);
mutex_unlock(&chan->fifo->base.engine.subdev.mutex); mutex_unlock(&chan->fifo->base.mutex);
return hash; return hash;
} }
......
...@@ -57,7 +57,7 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo) ...@@ -57,7 +57,7 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
int nr = 0; int nr = 0;
int target; int target;
mutex_lock(&subdev->mutex); mutex_lock(&fifo->base.mutex);
cur = fifo->runlist.mem[fifo->runlist.active]; cur = fifo->runlist.mem[fifo->runlist.active];
fifo->runlist.active = !fifo->runlist.active; fifo->runlist.active = !fifo->runlist.active;
...@@ -73,7 +73,7 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo) ...@@ -73,7 +73,7 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
case NVKM_MEM_TARGET_VRAM: target = 0; break; case NVKM_MEM_TARGET_VRAM: target = 0; break;
case NVKM_MEM_TARGET_NCOH: target = 3; break; case NVKM_MEM_TARGET_NCOH: target = 3; break;
default: default:
mutex_unlock(&subdev->mutex); mutex_unlock(&fifo->base.mutex);
WARN_ON(1); WARN_ON(1);
return; return;
} }
...@@ -86,23 +86,23 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo) ...@@ -86,23 +86,23 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
!(nvkm_rd32(device, 0x00227c) & 0x00100000), !(nvkm_rd32(device, 0x00227c) & 0x00100000),
msecs_to_jiffies(2000)) == 0) msecs_to_jiffies(2000)) == 0)
nvkm_error(subdev, "runlist update timeout\n"); nvkm_error(subdev, "runlist update timeout\n");
mutex_unlock(&subdev->mutex); mutex_unlock(&fifo->base.mutex);
} }
void void
gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan) gf100_fifo_runlist_remove(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{ {
mutex_lock(&fifo->base.engine.subdev.mutex); mutex_lock(&fifo->base.mutex);
list_del_init(&chan->head); list_del_init(&chan->head);
mutex_unlock(&fifo->base.engine.subdev.mutex); mutex_unlock(&fifo->base.mutex);
} }
void void
gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan) gf100_fifo_runlist_insert(struct gf100_fifo *fifo, struct gf100_fifo_chan *chan)
{ {
mutex_lock(&fifo->base.engine.subdev.mutex); mutex_lock(&fifo->base.mutex);
list_add_tail(&chan->head, &fifo->chan); list_add_tail(&chan->head, &fifo->chan);
mutex_unlock(&fifo->base.engine.subdev.mutex); mutex_unlock(&fifo->base.mutex);
} }
static inline int static inline int
......
...@@ -168,12 +168,11 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl) ...@@ -168,12 +168,11 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
{ {
const struct gk104_fifo_runlist_func *func = fifo->func->runlist; const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
struct gk104_fifo_chan *chan; struct gk104_fifo_chan *chan;
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_memory *mem; struct nvkm_memory *mem;
struct nvkm_fifo_cgrp *cgrp; struct nvkm_fifo_cgrp *cgrp;
int nr = 0; int nr = 0;
mutex_lock(&subdev->mutex); mutex_lock(&fifo->base.mutex);
mem = fifo->runlist[runl].mem[fifo->runlist[runl].next]; mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
fifo->runlist[runl].next = !fifo->runlist[runl].next; fifo->runlist[runl].next = !fifo->runlist[runl].next;
...@@ -191,27 +190,27 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl) ...@@ -191,27 +190,27 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
nvkm_done(mem); nvkm_done(mem);
func->commit(fifo, runl, mem, nr); func->commit(fifo, runl, mem, nr);
mutex_unlock(&subdev->mutex); mutex_unlock(&fifo->base.mutex);
} }
void void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{ {
struct nvkm_fifo_cgrp *cgrp = chan->cgrp; struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
mutex_lock(&fifo->base.engine.subdev.mutex); mutex_lock(&fifo->base.mutex);
if (!list_empty(&chan->head)) { if (!list_empty(&chan->head)) {
list_del_init(&chan->head); list_del_init(&chan->head);
if (cgrp && !--cgrp->chan_nr) if (cgrp && !--cgrp->chan_nr)
list_del_init(&cgrp->head); list_del_init(&cgrp->head);
} }
mutex_unlock(&fifo->base.engine.subdev.mutex); mutex_unlock(&fifo->base.mutex);
} }
void void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{ {
struct nvkm_fifo_cgrp *cgrp = chan->cgrp; struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
mutex_lock(&fifo->base.engine.subdev.mutex); mutex_lock(&fifo->base.mutex);
if (cgrp) { if (cgrp) {
if (!cgrp->chan_nr++) if (!cgrp->chan_nr++)
list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp); list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
...@@ -219,7 +218,7 @@ gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan) ...@@ -219,7 +218,7 @@ gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
} else { } else {
list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan); list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
} }
mutex_unlock(&fifo->base.engine.subdev.mutex); mutex_unlock(&fifo->base.mutex);
} }
void void
......
...@@ -77,7 +77,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base, ...@@ -77,7 +77,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_gpuobj *inst = chan->base.inst; struct nvkm_gpuobj *inst = chan->base.inst;
int ret = 0; int ret = 0;
mutex_lock(&subdev->mutex); mutex_lock(&chan->fifo->base.mutex);
nvkm_wr32(device, 0x002634, chan->base.chid); nvkm_wr32(device, 0x002634, chan->base.chid);
if (nvkm_msec(device, 2000, if (nvkm_msec(device, 2000,
if (nvkm_rd32(device, 0x002634) == chan->base.chid) if (nvkm_rd32(device, 0x002634) == chan->base.chid)
...@@ -87,7 +87,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base, ...@@ -87,7 +87,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
chan->base.chid, chan->base.object.client->name); chan->base.chid, chan->base.object.client->name);
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
} }
mutex_unlock(&subdev->mutex); mutex_unlock(&chan->fifo->base.mutex);
if (ret && suspend) if (ret && suspend)
return ret; return ret;
......
...@@ -65,9 +65,9 @@ int ...@@ -65,9 +65,9 @@ int
gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan) gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
{ {
int ret; int ret;
mutex_lock(&chan->base.fifo->engine.subdev.mutex); mutex_lock(&chan->base.fifo->mutex);
ret = gk104_fifo_gpfifo_kick_locked(chan); ret = gk104_fifo_gpfifo_kick_locked(chan);
mutex_unlock(&chan->base.fifo->engine.subdev.mutex); mutex_unlock(&chan->base.fifo->mutex);
return ret; return ret;
} }
......
...@@ -44,7 +44,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid ...@@ -44,7 +44,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
int ret; int ret;
/* Block runlist to prevent the channel from being rescheduled. */ /* Block runlist to prevent the channel from being rescheduled. */
mutex_lock(&subdev->mutex); mutex_lock(&chan->fifo->base.mutex);
nvkm_mask(device, 0x002630, BIT(chan->runl), BIT(chan->runl)); nvkm_mask(device, 0x002630, BIT(chan->runl), BIT(chan->runl));
/* Preempt the channel. */ /* Preempt the channel. */
...@@ -58,7 +58,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid ...@@ -58,7 +58,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
/* Resume runlist. */ /* Resume runlist. */
nvkm_mask(device, 0x002630, BIT(chan->runl), 0); nvkm_mask(device, 0x002630, BIT(chan->runl), 0);
mutex_unlock(&subdev->mutex); mutex_unlock(&chan->fifo->base.mutex);
return ret; return ret;
} }
......
...@@ -51,9 +51,9 @@ nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo) ...@@ -51,9 +51,9 @@ nv50_fifo_runlist_update_locked(struct nv50_fifo *fifo)
void void
nv50_fifo_runlist_update(struct nv50_fifo *fifo) nv50_fifo_runlist_update(struct nv50_fifo *fifo)
{ {
mutex_lock(&fifo->base.engine.subdev.mutex); mutex_lock(&fifo->base.mutex);
nv50_fifo_runlist_update_locked(fifo); nv50_fifo_runlist_update_locked(fifo);
mutex_unlock(&fifo->base.engine.subdev.mutex); mutex_unlock(&fifo->base.mutex);
} }
int int
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment