Commit a98a3c52 authored by Ben Skeggs

drm/nouveau/fifo/gv100: allocate method buffer

The GPU saves off some stuff to the address specified in this part of RAMFC
when the channel faults, so we should probably point it at a valid address.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent f7cc47e4
...@@ -14,6 +14,8 @@ struct gk104_fifo_chan { ...@@ -14,6 +14,8 @@ struct gk104_fifo_chan {
struct list_head head; struct list_head head;
bool killed; bool killed;
struct nvkm_memory *mthd;
struct { struct {
struct nvkm_gpuobj *inst; struct nvkm_gpuobj *inst;
struct nvkm_vma *vma; struct nvkm_vma *vma;
......
...@@ -222,6 +222,7 @@ void * ...@@ -222,6 +222,7 @@ void *
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base) gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{ {
struct gk104_fifo_chan *chan = gk104_fifo_chan(base); struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
nvkm_memory_unref(&chan->mthd);
kfree(chan->cgrp); kfree(chan->cgrp);
return chan; return chan;
} }
......
...@@ -118,11 +118,13 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid, ...@@ -118,11 +118,13 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
const struct nvkm_oclass *oclass, const struct nvkm_oclass *oclass,
struct nvkm_object **pobject) struct nvkm_object **pobject)
{ {
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct gk104_fifo_chan *chan; struct gk104_fifo_chan *chan;
int runlist = ffs(*runlists) -1, ret, i; int runlist = ffs(*runlists) -1, ret, i;
unsigned long engm; unsigned long engm;
u64 subdevs = 0; u64 subdevs = 0;
u64 usermem; u64 usermem, mthd;
u32 size;
if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr) if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
return -EINVAL; return -EINVAL;
...@@ -174,6 +176,20 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid, ...@@ -174,6 +176,20 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
nvkm_done(fifo->user.mem); nvkm_done(fifo->user.mem);
usermem = nvkm_memory_addr(fifo->user.mem) + usermem; usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
/* Allocate fault method buffer (magics come from nvgpu). */
size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
size = roundup(size, PAGE_SIZE);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, true,
&chan->mthd);
if (ret)
return ret;
mthd = nvkm_memory_bar2(chan->mthd);
if (mthd == ~0ULL)
return -EFAULT;
/* RAMFC */ /* RAMFC */
nvkm_kmap(chan->base.inst); nvkm_kmap(chan->base.inst);
nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem)); nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
...@@ -190,8 +206,8 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid, ...@@ -190,8 +206,8 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000); nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080); nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000); nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
nvkm_wo32(chan->base.inst, 0x220, 0x020a1000); nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(mthd));
nvkm_wo32(chan->base.inst, 0x224, 0x00000000); nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(mthd));
nvkm_done(chan->base.inst); nvkm_done(chan->base.inst);
return gv100_fifo_gpfifo_engine_valid(chan, true, true); return gv100_fifo_gpfifo_engine_valid(chan, true, true);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment