Commit 40184ece authored by Ben Skeggs, committed by Dave Airlie

drm/nouveau/ce/gv100-: move method buffer to ce ctx

We didn't really know what this buffer was when it was initially
implemented, but these days we do, so move it somewhere more appropriate.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 097d56cd
drivers/gpu/drm/nouveau/nouveau_abi16.c

@@ -147,6 +147,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
 	/* destroy channel object, all children will be killed too */
 	if (chan->chan) {
+		nvif_object_dtor(&chan->ce);
 		nouveau_channel_idle(chan->chan);
 		nouveau_channel_del(&chan->chan);
 	}
@@ -325,6 +326,31 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 		init->nr_subchan = 2;
 	}
 
+	/* Workaround "nvc0" gallium driver using classes it doesn't allocate on
+	 * Kepler and above.  NVKM no longer always sets CE_CTX_VALID as part of
+	 * channel init, now that we know what that stuff actually is.
+	 *
+	 * Doesn't matter for Kepler/Pascal, CE context stored in NV_RAMIN.
+	 *
+	 * Userspace was fixed prior to adding Ampere support.
+	 */
+	switch (device->info.family) {
+	case NV_DEVICE_INFO_V0_VOLTA:
+		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
+				       NULL, 0, &chan->ce);
+		if (ret)
+			goto done;
+		break;
+	case NV_DEVICE_INFO_V0_TURING:
+		ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
+				       NULL, 0, &chan->ce);
+		if (ret)
+			goto done;
+		break;
+	default:
+		break;
+	}
+
 	/* Named memory object area */
 	ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
 			      0, 0, &chan->ntfy);
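For illustration, here is a minimal userspace-style sketch of the family-to-class selection the workaround above performs. The class IDs are the VOLTA_DMA_COPY_A/TURING_DMA_COPY_A values from nvif/class.h; the enum and helper function are simplified stand-ins invented for this sketch, not the kernel's actual definitions.

#include <stdio.h>

/* Hypothetical stand-ins for the NV_DEVICE_INFO_V0_* family codes. */
enum chipset_family { FAMILY_VOLTA, FAMILY_TURING, FAMILY_OTHER };

/* Copy-engine class IDs as defined in nvif/class.h. */
#define VOLTA_DMA_COPY_A  0x0000c3b5
#define TURING_DMA_COPY_A 0x0000c5b5

/* Returns the DMA copy class the workaround would instantiate, or 0 when
 * no workaround object is needed (pre-Volta, or Ampere+ where userspace
 * was already fixed). */
static unsigned int abi16_ce_war_class(enum chipset_family family)
{
	switch (family) {
	case FAMILY_VOLTA:  return VOLTA_DMA_COPY_A;
	case FAMILY_TURING: return TURING_DMA_COPY_A;
	default:            return 0;
	}
}

int main(void)
{
	printf("volta  -> 0x%04x\n", abi16_ce_war_class(FAMILY_VOLTA));
	printf("turing -> 0x%04x\n", abi16_ce_war_class(FAMILY_TURING));
	printf("other  -> 0x%04x\n", abi16_ce_war_class(FAMILY_OTHER));
	return 0;
}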
drivers/gpu/drm/nouveau/nouveau_abi16.h

@@ -21,6 +21,7 @@ struct nouveau_abi16_ntfy {
 struct nouveau_abi16_chan {
 	struct list_head head;
 	struct nouveau_channel *chan;
+	struct nvif_object ce;
 	struct list_head notifiers;
 	struct nouveau_bo *ntfy;
 	struct nouveau_vma *ntfy_vma;
drivers/gpu/drm/nouveau/nvkm/engine/ce/gv100.c

@@ -21,11 +21,35 @@
  */
 #include "priv.h"
+#include <core/gpuobj.h>
+#include <core/object.h>
 
 #include <nvif/class.h>
 
+static int
+gv100_ce_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent, int align,
+		     struct nvkm_gpuobj **pgpuobj)
+{
+	struct nvkm_device *device = object->engine->subdev.device;
+	u32 size;
+
+	/* Allocate fault method buffer (magics come from nvgpu). */
+	size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
+	size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
+	size = roundup(size, PAGE_SIZE);
+
+	return nvkm_gpuobj_new(device, size, align, true, parent, pgpuobj);
+}
+
+const struct nvkm_object_func
+gv100_ce_cclass = {
+	.bind = gv100_ce_cclass_bind,
+};
+
 static const struct nvkm_engine_func
 gv100_ce = {
 	.intr = gp100_ce_intr,
+	.cclass = &gv100_ce_cclass,
 	.sclass = {
 		{ -1, -1, VOLTA_DMA_COPY_A },
 		{}
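To make the "magics" in gv100_ce_cclass_bind() concrete, the standalone sketch below runs the same size computation on a made-up NV_PCE_PCE_MAP value of 0x1ff (nine PCEs present), with userspace stand-ins for the kernel's hweight32() and roundup() helpers.

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Userspace stand-ins for the kernel's hweight32() and roundup(). */
static unsigned int hweight32(unsigned int v) { return __builtin_popcount(v); }
static unsigned int roundup_to(unsigned int x, unsigned int y) { return ((x + y - 1) / y) * y; }

int main(void)
{
	unsigned int pce_map = 0x1ff;	/* hypothetical NV_PCE_PCE_MAP: 9 PCEs */
	unsigned int size;

	/* Same arithmetic as gv100_ce_cclass_bind(). */
	size = 27 * 5 * (((9 + 1 + 3) * hweight32(pce_map)) + 2);
	printf("raw size:     %u bytes\n", size);	/* 135 * 119 = 16065 */

	size = roundup_to(size, PAGE_SIZE);
	printf("rounded size: %u bytes\n", size);	/* 16384 (4 pages) */
	return 0;
}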
drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h

@@ -6,4 +6,6 @@
 void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
 void gk104_ce_intr(struct nvkm_engine *);
 void gp100_ce_intr(struct nvkm_engine *);
+
+extern const struct nvkm_object_func gv100_ce_cclass;
 #endif
drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c

@@ -26,6 +26,7 @@
 static const struct nvkm_engine_func
 tu102_ce = {
 	.intr = gp100_ce_intr,
+	.cclass = &gv100_ce_cclass,
 	.sclass = {
 		{ -1, -1, TURING_DMA_COPY_A },
 		{}
drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h

@@ -14,8 +14,6 @@ struct gk104_fifo_chan {
 	struct list_head head;
 	bool killed;
 
-	struct nvkm_memory *mthd;
-
 #define GK104_FIFO_ENGN_SW 15
 	struct gk104_fifo_engn {
 		struct nvkm_gpuobj *inst;
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c

@@ -175,13 +175,19 @@ gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *base,
 	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
 	int ret;
 
-	if (!gk104_fifo_gpfifo_engine_addr(engine))
-		return 0;
+	if (!gk104_fifo_gpfifo_engine_addr(engine)) {
+		if (engine->subdev.type != NVKM_ENGINE_CE ||
+		    engine->subdev.device->card_type < GV100)
+			return 0;
+	}
 
 	ret = nvkm_object_bind(object, NULL, 0, &engn->inst);
 	if (ret)
 		return ret;
 
+	if (!gk104_fifo_gpfifo_engine_addr(engine))
+		return 0;
+
 	ret = nvkm_vmm_get(chan->base.vmm, 12, engn->inst->size, &engn->vma);
 	if (ret)
 		return ret;
@@ -231,7 +237,6 @@ void *
 gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
 {
 	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
-	nvkm_memory_unref(&chan->mthd);
 	kfree(chan->cgrp);
 	return chan;
 }
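The constructor rework above is easy to misread: engines without an engine context address used to be skipped outright, but GV100+ copy engines must now still bind their context (which allocates the fault method buffer via the cclass) while continuing to skip the VMM mapping step. The toy sketch below traces only that control flow; every type and helper in it is a simplified stand-in invented for the illustration, not NVKM's real API.

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-ins for the kernel types/helpers in gpfifogk104.c. */
struct engine { bool is_ce; int card_type; };
#define GV100 0x140	/* NVKM card_type for Volta */

/* gk104_fifo_gpfifo_engine_addr() returns 0 for CE on these chips. */
static bool engine_addr(const struct engine *e) { return !e->is_ce; }

static int engine_ctor(const struct engine *e)
{
	if (!engine_addr(e)) {
		/* Only GV100+ CE still needs a context bound. */
		if (!e->is_ce || e->card_type < GV100)
			return 0;
	}

	printf("bind engine context (method buffer for CE)\n");

	if (!engine_addr(e))
		return 0;	/* CE: no VMM mapping required */

	printf("map context into channel VMM\n");
	return 0;
}

int main(void)
{
	struct engine ce_gv100 = { .is_ce = true, .card_type = GV100 };
	return engine_ctor(&ce_gv100);	/* binds, but skips mapping */
}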
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c

@@ -70,8 +70,17 @@ gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
 	struct nvkm_gpuobj *inst = chan->base.inst;
 	int ret;
 
-	if (engine->subdev.type == NVKM_ENGINE_CE)
-		return gk104_fifo_gpfifo_kick(chan);
+	if (engine->subdev.type == NVKM_ENGINE_CE) {
+		ret = gv100_fifo_gpfifo_engine_valid(chan, true, false);
+		if (ret && suspend)
+			return ret;
+
+		nvkm_kmap(inst);
+		nvkm_wo32(chan->base.inst, 0x220, 0x00000000);
+		nvkm_wo32(chan->base.inst, 0x224, 0x00000000);
+		nvkm_done(inst);
+		return ret;
+	}
 
 	ret = gv100_fifo_gpfifo_engine_valid(chan, false, false);
 	if (ret && suspend)
@@ -92,8 +101,16 @@ gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
 	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
 	struct nvkm_gpuobj *inst = chan->base.inst;
 
-	if (engine->subdev.type == NVKM_ENGINE_CE)
-		return 0;
+	if (engine->subdev.type == NVKM_ENGINE_CE) {
+		const u64 bar2 = nvkm_memory_bar2(engn->inst->memory);
+
+		nvkm_kmap(inst);
+		nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(bar2));
+		nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(bar2));
+		nvkm_done(inst);
+
+		return gv100_fifo_gpfifo_engine_valid(chan, true, true);
+	}
 
 	nvkm_kmap(inst);
 	nvkm_wo32(inst, 0x210, lower_32_bits(engn->vma->addr) | 0x00000004);
@@ -123,11 +140,9 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
 		       u32 *token, const struct nvkm_oclass *oclass,
 		       struct nvkm_object **pobject)
 {
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct gk104_fifo_chan *chan;
 	int runlist = ffs(*runlists) -1, ret, i;
-	u64 usermem, mthd;
-	u32 size;
+	u64 usermem;
 
 	if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
 		return -EINVAL;
@@ -173,20 +188,6 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
 	nvkm_done(fifo->user.mem);
 	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
 
-	/* Allocate fault method buffer (magics come from nvgpu). */
-	size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
-	size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
-	size = roundup(size, PAGE_SIZE);
-	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, true,
-			      &chan->mthd);
-	if (ret)
-		return ret;
-
-	mthd = nvkm_memory_bar2(chan->mthd);
-	if (mthd == ~0ULL)
-		return -EFAULT;
-
 	/* RAMFC */
 	nvkm_kmap(chan->base.inst);
 	nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
@@ -203,10 +204,8 @@ gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
 	nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
 	nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
 	nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
-	nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(mthd));
-	nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(mthd));
 	nvkm_done(chan->base.inst);
-	return gv100_fifo_gpfifo_engine_valid(chan, true, true);
+	return 0;
 }
 
 int
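With the method buffer owned by the CE engine context, the engine_init hook now publishes its BAR2 address to the channel by splitting the 64-bit value across the two 32-bit RAMFC words at 0x220/0x224 (and engine_fini clears them again), rather than channel creation doing so. A minimal standalone sketch of that split, using userspace stand-ins for the kernel's lower_32_bits()/upper_32_bits() helpers and a made-up address:

#include <stdio.h>
#include <stdint.h>

/* Userspace equivalents of the kernel's lower/upper_32_bits() macros. */
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffu))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
	uint64_t bar2 = 0x0000001234567000ull;	/* hypothetical BAR2 address */

	/* What gv100_fifo_gpfifo_engine_init() writes into RAMFC. */
	printf("0x220 <- 0x%08x\n", (unsigned)lower_32_bits(bar2));	/* 0x34567000 */
	printf("0x224 <- 0x%08x\n", (unsigned)upper_32_bits(bar2));	/* 0x00000012 */
	return 0;
}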