Commit d30af7ce authored by Ben Skeggs

drm/nouveau/mmu: handle instance block setup

We previously required each VMM user to allocate their own page directory
and fill in the instance block themselves.

It makes more sense to handle this in a common location.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent af3b8d53
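
For orientation, the sketch below condenses the pattern this commit removes and the call that replaces it. It is assembled from the hunks further down; device, inst, pgd, vm, limit and chan are placeholders standing in for the per-site variables, not names introduced by the patch. The offsets 0x200..0x20c are the instance-block words holding the page-directory address and the VM limit.

/* Sketch only. Before this patch, every VMM user (gpfifo, bar, secboot, ...)
 * open-coded roughly the following: allocate a page directory, then write
 * its address and the VM limit into the instance block by hand.
 */
ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &pgd);
if (ret)
	return ret;
nvkm_kmap(inst);
nvkm_wo32(inst, 0x0200, lower_32_bits(pgd->addr)); /* PD address, low */
nvkm_wo32(inst, 0x0204, upper_32_bits(pgd->addr)); /* PD address, high */
nvkm_wo32(inst, 0x0208, lower_32_bits(limit));     /* VM limit, low */
nvkm_wo32(inst, 0x020c, upper_32_bits(limit));     /* VM limit, high */
nvkm_done(inst);
ret = nvkm_vm_ref(vm, &chan->vm, pgd);

/* After this patch, the caller only hands its instance block to the MMU
 * layer; nvkm_vm_ref() invokes the VMM's join()/part() hooks, which set
 * up (and tear down) the page directory and these words in one place.
 */
ret = nvkm_vm_ref(vm, &chan->vm, chan->base.inst->memory);
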
......@@ -78,7 +78,7 @@ struct nvkm_falcon_func {
void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
void (*bind_context)(struct nvkm_falcon *, struct nvkm_gpuobj *);
void (*bind_context)(struct nvkm_falcon *, struct nvkm_memory *);
int (*wait_for_halt)(struct nvkm_falcon *, u32);
int (*clear_interrupt)(struct nvkm_falcon *, u32);
void (*set_start_addr)(struct nvkm_falcon *, u32 start_addr);
......@@ -113,7 +113,7 @@ void nvkm_falcon_load_imem(struct nvkm_falcon *, void *, u32, u32, u16, u8,
bool);
void nvkm_falcon_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
void nvkm_falcon_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_gpuobj *);
void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_memory *);
void nvkm_falcon_set_start_addr(struct nvkm_falcon *, u32);
void nvkm_falcon_start(struct nvkm_falcon *);
int nvkm_falcon_wait_for_halt(struct nvkm_falcon *, u32);
......
......@@ -10,11 +10,6 @@ struct nvkm_vm_pgt {
u32 refcount[2];
};
struct nvkm_vm_pgd {
struct list_head head;
struct nvkm_gpuobj *obj;
};
struct nvkm_vma {
struct nvkm_vm *vm;
struct nvkm_mm_node *node;
......@@ -40,7 +35,6 @@ struct nvkm_vm {
struct nvkm_mm mm;
struct kref refcount;
struct list_head pgd_list;
struct nvkm_vm_pgt *pgt;
u32 fpde;
u32 lpde;
......@@ -54,7 +48,7 @@ struct nvkm_vm {
int nvkm_vm_new(struct nvkm_device *, u64 offset, u64 length, u64 mm_offset,
struct lock_class_key *, struct nvkm_vm **);
int nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_gpuobj *pgd);
int nvkm_vm_ref(struct nvkm_vm *, struct nvkm_vm **, struct nvkm_memory *inst);
int nvkm_vm_boot(struct nvkm_vm *, u64 size);
int nvkm_vm_get(struct nvkm_vm *, u64 size, u32 page_shift, u32 access,
struct nvkm_vma *);
......
......@@ -281,5 +281,5 @@ g84_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
if (ret)
return ret;
return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->base.inst->memory);
}
......@@ -11,7 +11,6 @@ struct gf100_fifo_chan {
struct list_head head;
bool killed;
struct nvkm_gpuobj *pgd;
struct nvkm_vm *vm;
struct {
......
......@@ -12,7 +12,6 @@ struct gk104_fifo_chan {
struct list_head head;
bool killed;
struct nvkm_gpuobj *pgd;
struct nvkm_vm *vm;
struct {
......
......@@ -206,7 +206,8 @@ void *
nv50_fifo_chan_dtor(struct nvkm_fifo_chan *base)
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
if (chan->base.inst)
nvkm_vm_ref(NULL, &chan->vm, chan->base.inst->memory);
nvkm_ramht_del(&chan->ramht);
nvkm_gpuobj_del(&chan->pgd);
nvkm_gpuobj_del(&chan->eng);
......@@ -266,5 +267,5 @@ nv50_fifo_chan_ctor(struct nv50_fifo *fifo, u64 vm, u64 push,
if (ret)
return ret;
return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
return nvkm_vm_ref(chan->base.vm, &chan->vm, chan->base.inst->memory);
}
......@@ -200,8 +200,8 @@ static void *
gf100_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
nvkm_gpuobj_del(&chan->pgd);
if (chan->base.inst)
nvkm_vm_ref(NULL, &chan->vm, chan->base.inst->memory);
return chan;
}
......@@ -225,7 +225,6 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
struct fermi_channel_gpfifo_v0 v0;
} *args = data;
struct gf100_fifo *fifo = gf100_fifo(base);
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_object *parent = oclass->parent;
struct gf100_fifo_chan *chan;
u64 usermem, ioffset, ilength;
......@@ -263,19 +262,7 @@ gf100_fifo_gpfifo_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
args->v0.chid = chan->base.chid;
/* page directory */
ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
if (ret)
return ret;
nvkm_kmap(chan->base.inst);
nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
nvkm_done(chan->base.inst);
ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->base.inst->memory);
if (ret)
return ret;
......
......@@ -213,8 +213,8 @@ static void *
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
nvkm_vm_ref(NULL, &chan->vm, chan->pgd);
nvkm_gpuobj_del(&chan->pgd);
if (chan->base.inst)
nvkm_vm_ref(NULL, &chan->vm, chan->base.inst->memory);
return chan;
}
......@@ -242,7 +242,6 @@ gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct gk104_fifo_chan *chan;
int runlist = -1, ret = -ENOSYS, i, j;
u32 engines = 0, present = 0;
......@@ -302,19 +301,7 @@ gk104_fifo_gpfifo_new_(const struct gk104_fifo_chan_func *func,
*chid = chan->base.chid;
/* Page directory. */
ret = nvkm_gpuobj_new(device, 0x10000, 0x1000, false, NULL, &chan->pgd);
if (ret)
return ret;
nvkm_kmap(chan->base.inst);
nvkm_wo32(chan->base.inst, 0x0200, lower_32_bits(chan->pgd->addr));
nvkm_wo32(chan->base.inst, 0x0204, upper_32_bits(chan->pgd->addr));
nvkm_wo32(chan->base.inst, 0x0208, 0xffffffff);
nvkm_wo32(chan->base.inst, 0x020c, 0x000000ff);
nvkm_done(chan->base.inst);
ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->pgd);
ret = nvkm_vm_ref(chan->base.vm, &chan->vm, chan->base.inst->memory);
if (ret)
return ret;
......
......@@ -60,7 +60,7 @@ nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
}
void
nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *inst)
nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst)
{
if (!falcon->func->bind_context) {
nvkm_error(falcon->user,
......
......@@ -180,7 +180,7 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
}
static void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
u32 inst_loc;
u32 fbif;
......@@ -216,7 +216,7 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);
/* Set context */
switch (nvkm_memory_target(ctx->memory)) {
switch (nvkm_memory_target(ctx)) {
case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
......@@ -228,7 +228,7 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
/* Enable context */
nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
nvkm_falcon_wr32(falcon, 0x054,
((ctx->addr >> 12) & 0xfffffff) |
((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) |
(inst_loc << 28) | (1 << 30));
nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
......
......@@ -23,7 +23,7 @@
*/
#include "gf100.h"
#include <core/gpuobj.h>
#include <core/memory.h>
#include <core/option.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
......@@ -53,7 +53,7 @@ gf100_bar_bar1_init(struct nvkm_bar *base)
{
struct nvkm_device *device = base->subdev.device;
struct gf100_bar *bar = gf100_bar(base);
const u32 addr = nvkm_memory_addr(bar->bar[1].mem) >> 12;
const u32 addr = nvkm_memory_addr(bar->bar[1].inst) >> 12;
nvkm_wr32(device, 0x001704, 0x80000000 | addr);
}
......@@ -74,7 +74,7 @@ gf100_bar_bar2_init(struct nvkm_bar *base)
{
struct nvkm_device *device = base->subdev.device;
struct gf100_bar *bar = gf100_bar(base);
u32 addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
u32 addr = nvkm_memory_addr(bar->bar[0].inst) >> 12;
if (bar->bar2_halve)
addr |= 0x40000000;
nvkm_wr32(device, 0x001714, 0x80000000 | addr);
......@@ -90,11 +90,7 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
int ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, false,
&bar_vm->mem);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x8000, 0, false, NULL, &bar_vm->pgd);
&bar_vm->inst);
if (ret)
return ret;
......@@ -119,17 +115,11 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
}
}
ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->inst);
nvkm_vm_ref(NULL, &vm, NULL);
if (ret)
return ret;
nvkm_kmap(bar_vm->mem);
nvkm_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
nvkm_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
nvkm_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
nvkm_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
nvkm_done(bar_vm->mem);
return 0;
}
......@@ -164,13 +154,11 @@ gf100_bar_dtor(struct nvkm_bar *base)
{
struct gf100_bar *bar = gf100_bar(base);
nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].pgd);
nvkm_gpuobj_del(&bar->bar[1].pgd);
nvkm_memory_unref(&bar->bar[1].mem);
nvkm_vm_ref(NULL, &bar->bar[1].vm, bar->bar[1].inst);
nvkm_memory_unref(&bar->bar[1].inst);
nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
nvkm_gpuobj_del(&bar->bar[0].pgd);
nvkm_memory_unref(&bar->bar[0].mem);
nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].inst);
nvkm_memory_unref(&bar->bar[0].inst);
return bar;
}
......
......@@ -4,8 +4,7 @@
#include "priv.h"
struct gf100_barN {
struct nvkm_memory *mem;
struct nvkm_gpuobj *pgd;
struct nvkm_memory *inst;
struct nvkm_vm *vm;
};
......
......@@ -140,7 +140,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
if (ret)
return ret;
ret = nvkm_vm_ref(vm, &bar->bar2_vm, bar->pgd);
ret = nvkm_vm_ref(vm, &bar->bar2_vm, bar->mem->memory);
nvkm_vm_ref(NULL, &vm, NULL);
if (ret)
return ret;
......@@ -172,7 +172,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
atomic_inc(&vm->engref[NVKM_SUBDEV_BAR]);
ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->pgd);
ret = nvkm_vm_ref(vm, &bar->bar1_vm, bar->mem->memory);
nvkm_vm_ref(NULL, &vm, NULL);
if (ret)
return ret;
......@@ -197,13 +197,15 @@ void *
nv50_bar_dtor(struct nvkm_bar *base)
{
struct nv50_bar *bar = nv50_bar(base);
if (bar->mem) {
nvkm_gpuobj_del(&bar->bar1);
nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
nvkm_vm_ref(NULL, &bar->bar1_vm, bar->mem->memory);
nvkm_gpuobj_del(&bar->bar2);
nvkm_vm_ref(NULL, &bar->bar2_vm, bar->pgd);
nvkm_vm_ref(NULL, &bar->bar2_vm, bar->mem->memory);
nvkm_gpuobj_del(&bar->pgd);
nvkm_gpuobj_del(&bar->pad);
nvkm_gpuobj_del(&bar->mem);
}
return bar;
}
......
......@@ -446,7 +446,6 @@ static void
nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
{
struct nvkm_mmu *mmu = vm->mmu;
struct nvkm_vm_pgd *vpgd;
struct nvkm_vm_pgt *vpgt;
struct nvkm_memory *pgt;
u32 pde;
......@@ -459,9 +458,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
pgt = vpgt->mem[big];
vpgt->mem[big] = NULL;
list_for_each_entry(vpgd, &vm->pgd_list, head) {
mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
}
if (mmu->func->map_pgt)
mmu->func->map_pgt(vm, pde, vpgt->mem);
mmu->func->flush(vm);
......@@ -474,7 +472,6 @@ nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
{
struct nvkm_mmu *mmu = vm->mmu;
struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
struct nvkm_vm_pgd *vpgd;
int big = (type != mmu->func->spg_shift);
u32 pgt_size;
int ret;
......@@ -487,9 +484,8 @@ nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
if (unlikely(ret))
return ret;
list_for_each_entry(vpgd, &vm->pgd_list, head) {
mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
}
if (mmu->func->map_pgt)
mmu->func->map_pgt(vm, pde, vpgt->mem);
vpgt->refcount[big]++;
return 0;
......@@ -592,7 +588,6 @@ nvkm_vm_legacy(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
u64 mm_length = (offset + length) - mm_offset;
int ret;
INIT_LIST_HEAD(&vm->pgd_list);
kref_init(&vm->refcount);
vm->fpde = offset >> (mmu->func->pgt_bits + 12);
vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);
......@@ -644,58 +639,10 @@ nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
return -EINVAL;
}
static int
nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
{
struct nvkm_mmu *mmu = vm->mmu;
struct nvkm_vm_pgd *vpgd;
int i;
if (!pgd)
return 0;
vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
if (!vpgd)
return -ENOMEM;
vpgd->obj = pgd;
mutex_lock(&vm->mutex);
for (i = vm->fpde; i <= vm->lpde; i++)
mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
list_add(&vpgd->head, &vm->pgd_list);
mutex_unlock(&vm->mutex);
return 0;
}
static void
nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
{
struct nvkm_vm_pgd *vpgd, *tmp;
if (!mpgd)
return;
mutex_lock(&vm->mutex);
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
if (vpgd->obj == mpgd) {
list_del(&vpgd->head);
kfree(vpgd);
break;
}
}
mutex_unlock(&vm->mutex);
}
static void
nvkm_vm_del(struct kref *kref)
{
struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
struct nvkm_vm_pgd *vpgd, *tmp;
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
nvkm_vm_unlink(vm, vpgd->obj);
}
nvkm_mm_fini(&vm->mm);
vfree(vm->pgt);
......@@ -705,20 +652,28 @@ nvkm_vm_del(struct kref *kref)
}
int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_memory *inst)
{
if (ref) {
int ret = nvkm_vm_link(ref, pgd);
if (ref->func->join && inst) {
int ret = ref->func->join(ref, inst), i;
if (ret)
return ret;
if (ref->mmu->func->map_pgt) {
for (i = ref->fpde; i <= ref->lpde; i++)
ref->mmu->func->map_pgt(ref, i, ref->pgt[i - ref->fpde].mem);
}
}
kref_get(&ref->refcount);
}
if (*ptr) {
if ((*ptr)->bootstrapped && pgd)
if ((*ptr)->func->part && inst)
(*ptr)->func->part(*ptr, inst);
if ((*ptr)->bootstrapped && inst)
nvkm_memory_unref(&(*ptr)->pgt[0].mem[0]);
nvkm_vm_unlink(*ptr, pgd);
kref_put(&(*ptr)->refcount, nvkm_vm_del);
}
......
......@@ -70,8 +70,9 @@ const u8 gf100_pte_storage_type_map[256] =
void
gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_memory *pgt[2])
gf100_vm_map_pgt(struct nvkm_vmm *vmm, u32 index, struct nvkm_memory *pgt[2])
{
struct nvkm_memory *pgd = vmm->pd->pt[0]->memory;
u32 pde[2] = { 0, 0 };
if (pgt[0])
......@@ -161,7 +162,6 @@ gf100_vm_flush(struct nvkm_vm *vm)
{
struct nvkm_mmu *mmu = vm->mmu;
struct nvkm_device *device = mmu->subdev.device;
struct nvkm_vm_pgd *vpgd;
u32 type;
type = 0x00000001; /* PAGE_ALL */
......@@ -169,7 +169,6 @@ gf100_vm_flush(struct nvkm_vm *vm)
type |= 0x00000004; /* HUB_ONLY */
mutex_lock(&mmu->subdev.mutex);
list_for_each_entry(vpgd, &vm->pgd_list, head) {
/* looks like maybe a "free flush slots" counter, the
* faster you write to 0x100cbc the more it decreases
*/
......@@ -178,7 +177,7 @@ gf100_vm_flush(struct nvkm_vm *vm)
break;
);
nvkm_wr32(device, 0x100cb8, vpgd->obj->addr >> 8);
nvkm_wr32(device, 0x100cb8, vm->pd->pt[0]->addr >> 8);
nvkm_wr32(device, 0x100cbc, 0x80000000 | type);
/* wait for flush to be queued? */
......@@ -186,7 +185,6 @@ gf100_vm_flush(struct nvkm_vm *vm)
if (nvkm_rd32(device, 0x100c80) & 0x00008000)
break;
);
}
mutex_unlock(&mmu->subdev.mutex);
}
......
......@@ -31,8 +31,10 @@
#include <nvif/class.h>
void
nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
nv50_vm_map_pgt(struct nvkm_vmm *vmm, u32 pde, struct nvkm_memory *pgt[2])
{
struct nvkm_vmm_join *join;
u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pde * 8);
u64 phys = 0xdeadcafe00000000ULL;
u32 coverage = 0;
......@@ -56,10 +58,12 @@ nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
phys |= 0x20;
}
nvkm_kmap(pgd);
nvkm_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
nvkm_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
nvkm_done(pgd);
list_for_each_entry(join, &vmm->join, head) {
nvkm_kmap(join->inst);
nvkm_wo32(join->inst, pdeo + 0, lower_32_bits(phys));
nvkm_wo32(join->inst, pdeo + 4, upper_32_bits(phys));
nvkm_done(join->inst);
}
}
static inline u64
......
......@@ -18,7 +18,7 @@ struct nvkm_mmu_func {
u8 spg_shift;
u8 lpg_shift;
void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
void (*map_pgt)(struct nvkm_vmm *, u32 pde,
struct nvkm_memory *pgt[2]);
void (*map)(struct nvkm_vma *, struct nvkm_memory *,
struct nvkm_mem *, u32 pte, u32 cnt,
......@@ -41,7 +41,7 @@ struct nvkm_mmu_func {
extern const struct nvkm_mmu_func nv04_mmu;
void nv50_vm_map_pgt(struct nvkm_gpuobj *, u32, struct nvkm_memory **);
void nv50_vm_map_pgt(struct nvkm_vmm *, u32, struct nvkm_memory **);
void nv50_vm_map(struct nvkm_vma *, struct nvkm_memory *, struct nvkm_mem *,
u32, u32, u64, u64);
void nv50_vm_map_sg(struct nvkm_vma *, struct nvkm_memory *, struct nvkm_mem *,
......@@ -49,7 +49,7 @@ void nv50_vm_map_sg(struct nvkm_vma *, struct nvkm_memory *, struct nvkm_mem *,
void nv50_vm_unmap(struct nvkm_vma *, struct nvkm_memory *, u32, u32);
void nv50_vm_flush(struct nvkm_vm *);
void gf100_vm_map_pgt(struct nvkm_gpuobj *, u32, struct nvkm_memory **);
void gf100_vm_map_pgt(struct nvkm_vmm *, u32, struct nvkm_memory **);
void gf100_vm_map(struct nvkm_vma *, struct nvkm_memory *, struct nvkm_mem *,
u32, u32, u64, u64);
void gf100_vm_map_sg(struct nvkm_vma *, struct nvkm_memory *, struct nvkm_mem *,
......
......@@ -131,7 +131,7 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
/* ... and the GPU storage for it, except on Tesla-class GPUs that
* have the PD embedded in the instance structure.
*/
if (desc->size && mmu->func->vmm.global) {
if (desc->size) {
const u32 size = pd_header + desc->size * (1 << desc->bits);
vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
if (!vmm->pd->pt[0])
......
......@@ -112,11 +112,8 @@ gm200_secboot_oneinit(struct nvkm_secboot *sb)
int ret;
/* Allocate instance block and VM */
ret = nvkm_gpuobj_new(device, 0x1000, 0, true, NULL, &gsb->inst);
if (ret)
return ret;
ret = nvkm_gpuobj_new(device, 0x8000, 0, true, NULL, &gsb->pgd);
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0, true,
&gsb->inst);
if (ret)
return ret;
......@@ -126,18 +123,11 @@ gm200_secboot_oneinit(struct nvkm_secboot *sb)
atomic_inc(&vm->engref[NVKM_SUBDEV_PMU]);
ret = nvkm_vm_ref(vm, &gsb->vm, gsb->pgd);
ret = nvkm_vm_ref(vm, &gsb->vm, gsb->inst);
nvkm_vm_ref(NULL, &vm, NULL);
if (ret)
return ret;
nvkm_kmap(gsb->inst);
nvkm_wo32(gsb->inst, 0x200, lower_32_bits(gsb->pgd->addr));
nvkm_wo32(gsb->inst, 0x204, upper_32_bits(gsb->pgd->addr));
nvkm_wo32(gsb->inst, 0x208, lower_32_bits(vm_area_len - 1));
nvkm_wo32(gsb->inst, 0x20c, upper_32_bits(vm_area_len - 1));
nvkm_done(gsb->inst);
if (sb->acr->func->oneinit) {
ret = sb->acr->func->oneinit(sb->acr, sb);
if (ret)
......@@ -165,9 +155,8 @@ gm200_secboot_dtor(struct nvkm_secboot *sb)
sb->acr->func->dtor(sb->acr);
nvkm_vm_ref(NULL, &gsb->vm, gsb->pgd);
nvkm_gpuobj_del(&gsb->pgd);
nvkm_gpuobj_del(&gsb->inst);
nvkm_vm_ref(NULL, &gsb->vm, gsb->inst);
nvkm_memory_unref(&gsb->inst);
return gsb;
}
......
......@@ -29,8 +29,7 @@ struct gm200_secboot {
struct nvkm_secboot base;
/* Instance block & address space used for HS FW execution */
struct nvkm_gpuobj *inst;
struct nvkm_gpuobj *pgd;
struct nvkm_memory *inst;
struct nvkm_vm *vm;
};
#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
......