Commit 5b17f362 authored by Ben Skeggs

drm/nouveau/mmu/nv04: implement vmm on top of new base

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 806a7335
......@@ -14,7 +14,8 @@
#define NVIF_CLASS_SW_NV50 /* if0005.h */ -0x00000006
#define NVIF_CLASS_SW_GF100 /* if0005.h */ -0x00000007
#define NVIF_CLASS_VMM /* if000c.h */ 0x0000000c
#define NVIF_CLASS_VMM /* if000c.h */ 0x8000000c
#define NVIF_CLASS_VMM_NV04 /* if000d.h */ 0x8000000d
/* the below match nvidia-assigned (either in hw, or sw) class numbers */
#define NV_NULL_CLASS 0x00000030
......
#ifndef __NVIF_IF000D_H__
#define __NVIF_IF000D_H__
#include "if000c.h"

/* NV04 VMM constructor arguments: deliberately empty/unversioned.
 * The placeholder comment marks where versioned nvif_vmm_vX members
 * would sit, following the convention of if000c.h. */
struct nv04_vmm_vn {
/* nvif_vmm_vX ... */
};
#endif
......@@ -26,7 +26,7 @@
#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu/nv04.h>
#include <subdev/mmu/vmm.h>
#include <nvif/class.h>
......@@ -49,7 +49,8 @@ nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
int ret;
if (dmaobj->clone) {
struct nvkm_memory *pgt = device->mmu->vmm->pgt[0].mem[0];
struct nvkm_memory *pgt =
device->mmu->vmm->pd->pt[0]->memory;
if (!dmaobj->base.start)
return nvkm_gpuobj_wrap(pgt, pgpuobj);
nvkm_kmap(pgt);
......
......@@ -13,3 +13,4 @@ nvkm-y += nvkm/subdev/mmu/gp100.o
nvkm-y += nvkm/subdev/mmu/gp10b.o
nvkm-y += nvkm/subdev/mmu/vmm.o
nvkm-y += nvkm/subdev/mmu/vmmnv04.o
......@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
#include "nv04.h"
#include "vmm.h"
#include <core/gpuobj.h>
#include <nvif/class.h>
#define NV04_PDMA_SIZE (128 * 1024 * 1024)
#define NV04_PDMA_PAGE ( 4 * 1024)
......@@ -73,30 +74,10 @@ nv04_vm_flush(struct nvkm_vm *vm)
******************************************************************************/
/*
 * NOTE(review): this span is a rendered diff hunk whose +/- markers were
 * stripped by the scrape; removed (pre-commit) and added (post-commit)
 * lines are interleaved below, so the text is NOT valid C as shown.
 */
static int
nv04_mmu_oneinit(struct nvkm_mmu *base) /* old signature — appears removed */
nv04_mmu_oneinit(struct nvkm_mmu *mmu) /* new signature — appears added */
{
/* NOTE(review): the lines referencing mmu->base.vmm below appear to be the
 * removed implementation, which allocated and initialized the page table
 * itself via nvkm_memory_new(). */
struct nv04_mmu *mmu = nv04_mmu(base);
struct nvkm_device *device = mmu->base.subdev.device;
struct nvkm_memory *dma;
int ret;
ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
&mmu->base.vmm);
if (ret)
return ret;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
(NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
16, true, &dma);
mmu->base.vmm->pgt[0].mem[0] = dma;
mmu->base.vmm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
nvkm_kmap(dma);
nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
nvkm_done(dma);
/* NOTE(review): the two lines below appear to be the added replacement,
 * which reuses the page table already owned by the new vmm object. */
mmu->vmm->pgt[0].mem[0] = mmu->vmm->pd->pt[0]->memory;
mmu->vmm->pgt[0].refcount[0] = 1;
return 0;
}
......@@ -129,7 +110,6 @@ nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
const struct nvkm_mmu_func
nv04_mmu = {
.oneinit = nv04_mmu_oneinit,
.dtor = nv04_mmu_dtor,
.limit = NV04_PDMA_SIZE,
.dma_bits = 32,
.pgt_bits = 32 - 12,
......@@ -138,10 +118,11 @@ nv04_mmu = {
.map_sg = nv04_vm_map_sg,
.unmap = nv04_vm_unmap,
.flush = nv04_vm_flush,
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true },
};
/*
 * NOTE(review): rendered diff hunk with markers stripped — the two return
 * statements are the removed (nv04_mmu_new_) and added (nvkm_mmu_new_)
 * variants; only one exists in the real file.
 */
int
nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
return nv04_mmu_new_(&nv04_mmu, device, index, pmmu); /* appears removed */
return nvkm_mmu_new_(&nv04_mmu, device, index, pmmu); /* appears added */
}
......@@ -34,7 +34,7 @@ struct nvkm_mmu_func {
void (*flush)(struct nvkm_vm *);
struct {
struct nvkm_sclass base;
struct nvkm_sclass user;
int (*ctor)(struct nvkm_mmu *, u64 addr, u64 size,
void *argv, u32 argc, struct lock_class_key *,
const char *name, struct nvkm_vmm **);
......@@ -45,6 +45,8 @@ struct nvkm_mmu_func {
int nvkm_vm_create(struct nvkm_mmu *, u64, u64, u64, u32,
struct lock_class_key *, struct nvkm_vm **);
extern const struct nvkm_mmu_func nv04_mmu;
int nv50_vm_create(struct nvkm_mmu *, u64, u64, u64, struct lock_class_key *,
struct nvkm_vm **);
void nv50_vm_map_pgt(struct nvkm_gpuobj *, u32, struct nvkm_memory **);
......
......@@ -102,10 +102,9 @@ int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
const char *name, struct nvkm_vmm *);
void nvkm_vmm_dtor(struct nvkm_vmm *);
/* Userspace-visible VMM class description: the nvif class plus the
 * constructor used to instantiate a vmm of that class.
 * NOTE(review): the surrounding diff context suggests this struct is being
 * removed by this commit in favour of equivalent members added to
 * nvkm_mmu_func — confirm against the full commit. */
struct nvkm_vmm_user {
struct nvkm_sclass base;
int (*ctor)(struct nvkm_mmu *, u64 addr, u64 size, void *args, u32 argc,
struct lock_class_key *, const char *name,
struct nvkm_vmm **);
};
int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
u64, u64, void *, u32, struct lock_class_key *,
const char *, struct nvkm_vmm **);
int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
#endif
/*
* Copyright 2017 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "vmm.h"
#include <nvif/if000d.h>
#include <nvif/unpack.h>
/* Page-table descriptor ops for NV04: intentionally empty.
 * NOTE(review): presumably PTE reads/writes are handled outside the vmm
 * code on this generation — confirm against the map/unmap paths. */
static const struct nvkm_vmm_desc_func
nv04_vmm_desc_pgt = {
};
/* Single-level page-table layout for 12-bit (4 KiB) pages; the empty
 * entry terminates the list.
 * NOTE(review): the initializer fields (PGT, 15, 4, 0x1000) are assumed to
 * be (type, index bits, PTE stride, alignment) — confirm against the
 * nvkm_vmm_desc definition. */
static const struct nvkm_vmm_desc
nv04_vmm_desc_12[] = {
{ PGT, 15, 4, 0x1000, &nv04_vmm_desc_pgt },
{}
};
/* NV04 VMM function table: a single supported page size, 4 KiB (shift 12),
 * flagged as host-backed; the empty entry terminates the page list. */
static const struct nvkm_vmm_func
nv04_vmm = {
.page = {
{ 12, &nv04_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
{}
}
};
/* Common constructor for NV04-family VMMs.
 *
 * Builds the vmm through nvkm_vmm_new_() with the given function table and
 * page-directory header size, then validates the caller-supplied argument
 * blob, which for this family carries no versioned members.
 *
 * Returns 0 on success or a negative errno.
 */
int
nv04_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
	      u32 pd_header, u64 addr, u64 size, void *argv, u32 argc,
	      struct lock_class_key *key, const char *name,
	      struct nvkm_vmm **pvmm)
{
	union {
		struct nv04_vmm_vn vn;
	} *uarg = argv;
	int ret;

	ret = nvkm_vmm_new_(func, mmu, pd_header, addr, size, key, name, pvmm);
	if (ret == 0) {
		/* no versioned members: accept only an empty blob */
		ret = nvif_unvers(-ENOSYS, &argv, &argc, uarg->vn);
	}
	return ret;
}
/* Instantiate the NV04 VMM and program its page-directory object.
 *
 * Creates the vmm with an 8-byte pd_header, then writes the two header
 * words ahead of the PTEs in the page directory's backing memory.
 *
 * Returns 0 on success or a negative errno.
 */
int
nv04_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
	     struct lock_class_key *key, const char *name,
	     struct nvkm_vmm **pvmm)
{
	struct nvkm_vmm *vmm;
	struct nvkm_memory *pgt;
	int ret;

	ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, addr, size,
			    argv, argc, key, name, &vmm);
	/* NOTE(review): *pvmm is published before the error check —
	 * presumably so the caller can tear down a partially-constructed
	 * vmm; confirm against the caller's failure path. */
	*pvmm = vmm;
	if (ret)
		return ret;

	pgt = vmm->pd->pt[0]->memory;
	nvkm_kmap(pgt);
	nvkm_wo32(pgt, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
	nvkm_wo32(pgt, 0x00004, vmm->limit - 1);
	nvkm_done(pgt);
	return 0;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment