Commit dd12d158 authored by Ben Skeggs's avatar Ben Skeggs

drm/nouveau/mmu/nv04: implement new vmm backend

Signed-off-by: default avatarBen Skeggs <bskeggs@redhat.com>
parent eb813999
...@@ -5,4 +5,8 @@ ...@@ -5,4 +5,8 @@
struct nv04_vmm_vn { struct nv04_vmm_vn {
/* nvif_vmm_vX ... */ /* nvif_vmm_vX ... */
}; };
/* Argument structure for NV04 VMM map requests: carries no fields of its
 * own beyond the versioned nvif header, since NV04 mappings need no extra
 * per-map parameters. */
struct nv04_vmm_map_vn {
	/* nvif_vmm_map_vX ... */
};
#endif #endif
...@@ -26,71 +26,14 @@ ...@@ -26,71 +26,14 @@
#include <nvif/class.h> #include <nvif/class.h>
#define NV04_PDMA_SIZE (128 * 1024 * 1024) #define NV04_PDMA_SIZE (128 * 1024 * 1024)
#define NV04_PDMA_PAGE ( 4 * 1024)
/*******************************************************************************
* VM map/unmap callbacks
******************************************************************************/
/* Write PTEs for a scatter-gather list of DMA pages.  Each CPU page in
 * @list is split into PAGE_SIZE / NV04_PDMA_PAGE hardware PTEs of
 * NV04_PDMA_PAGE bytes each, marked with the low valid/RW bits (| 3).
 * The page table starts at byte offset 0x00008 in @pgt. */
static void
nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	const u32 sub = PAGE_SIZE / NV04_PDMA_PAGE; /* PTEs per CPU page */

	pte = 0x00008 + (pte * 4);
	nvkm_kmap(pgt);
	while (cnt) {
		u32 addr = (u32)*list++;
		u32 i;

		for (i = 0; i < sub && cnt; i++, cnt--) {
			nvkm_wo32(pgt, pte, addr | 3);
			addr += NV04_PDMA_PAGE;
			pte += 4;
		}
	}
	nvkm_done(pgt);
}
/* Clear @cnt PTEs starting at index @pte by writing zeros; the table's
 * entries begin at byte offset 0x00008 in @pgt. */
static void
nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
	u32 i;

	nvkm_kmap(pgt);
	for (i = 0; i < cnt; i++)
		nvkm_wo32(pgt, 0x00008 + ((pte + i) * 4), 0x00000000);
	nvkm_done(pgt);
}
/* Intentionally empty: nothing in this callback to do on NV04 —
 * presumably the hardware requires no explicit TLB flush after PTE
 * updates here (NOTE(review): confirm against hardware docs). */
static void
nv04_vm_flush(struct nvkm_vm *vm)
{
}
/*******************************************************************************
* MMU subdev
******************************************************************************/
/* One-time MMU setup: expose the VMM's page-directory memory as the
 * single (pre-existing) page table slot, holding one reference so the
 * legacy pgt bookkeeping treats it as allocated.  Always succeeds. */
static int
nv04_mmu_oneinit(struct nvkm_mmu *mmu)
{
	mmu->vmm->pgt[0].mem[0] = mmu->vmm->pd->pt[0]->memory;
	mmu->vmm->pgt[0].refcount[0] = 1;
	return 0;
}
const struct nvkm_mmu_func const struct nvkm_mmu_func
nv04_mmu = { nv04_mmu = {
.oneinit = nv04_mmu_oneinit,
.limit = NV04_PDMA_SIZE, .limit = NV04_PDMA_SIZE,
.dma_bits = 32, .dma_bits = 32,
.pgt_bits = 32 - 12, .pgt_bits = 32 - 12,
.spg_shift = 12, .spg_shift = 12,
.lpg_shift = 12, .lpg_shift = 12,
.map_sg = nv04_vm_map_sg,
.unmap = nv04_vm_unmap,
.flush = nv04_vm_flush,
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true }, .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true },
}; };
......
...@@ -157,6 +157,7 @@ void nvkm_vmm_ptes_unmap(struct nvkm_vmm *, const struct nvkm_vmm_page *, ...@@ -157,6 +157,7 @@ void nvkm_vmm_ptes_unmap(struct nvkm_vmm *, const struct nvkm_vmm_page *,
int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32, int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
u64, u64, void *, u32, struct lock_class_key *, u64, u64, void *, u32, struct lock_class_key *,
const char *, struct nvkm_vmm **); const char *, struct nvkm_vmm **);
int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *, int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
struct nvkm_mmu *, u64, u64, void *, u32, struct nvkm_mmu *, u64, u64, void *, u32,
......
...@@ -24,8 +24,50 @@ ...@@ -24,8 +24,50 @@
#include <nvif/if000d.h> #include <nvif/if000d.h>
#include <nvif/unpack.h> #include <nvif/unpack.h>
/* Emit @ptes consecutive 32-bit PTEs starting at index @ptei, mapping
 * physically-contiguous 4KiB pages beginning at @addr.  Entries start at
 * byte offset 8 in the table; low bits 0x3 mark the page present + RW. */
static inline void
nv04_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
	u32 data = addr | 0x00000003; /* PRESENT, RW. */

	for (; ptes; ptes--, ptei++, data += 0x00001000)
		VMM_WO032(pt, vmm, 8 + ptei * 4, data);
}
/* Map a scatter/gather list: delegate iteration over the SGL segments to
 * the common helper, which calls nv04_vmm_pgt_pte for each run of PTEs. */
static void
nv04_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
	VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
}
/* Map an array of DMA addresses.  When the CPU page size matches the
 * 4KiB GPU page size, each DMA address becomes exactly one PTE, so write
 * them directly; otherwise fall back to the generic per-page iterator. */
static void
nv04_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
		 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
#if PAGE_SHIFT == 12
	nvkm_kmap(pt->memory);
	for (; ptes; ptes--, ptei++)
		VMM_WO032(pt, vmm, 8 + ptei * 4, *map->dma++ | 0x00000003);
	nvkm_done(pt->memory);
#else
	VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
#endif
}
/* Invalidate @ptes PTEs from index @ptei by filling them with zero
 * (entries begin at byte offset 8 in the page table). */
static void
nv04_vmm_pgt_unmap(struct nvkm_vmm *vmm,
		   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
	VMM_FO032(pt, vmm, 8 + (ptei * 4), 0, ptes);
}
static const struct nvkm_vmm_desc_func static const struct nvkm_vmm_desc_func
nv04_vmm_desc_pgt = { nv04_vmm_desc_pgt = {
.unmap = nv04_vmm_pgt_unmap,
.dma = nv04_vmm_pgt_dma,
.sgl = nv04_vmm_pgt_sgl,
}; };
static const struct nvkm_vmm_desc static const struct nvkm_vmm_desc
...@@ -34,8 +76,22 @@ nv04_vmm_desc_12[] = { ...@@ -34,8 +76,22 @@ nv04_vmm_desc_12[] = {
{} {}
}; };
/* Validate userspace map arguments for the NV04 VMM class.
 *
 * The only accepted payload is the (empty) unversioned nv04_vmm_map_vn
 * struct; nvif_unvers consumes it from argv/argc and returns 0 on match.
 * Returns 0 on success, -ENOSYS (or the unpack error) on bad arguments.
 */
int
nv04_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
	       struct nvkm_vmm_map *map)
{
	union {
		struct nv04_vmm_map_vn vn;
	} *args = argv;
	int ret = -ENOSYS;
	/* nvif_unvers advances argv/argc past the recognized struct. */
	if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
		VMM_DEBUG(vmm, "args");
	return ret;
}
static const struct nvkm_vmm_func static const struct nvkm_vmm_func
nv04_vmm = { nv04_vmm = {
.valid = nv04_vmm_valid,
.page = { .page = {
{ 12, &nv04_vmm_desc_12[0], NVKM_VMM_PAGE_HOST }, { 12, &nv04_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
{} {}
...@@ -65,8 +121,8 @@ nv04_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc, ...@@ -65,8 +121,8 @@ nv04_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
struct lock_class_key *key, const char *name, struct lock_class_key *key, const char *name,
struct nvkm_vmm **pvmm) struct nvkm_vmm **pvmm)
{ {
struct nvkm_vmm *vmm;
struct nvkm_memory *mem; struct nvkm_memory *mem;
struct nvkm_vmm *vmm;
int ret; int ret;
ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, addr, size, ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, addr, size,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment