Commit cd821077 authored by Ben Skeggs

drm/nouveau/mmu: switch to gpuobj accessor macros

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 19187075
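
Every hunk below applies the same mechanical conversion: raw nv_wo32()/nv_ro32() pokes at a gpuobj become the nvkm_wo32()/nvkm_ro32() accessor macros, and each run of accesses is bracketed by nvkm_kmap()/nvkm_done(). A minimal sketch of the resulting shape, distilled from the hunks themselves (write_pte64() is an illustrative name, not a function from this commit):

```c
/* Sketch of the accessor pattern this commit applies; the
 * nvkm_kmap()/nvkm_wo32()/nvkm_done() calls are the real macros,
 * write_pte64() itself is hypothetical. */
static void
write_pte64(struct nvkm_gpuobj *pgt, u32 pte, u64 phys)
{
	nvkm_kmap(pgt);		/* map the object for CPU access */
	nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
	nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
	nvkm_done(pgt);		/* flush and unmap */
}
```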
@@ -79,8 +79,10 @@ gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2])
 	if (pgt[1])
 		pde[0] = 0x00000001 | (pgt[1]->addr >> 8);
 
-	nv_wo32(pgd, (index * 8) + 0, pde[0]);
-	nv_wo32(pgd, (index * 8) + 4, pde[1]);
+	nvkm_kmap(pgd);
+	nvkm_wo32(pgd, (index * 8) + 0, pde[0]);
+	nvkm_wo32(pgd, (index * 8) + 4, pde[1]);
+	nvkm_done(pgd);
 }
 
 static inline u64
@@ -114,12 +116,14 @@ gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 		ltc->tags_clear(ltc, tag, cnt);
 	}
 
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
 		phys += next;
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
@@ -130,24 +134,28 @@ gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 	/* compressed storage types are invalid for system memory */
 	u32 memtype = gf100_pte_storage_type_map[mem->memtype & 0xff];
 
+	nvkm_kmap(pgt);
 	pte <<= 3;
 	while (cnt--) {
 		u64 phys = gf100_vm_addr(vma, *list++, memtype, target);
-		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
 gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 {
+	nvkm_kmap(pgt);
 	pte <<= 3;
 	while (cnt--) {
-		nv_wo32(pgt, pte + 0, 0x00000000);
-		nv_wo32(pgt, pte + 4, 0x00000000);
+		nvkm_wo32(pgt, pte + 0, 0x00000000);
+		nvkm_wo32(pgt, pte + 4, 0x00000000);
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
...
@@ -37,26 +37,30 @@ nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	pte = 0x00008 + (pte * 4);
+	nvkm_kmap(pgt);
 	while (cnt) {
 		u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
 		u32 phys = (u32)*list++;
 		while (cnt && page--) {
-			nv_wo32(pgt, pte, phys | 3);
+			nvkm_wo32(pgt, pte, phys | 3);
 			phys += NV04_PDMA_PAGE;
 			pte += 4;
 			cnt -= 1;
 		}
 	}
+	nvkm_done(pgt);
 }
 
 static void
 nv04_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 {
 	pte = 0x00008 + (pte * 4);
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte, 0x00000000);
+		nvkm_wo32(pgt, pte, 0x00000000);
 		pte += 4;
 	}
+	nvkm_done(pgt);
 }
 
 static void
@@ -118,8 +122,10 @@ nv04_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;
 
-	nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
-	nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
+	nvkm_kmap(dma);
+	nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
+	nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
+	nvkm_done(dma);
 
 	return 0;
 }
...
@@ -39,26 +39,30 @@ nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
 	pte = pte * 4;
+	nvkm_kmap(pgt);
 	while (cnt) {
 		u32 page = PAGE_SIZE / NV41_GART_PAGE;
 		u64 phys = (u64)*list++;
 		while (cnt && page--) {
-			nv_wo32(pgt, pte, (phys >> 7) | 1);
+			nvkm_wo32(pgt, pte, (phys >> 7) | 1);
 			phys += NV41_GART_PAGE;
 			pte += 4;
 			cnt -= 1;
 		}
 	}
+	nvkm_done(pgt);
 }
 
 static void
 nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 {
 	pte = pte * 4;
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte, 0x00000000);
+		nvkm_wo32(pgt, pte, 0x00000000);
 		pte += 4;
 	}
+	nvkm_done(pgt);
 }
 
 static void
...
@@ -41,10 +41,10 @@ nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
 	u32 base = (pte << 2) & ~0x0000000f;
 	u32 tmp[4];
 
-	tmp[0] = nv_ro32(pgt, base + 0x0);
-	tmp[1] = nv_ro32(pgt, base + 0x4);
-	tmp[2] = nv_ro32(pgt, base + 0x8);
-	tmp[3] = nv_ro32(pgt, base + 0xc);
+	tmp[0] = nvkm_ro32(pgt, base + 0x0);
+	tmp[1] = nvkm_ro32(pgt, base + 0x4);
+	tmp[2] = nvkm_ro32(pgt, base + 0x8);
+	tmp[3] = nvkm_ro32(pgt, base + 0xc);
 
 	while (cnt--) {
 		u32 addr = list ? (*list++ >> 12) : (null >> 12);
@@ -74,10 +74,10 @@ nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
 		}
 	}
 
-	nv_wo32(pgt, base + 0x0, tmp[0]);
-	nv_wo32(pgt, base + 0x4, tmp[1]);
-	nv_wo32(pgt, base + 0x8, tmp[2]);
-	nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
+	nvkm_wo32(pgt, base + 0x0, tmp[0]);
+	nvkm_wo32(pgt, base + 0x4, tmp[1]);
+	nvkm_wo32(pgt, base + 0x8, tmp[2]);
+	nvkm_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
 }
 
 static void
@@ -88,6 +88,7 @@ nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 	u32 tmp[4];
 	int i;
 
+	nvkm_kmap(pgt);
 	if (pte & 3) {
 		u32 max = 4 - (pte & 3);
 		u32 part = (cnt > max) ? max : cnt;
@@ -100,15 +101,16 @@ nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 	while (cnt >= 4) {
 		for (i = 0; i < 4; i++)
 			tmp[i] = *list++ >> 12;
-		nv_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
-		nv_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
-		nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
-		nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
+		nvkm_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
+		nvkm_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
+		nvkm_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
+		nvkm_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
 		cnt -= 4;
 	}
 
 	if (cnt)
 		nv44_vm_fill(pgt, mmu->null, list, pte, cnt);
+	nvkm_done(pgt);
 }
 
 static void
@@ -116,6 +118,7 @@ nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 {
 	struct nv04_mmu *mmu = (void *)nvkm_mmu(pgt);
 
+	nvkm_kmap(pgt);
 	if (pte & 3) {
 		u32 max = 4 - (pte & 3);
 		u32 part = (cnt > max) ? max : cnt;
@@ -125,15 +128,16 @@ nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 	}
 
 	while (cnt >= 4) {
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
-		nv_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
+		nvkm_wo32(pgt, pte++ * 4, 0x00000000);
 		cnt -= 4;
 	}
 
 	if (cnt)
 		nv44_vm_fill(pgt, mmu->null, NULL, pte, cnt);
+	nvkm_done(pgt);
 }
 
 static void
...
@@ -53,8 +53,10 @@ nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
 		phys |= 0x20;
 	}
 
-	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
-	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+	nvkm_kmap(pgd);
+	nvkm_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+	nvkm_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+	nvkm_done(pgd);
 }
 
 static inline u64
@@ -89,6 +91,7 @@ nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 	pte <<= 3;
 	cnt <<= 3;
 
+	nvkm_kmap(pgt);
 	while (cnt) {
 		u32 offset_h = upper_32_bits(phys);
 		u32 offset_l = lower_32_bits(phys);
@@ -109,12 +112,13 @@ nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 		}
 
 		while (block) {
-			nv_wo32(pgt, pte + 0, offset_l);
-			nv_wo32(pgt, pte + 4, offset_h);
+			nvkm_wo32(pgt, pte + 0, offset_l);
+			nvkm_wo32(pgt, pte + 4, offset_h);
 			pte += 8;
 			block -= 8;
 		}
 	}
+	nvkm_done(pgt);
 }
 
 static void
@@ -123,23 +127,27 @@ nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
 {
 	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
 	pte <<= 3;
+	nvkm_kmap(pgt);
 	while (cnt--) {
 		u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
-		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
-		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		nvkm_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nvkm_wo32(pgt, pte + 4, upper_32_bits(phys));
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
 nv50_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
 {
 	pte <<= 3;
+	nvkm_kmap(pgt);
 	while (cnt--) {
-		nv_wo32(pgt, pte + 0, 0x00000000);
-		nv_wo32(pgt, pte + 4, 0x00000000);
+		nvkm_wo32(pgt, pte + 0, 0x00000000);
+		nvkm_wo32(pgt, pte + 4, 0x00000000);
 		pte += 8;
 	}
+	nvkm_done(pgt);
 }
 
 static void
...
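
One detail worth noting in the nv44 hunks: the nvkm_ro32() reads in nv44_vm_fill() gain no nvkm_kmap()/nvkm_done() pair of their own, because both callers (nv44_vm_map_sg() and nv44_vm_unmap()) take the mapping before calling it and drop it afterwards, so each four-PTE read-modify-write runs under a single map/unmap. A minimal sketch of that shape (rmw_word() is an illustrative name, not code from this commit):

```c
/* Sketch: the caller already holds nvkm_kmap(pgt), so a helper may
 * freely mix nvkm_ro32() reads and nvkm_wo32() writes without
 * remapping, as nv44_vm_fill() does. */
static void
rmw_word(struct nvkm_gpuobj *pgt, u32 base, u32 set)
{
	u32 tmp = nvkm_ro32(pgt, base);		/* read current word */
	nvkm_wo32(pgt, base, tmp | set);	/* write merged value back */
}
```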