Commit fa87e62d authored by Dmitry Cherkasov, committed by Alex Deucher

drm/radeon: add 2-level VM pagetables support v9

PDE/PTE update code uses CP ring for memory writes.
All page table entries are preallocated for now in alloc_pt().

It is made as a whole because it's hard to divide it into several patches
that compile and don't break anything when applied separately.

Tested on cayman card.

v2: rebased on top of "refactor set_page chipset interface v3",
    code cleanups

v3: switched offsets calc macros to inline funcs where possible,
    removed pd_addr from radeon_vm, switched RADEON_BLOCK_SIZE define
    to 9 (and PTE_COUNT to 1 << BLOCK_SIZE)

v4 (ck): move "incr" documentation to previous patch, cleanup and
         document RADEON_VM_* constants, change commit message to
         our usual format, simplify patch a lot by removing
         everything currently not necessary, disable SI workaround.

v5: (agd5f): Fix typo in tables_size calculation in
             radeon_vm_alloc_pt().  Second line should have been
             '+=' rather than '='.

v6: fix npdes calculation. In the scenario where the pfns to be mapped
overlap two PDE spans:

   +-----------+-------------+
   | PDE span  | PDE span    |
   +-----------+----+--------+
          |         |
          +---------+
          | pfns    |
          +---------+

the following npdes calculation gives an incorrect result:

npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 1;

For the case in the picture above it should give npdes = 2, but it gives 1.

This patch corrects it by rounding the last pfn up to the 512 boundary and
the first pfn down to the 512 boundary, then subtracting and dividing by 512.
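
For illustration, a small stand-alone sketch (plain user-space C; RADEON_VM_BLOCK_SIZE
and a local ALIGN() are redefined here, and the pfn/nptes values are made up) comparing
the old formula with the corrected rounding that ends up in radeon_vm_bo_update_pte():

#include <stdio.h>

#define RADEON_VM_BLOCK_SIZE 9
#define RADEON_VM_PTE_COUNT  (1 << RADEON_VM_BLOCK_SIZE)        /* 512 */
#define ALIGN(x, a)          (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        /* example mapping straddling a PDE boundary:
         * pfns 400..599 touch PDE 0 (pfns 0..511) and PDE 1 (pfns 512..1023) */
        unsigned pfn = 400, nptes = 200;

        /* old formula: only looks at the mapping size, not its position */
        unsigned old_npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 1;

        /* corrected: round the last pfn up and the first pfn down to the
         * 512 boundary, subtract, divide by 512 */
        unsigned new_npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
                              (pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;

        printf("old npdes = %u, corrected npdes = %u\n", old_npdes, new_npdes);
        /* prints: old npdes = 1, corrected npdes = 2 */
        return 0;
}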

v7: Make npde calculation clearer, fix ndw calculation.

v8: (agd5f): reserve enough for 2 full VM PTs, add some
             additional comments.

v9: fix typo in npde calculation
Signed-off-by: Dmitry Cherkasov <Dmitrii.Cherkasov@amd.com>
Signed-off-by: Christian König <deathsimple@vodafone.de>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent dce34bfd
@@ -782,7 +782,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
         (u32)(rdev->dummy_page.addr >> 12));
  WREG32(VM_CONTEXT1_CNTL2, 0);
  WREG32(VM_CONTEXT1_CNTL, 0);
- WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
          RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
  cayman_pcie_gart_tlb_flush(rdev);
@@ -1580,7 +1580,7 @@ void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib)
  radeon_ring_write(ring, vm->last_pfn);
  radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
- radeon_ring_write(ring, vm->pt_gpu_addr >> 12);
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
  /* flush hdp cache */
  radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
@@ -648,15 +648,23 @@ struct radeon_ring {
  * VM
  */
+/* maximum number of VMIDs */
 #define RADEON_NUM_VM 16
+
+/* defines number of bits in page table versus page directory,
+ * a page is 4KB so we have 12 bits offset, 9 bits in the page
+ * table and the remaining 19 bits are in the page directory */
+#define RADEON_VM_BLOCK_SIZE 9
+
+/* number of entries in page table */
+#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
+
 struct radeon_vm {
  struct list_head list;
  struct list_head va;
  unsigned id;
  unsigned last_pfn;
- u64 pt_gpu_addr;
- u64 *pt;
+ u64 pd_gpu_addr;
  struct radeon_sa_bo *sa_bo;
  struct mutex mutex;
  /* last fence for cs using this vm */
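
As a quick illustration of the address split these new constants describe (a
stand-alone user-space sketch, not part of the patch): 12 bits of page offset for
4 KB pages, RADEON_VM_BLOCK_SIZE (9) bits of page table index, and the remaining
bits of page directory index:

#include <stdint.h>
#include <stdio.h>

#define RADEON_VM_BLOCK_SIZE 9
#define RADEON_VM_PTE_COUNT  (1 << RADEON_VM_BLOCK_SIZE)        /* 512 PTEs per table */

int main(void)
{
        uint64_t va  = 0x00345678;              /* arbitrary GPU virtual address */
        uint64_t pfn = va >> 12;                /* 4 KB pages -> 12 bit offset   */

        unsigned pde_idx = pfn >> RADEON_VM_BLOCK_SIZE;         /* which page table   */
        unsigned pte_idx = pfn & (RADEON_VM_PTE_COUNT - 1);     /* entry within table */

        printf("va 0x%llx -> pfn %llu -> pde %u, pte %u, offset 0x%llx\n",
               (unsigned long long)va, (unsigned long long)pfn,
               pde_idx, pte_idx, (unsigned long long)(va & 0xfff));
        return 0;
}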
@@ -422,6 +422,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
  * TODO bind a default page at vm initialization for default address
  */
+
+/**
+ * radeon_vm_directory_size - returns the size of the page directory in bytes
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the size of the page directory in bytes (cayman+).
+ */
+static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
+{
+ return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
+}
+
 /**
  * radeon_vm_manager_init - init the vm manager
  *
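
As a rough sanity check of the formula above (the numbers here are purely
illustrative): each PDE is 8 bytes and covers RADEON_VM_PTE_COUNT (512) pages, so
for a hypothetical max_pfn of 1 << 20 (a 4 GB address space) the directory works
out to ((1 << 20) >> 9) * 8 = 2048 * 8 = 16 KB.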
@@ -435,11 +447,15 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
  struct radeon_vm *vm;
  struct radeon_bo_va *bo_va;
  int r;
+ unsigned size;

  if (!rdev->vm_manager.enabled) {
   /* allocate enough for 2 full VM pts */
+  size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+  size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8);
+  size *= 2;
   r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-          rdev->vm_manager.max_pfn * 8 * 2,
+          size,
          RADEON_GEM_DOMAIN_VRAM);
   if (r) {
    dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -490,7 +506,6 @@ static void radeon_vm_free_pt(struct radeon_device *rdev,
  list_del_init(&vm->list);
  radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
- vm->pt = NULL;

  list_for_each_entry(bo_va, &vm->va, vm_list) {
   bo_va->valid = false;
@@ -546,11 +561,17 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 {
  struct radeon_vm *vm_evict;
  int r;
+ u64 *pd_addr;
+ int tables_size;

  if (vm == NULL) {
   return -EINVAL;
  }

+ /* allocate enough to cover the current VM size */
+ tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+ tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
+
  if (vm->sa_bo != NULL) {
   /* update lru */
   list_del_init(&vm->list);
@@ -560,8 +581,7 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 retry:
  r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
-        RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
-        RADEON_GPU_PAGE_SIZE, false);
+        tables_size, RADEON_GPU_PAGE_SIZE, false);
  if (r == -ENOMEM) {
   if (list_empty(&rdev->vm_manager.lru_vm)) {
    return r;
@@ -576,9 +596,9 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
   return r;
  }

- vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
- vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
- memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
+ pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo);
+ vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
+ memset(pd_addr, 0, tables_size);

  list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
  return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
@@ -866,8 +886,9 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
  struct radeon_ring *ring = &rdev->ring[ridx];
  struct radeon_semaphore *sem = NULL;
  struct radeon_bo_va *bo_va;
- unsigned ngpu_pages, ndw;
- uint64_t pfn, addr;
+ unsigned nptes, npdes, ndw;
+ uint64_t pe, addr;
+ uint64_t pfn;
  int r;

  /* nothing to do if vm isn't bound */
@@ -889,10 +910,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
  if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
   return 0;

- ngpu_pages = radeon_bo_ngpu_pages(bo);
  bo_va->flags &= ~RADEON_VM_PAGE_VALID;
  bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
- pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
  if (mem) {
   addr = mem->start << PAGE_SHIFT;
   if (mem->mem_type != TTM_PL_SYSTEM) {
@@ -921,9 +940,26 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
  }

  /* estimate number of dw needed */
+ /* reserve space for 32-bit padding */
  ndw = 32;
- ndw += (ngpu_pages >> 12) * 3;
- ndw += ngpu_pages * 2;
+
+ nptes = radeon_bo_ngpu_pages(bo);
+
+ pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE);
+
+ /* handle cases where a bo spans several pdes */
+ npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
+   (pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;
+
+ /* reserve space for one header for every 2k dwords */
+ ndw += (nptes >> 11) * 3;
+ /* reserve space for pte addresses */
+ ndw += nptes * 2;
+
+ /* reserve space for one header for every 2k dwords */
+ ndw += (npdes >> 11) * 3;
+ /* reserve space for pde addresses */
+ ndw += npdes * 2;

  r = radeon_ring_lock(rdev, ring, ndw);
  if (r) {
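
To put the reservation above in numbers (illustrative only): a bo of 1024 pages
that starts on a 512-page boundary has nptes = 1024 and npdes = 2, so
ndw = 32 + (1024 >> 11) * 3 + 1024 * 2 + (2 >> 11) * 3 + 2 * 2
    = 32 + 0 + 2048 + 0 + 4 = 2084 dwords.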
@@ -935,8 +971,22 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
   radeon_fence_note_sync(vm->fence, ridx);
  }

- radeon_asic_vm_set_page(rdev, vm->pt_gpu_addr + pfn * 8, addr,
-    ngpu_pages, RADEON_GPU_PAGE_SIZE, bo_va->flags);
+ /* update page table entries */
+ pe = vm->pd_gpu_addr;
+ pe += radeon_vm_directory_size(rdev);
+ pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8;
+
+ radeon_asic_vm_set_page(rdev, pe, addr, nptes,
+    RADEON_GPU_PAGE_SIZE, bo_va->flags);
+
+ /* update page directory entries */
+ addr = pe;
+
+ pe = vm->pd_gpu_addr;
+ pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
+
+ radeon_asic_vm_set_page(rdev, pe, addr, npdes,
+    RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID);

  radeon_fence_unref(&vm->fence);
  r = radeon_fence_emit(rdev, &vm->fence, ridx);
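
The two set_page calls above follow from the flat layout used here: the page
directory sits at the start of the SA allocation and all page tables follow it
contiguously, so PTE and PDE GPU addresses fall out of simple offset arithmetic.
A stand-alone sketch with made-up addresses (the real code takes pd_gpu_addr and
the directory size from the allocated sa_bo):

#include <stdint.h>
#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_VM_BLOCK_SIZE 9
#define RADEON_VM_PTE_COUNT  (1 << RADEON_VM_BLOCK_SIZE)

int main(void)
{
        /* illustrative values only */
        uint64_t pd_gpu_addr = 0x100000;          /* start of the SA allocation         */
        uint64_t dir_size    = 16384;             /* radeon_vm_directory_size() result  */
        uint64_t soffset     = 2 * 1024 * 1024;   /* bo mapped at 2 MB in the VM        */

        uint64_t pfn = soffset / RADEON_GPU_PAGE_SIZE;

        /* PTEs: the directory comes first, the flat page table area right after it */
        uint64_t pte_addr = pd_gpu_addr + dir_size + pfn * 8;

        /* PDEs: indexed by the upper bits of the pfn, 8 bytes per entry */
        uint64_t pde_addr = pd_gpu_addr + (pfn >> RADEON_VM_BLOCK_SIZE) * 8;

        printf("pte[] start 0x%llx, pde[] start 0x%llx, pde increment %d bytes\n",
               (unsigned long long)pte_addr, (unsigned long long)pde_addr,
               RADEON_VM_PTE_COUNT * 8);
        return 0;
}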
@@ -1018,18 +1068,11 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)

  vm->id = 0;
  vm->fence = NULL;
+ vm->last_pfn = 0;
  mutex_init(&vm->mutex);
  INIT_LIST_HEAD(&vm->list);
  INIT_LIST_HEAD(&vm->va);
- /* SI requires equal sized PTs for all VMs, so always set
-  * last_pfn to max_pfn. cayman allows variable sized
-  * pts so we can grow then as needed. Once we switch
-  * to two level pts we can unify this again.
-  */
- if (rdev->family >= CHIP_TAHITI)
-  vm->last_pfn = rdev->vm_manager.max_pfn;
- else
-  vm->last_pfn = 0;
+
  /* map the ib pool buffer at 0 in virtual address space, set
   * read only
   */
@@ -2426,7 +2426,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
  WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
         (u32)(rdev->dummy_page.addr >> 12));
  WREG32(VM_CONTEXT1_CNTL2, 0);
- WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
          RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
  si_pcie_gart_tlb_flush(rdev);
@@ -2804,7 +2804,7 @@ void si_vm_flush(struct radeon_device *rdev, struct radeon_ib *ib)
   radeon_ring_write(ring, PACKET0(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR
       + ((vm->id - 8) << 2), 0));
  }
- radeon_ring_write(ring, vm->pt_gpu_addr >> 12);
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
  /* flush hdp cache */
  radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));