Commit d72a6887 authored by Christian König, committed by Alex Deucher

drm/amdgpu: use leaf iterator for allocating PD/PT

Less code, and it allows for easier error handling (see the sketch below).
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 73633e32
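The new implementation in the diff below walks every page-directory/page-table entry touched by the requested range with for_each_amdgpu_vm_pt_leaf and a struct amdgpu_vm_pt_cursor, instead of recursing one level at a time through amdgpu_vm_alloc_levels. The standalone C sketch that follows only illustrates that general shape, a flat loop over leaf slots, continue for entries that already exist, and a single goto label for unwinding; every name in it (leaf_dir, leaf_page, alloc_leaves, for_each_leaf_slot) is invented for the example and none of it is amdgpu code.

#include <stdlib.h>
#include <errno.h>

#define LEAF_SHIFT      9
#define LEAVES          (1u << LEAF_SHIFT)

struct leaf_page {
        void *mapping;                  /* stands in for the kmapped BO */
};

struct leaf_dir {
        struct leaf_page *leaves[LEAVES];
};

/* Visit every leaf index covering [start, last], the way the leaf iterator
 * visits every PD/PT entry touched by an address range. */
#define for_each_leaf_slot(idx, start, last) \
        for ((idx) = (start); (idx) <= (last); ++(idx))

static int alloc_leaves(struct leaf_dir *dir, unsigned int start,
                        unsigned int last)
{
        struct leaf_page *page;
        unsigned int idx;
        int r;

        if (last >= LEAVES)
                return -EINVAL;

        for_each_leaf_slot(idx, start, last) {
                if (dir->leaves[idx])           /* already allocated, skip it */
                        continue;

                page = calloc(1, sizeof(*page));
                if (!page)
                        return -ENOMEM;

                page->mapping = malloc(4096);   /* second step that can fail */
                if (!page->mapping) {
                        r = -ENOMEM;
                        goto error_free_page;   /* single unwind point */
                }

                dir->leaves[idx] = page;
        }
        return 0;

error_free_page:
        free(page);
        return r;
}

int main(void)
{
        static struct leaf_dir dir;

        /* allocate leaves 3..7; a second call over the same range is a no-op */
        return alloc_leaves(&dir, 3, 7) ? EXIT_FAILURE : EXIT_SUCCESS;
}

The loop-plus-single-label structure is what lets the actual patch drop the duplicated amdgpu_bo_unref() pairs that the recursive version needed on every failure path.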
@@ -845,142 +845,96 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 }
 
 /**
- * amdgpu_vm_alloc_levels - allocate the PD/PT levels
+ * amdgpu_vm_alloc_pts - Allocate page tables.
  *
  * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @parent: parent PT
- * @saddr: start of the address range
- * @eaddr: end of the address range
- * @level: VMPT level
- * @ats: indicate ATS support from PTE
+ * @vm: VM to allocate page tables for
+ * @saddr: Start address which needs to be allocated
+ * @size: Size from start address we need.
  *
  * Make sure the page directories and page tables are allocated
  *
  * Returns:
  * 0 on success, errno otherwise.
  */
-static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
-                                  struct amdgpu_vm *vm,
-                                  struct amdgpu_vm_pt *parent,
-                                  uint64_t saddr, uint64_t eaddr,
-                                  unsigned level, bool ats)
+int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+                        struct amdgpu_vm *vm,
+                        uint64_t saddr, uint64_t size)
 {
-        unsigned shift = amdgpu_vm_level_shift(adev, level);
-        struct amdgpu_bo_param bp;
-        unsigned pt_idx, from, to;
+        struct amdgpu_vm_pt_cursor cursor;
+        struct amdgpu_bo *pt;
+        bool ats = false;
+        uint64_t eaddr;
         int r;
 
-        if (!parent->entries) {
-                unsigned num_entries = amdgpu_vm_num_entries(adev, level);
-
-                parent->entries = kvmalloc_array(num_entries,
-                                                 sizeof(struct amdgpu_vm_pt),
-                                                 GFP_KERNEL | __GFP_ZERO);
-                if (!parent->entries)
-                        return -ENOMEM;
-        }
-
-        from = saddr >> shift;
-        to = eaddr >> shift;
-        if (from >= amdgpu_vm_num_entries(adev, level) ||
-            to >= amdgpu_vm_num_entries(adev, level))
+        /* validate the parameters */
+        if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
                 return -EINVAL;
 
-        ++level;
-        saddr = saddr & ((1 << shift) - 1);
-        eaddr = eaddr & ((1 << shift) - 1);
+        eaddr = saddr + size - 1;
+
+        if (vm->pte_support_ats)
+                ats = saddr < AMDGPU_GMC_HOLE_START;
 
-        amdgpu_vm_bo_param(adev, vm, level, &bp);
+        saddr /= AMDGPU_GPU_PAGE_SIZE;
+        eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
-        /* walk over the address space and allocate the page tables */
-        for (pt_idx = from; pt_idx <= to; ++pt_idx) {
-                struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
-                struct amdgpu_bo *pt;
+        if (eaddr >= adev->vm_manager.max_pfn) {
+                dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
+                        eaddr, adev->vm_manager.max_pfn);
+                return -EINVAL;
+        }
+
+        for_each_amdgpu_vm_pt_leaf(adev, vm, saddr, eaddr, cursor) {
+                struct amdgpu_vm_pt *entry = cursor.entry;
+                struct amdgpu_bo_param bp;
+
+                if (cursor.level < AMDGPU_VM_PTB) {
+                        unsigned num_entries;
+
+                        num_entries = amdgpu_vm_num_entries(adev, cursor.level);
+                        entry->entries = kvmalloc_array(num_entries,
+                                                        sizeof(*entry->entries),
+                                                        GFP_KERNEL |
+                                                        __GFP_ZERO);
+                        if (!entry->entries)
+                                return -ENOMEM;
+                }
 
-                if (!entry->base.bo) {
-                        r = amdgpu_bo_create(adev, &bp, &pt);
-                        if (r)
-                                return r;
-
-                        r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
-                        if (r) {
-                                amdgpu_bo_unref(&pt->shadow);
-                                amdgpu_bo_unref(&pt);
-                                return r;
-                        }
+                if (entry->base.bo)
+                        continue;
+
+                amdgpu_vm_bo_param(adev, vm, cursor.level, &bp);
+
+                r = amdgpu_bo_create(adev, &bp, &pt);
+                if (r)
+                        return r;
+
+                r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
+                if (r)
+                        goto error_free_pt;
 
-                        if (vm->use_cpu_for_update) {
-                                r = amdgpu_bo_kmap(pt, NULL);
-                                if (r) {
-                                        amdgpu_bo_unref(&pt->shadow);
-                                        amdgpu_bo_unref(&pt);
-                                        return r;
-                                }
-                        }
+                if (vm->use_cpu_for_update) {
+                        r = amdgpu_bo_kmap(pt, NULL);
+                        if (r)
+                                goto error_free_pt;
+                }
 
-                        /* Keep a reference to the root directory to avoid
-                         * freeing them up in the wrong order.
-                         */
-                        pt->parent = amdgpu_bo_ref(parent->base.bo);
-
-                        amdgpu_vm_bo_base_init(&entry->base, vm, pt);
-                }
+                /* Keep a reference to the root directory to avoid
+                 * freeing them up in the wrong order.
+                 */
+                pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
 
-                if (level < AMDGPU_VM_PTB) {
-                        uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
-                        uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
-                                ((1 << shift) - 1);
-
-                        r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
-                                                   sub_eaddr, level, ats);
-                        if (r)
-                                return r;
-                }
+                amdgpu_vm_bo_base_init(&entry->base, vm, pt);
         }
 
         return 0;
-}
-
-/**
- * amdgpu_vm_alloc_pts - Allocate page tables.
- *
- * @adev: amdgpu_device pointer
- * @vm: VM to allocate page tables for
- * @saddr: Start address which needs to be allocated
- * @size: Size from start address we need.
- *
- * Make sure the page tables are allocated.
- *
- * Returns:
- * 0 on success, errno otherwise.
- */
-int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
-                        struct amdgpu_vm *vm,
-                        uint64_t saddr, uint64_t size)
-{
-        uint64_t eaddr;
-        bool ats = false;
-
-        /* validate the parameters */
-        if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
-                return -EINVAL;
-
-        eaddr = saddr + size - 1;
-
-        if (vm->pte_support_ats)
-                ats = saddr < AMDGPU_GMC_HOLE_START;
-
-        saddr /= AMDGPU_GPU_PAGE_SIZE;
-        eaddr /= AMDGPU_GPU_PAGE_SIZE;
-
-        if (eaddr >= adev->vm_manager.max_pfn) {
-                dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
-                        eaddr, adev->vm_manager.max_pfn);
-                return -EINVAL;
-        }
-
-        return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
-                                      adev->vm_manager.root_level, ats);
+
+error_free_pt:
+        amdgpu_bo_unref(&pt->shadow);
+        amdgpu_bo_unref(&pt);
+        return r;
 }
 
 /**
...