Commit ec681545 authored by Christian König, committed by Alex Deucher

drm/amdgpu: separate bo_va structure

Split the amdgpu_bo_va structure into vm_bo_base and bo_va to allow other uses as well.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 4ab4016a
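
For orientation, a condensed view of the split this patch makes (an illustration distilled from the hunks below, not an extra change): the per-VM bookkeeping that every tracked BO needs moves into struct amdgpu_vm_bo_base, and struct amdgpu_bo_va embeds it as its first member, so code holding a base pointer taken from a BO's va list can get back to the containing bo_va with container_of(). The helper bo_va_from_base() is hypothetical and only for illustration.

/* Illustration only: condensed from the structures introduced/kept by this patch. */
struct amdgpu_vm_bo_base {
        struct amdgpu_vm        *vm;            /* constant after initialization */
        struct amdgpu_bo        *bo;
        struct list_head        bo_list;        /* protected by bo being reserved */
        struct list_head        vm_status;      /* protected by the vm status_lock */
};

struct amdgpu_bo_va {
        struct amdgpu_vm_bo_base base;          /* generic per-VM tracking state */
        struct dma_fence        *last_pt_update;
        unsigned                ref_count;
        struct list_head        invalids;       /* mappings for this bo_va */
        struct list_head        valids;
};

/* Hypothetical helper: recover the bo_va from its embedded base. */
static inline struct amdgpu_bo_va *bo_va_from_base(struct amdgpu_vm_bo_base *base)
{
        return container_of(base, struct amdgpu_bo_va, base);
}

Because base is the first member, the conversion is effectively a pointer cast, but container_of() stays correct even if the member is ever moved.
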
@@ -1487,7 +1487,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                     addr > mapping->last)
                        continue;
-               *bo = lobj->bo_va->bo;
+               *bo = lobj->bo_va->base.bo;
                return mapping;
        }
@@ -1496,7 +1496,7 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                     addr > mapping->last)
                        continue;
-               *bo = lobj->bo_va->bo;
+               *bo = lobj->bo_va->base.bo;
                return mapping;
        }
 }
...
@@ -621,7 +621,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
-               r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+               r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
                                        args->map_size);
                if (r)
                        goto error_backoff;
@@ -641,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
                                        args->map_size);
                break;
        case AMDGPU_VA_OP_REPLACE:
-               r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
+               r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
                                        args->map_size);
                if (r)
                        goto error_backoff;
...
@@ -33,6 +33,7 @@
 #define AMDGPU_BO_INVALID_OFFSET        LONG_MAX
 
+/* bo virtual addresses in a vm */
 struct amdgpu_bo_va_mapping {
        struct list_head                list;
        struct rb_node                  rb;
@@ -43,26 +44,19 @@ struct amdgpu_bo_va_mapping {
        uint64_t                        flags;
 };
 
-/* bo virtual addresses in a specific vm */
+/* User space allocated BO in a VM */
 struct amdgpu_bo_va {
+       struct amdgpu_vm_bo_base        base;
+
        /* protected by bo being reserved */
-       struct list_head                bo_list;
        struct dma_fence                *last_pt_update;
        unsigned                        ref_count;
 
-       /* protected by vm mutex and spinlock */
-       struct list_head                vm_status;
-
        /* mappings for this bo_va */
        struct list_head                invalids;
        struct list_head                valids;
-
-       /* constant after initialization */
-       struct amdgpu_vm                *vm;
-       struct amdgpu_bo                *bo;
 };
 
 struct amdgpu_bo {
        /* Protected by tbo.reserved */
        u32                             preferred_domains;
...
@@ -284,7 +284,7 @@ TRACE_EVENT(amdgpu_vm_bo_map,
            ),
 
            TP_fast_assign(
-                          __entry->bo = bo_va ? bo_va->bo : NULL;
+                          __entry->bo = bo_va ? bo_va->base.bo : NULL;
                           __entry->start = mapping->start;
                           __entry->last = mapping->last;
                           __entry->offset = mapping->offset;
@@ -308,7 +308,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
            ),
 
            TP_fast_assign(
-                          __entry->bo = bo_va->bo;
+                          __entry->bo = bo_va->base.bo;
                           __entry->start = mapping->start;
                           __entry->last = mapping->last;
                           __entry->offset = mapping->offset;
...
@@ -76,7 +76,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                return -ENOMEM;
        }
 
-       r = amdgpu_vm_alloc_pts(adev, (*bo_va)->vm, AMDGPU_CSA_VADDR,
+       r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
                                AMDGPU_CSA_SIZE);
        if (r) {
                DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
...
@@ -870,8 +870,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
 {
        struct amdgpu_bo_va *bo_va;
 
-       list_for_each_entry(bo_va, &bo->va, bo_list) {
-               if (bo_va->vm == vm) {
+       list_for_each_entry(bo_va, &bo->va, base.bo_list) {
+               if (bo_va->base.vm == vm) {
                        return bo_va;
                }
        }
@@ -1726,7 +1726,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
                        bool clear)
 {
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_bo *bo = bo_va->base.bo;
+       struct amdgpu_vm *vm = bo_va->base.vm;
        struct amdgpu_bo_va_mapping *mapping;
        dma_addr_t *pages_addr = NULL;
        uint64_t gtt_flags, flags;
@@ -1735,27 +1736,27 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        struct dma_fence *exclusive;
        int r;
 
-       if (clear || !bo_va->bo) {
+       if (clear || !bo_va->base.bo) {
                mem = NULL;
                nodes = NULL;
                exclusive = NULL;
        } else {
                struct ttm_dma_tt *ttm;
 
-               mem = &bo_va->bo->tbo.mem;
+               mem = &bo_va->base.bo->tbo.mem;
                nodes = mem->mm_node;
                if (mem->mem_type == TTM_PL_TT) {
-                       ttm = container_of(bo_va->bo->tbo.ttm, struct
-                                          ttm_dma_tt, ttm);
+                       ttm = container_of(bo_va->base.bo->tbo.ttm,
+                                          struct ttm_dma_tt, ttm);
                        pages_addr = ttm->dma_address;
                }
-               exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
+               exclusive = reservation_object_get_excl(bo->tbo.resv);
        }
 
-       if (bo_va->bo) {
-               flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-               gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
-                            adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
+       if (bo) {
+               flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+               gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) &&
+                            adev == amdgpu_ttm_adev(bo->tbo.bdev)) ?
                        flags : 0;
        } else {
                flags = 0x0;
@@ -1763,7 +1764,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        }
 
        spin_lock(&vm->status_lock);
-       if (!list_empty(&bo_va->vm_status))
+       if (!list_empty(&bo_va->base.vm_status))
                list_splice_init(&bo_va->valids, &bo_va->invalids);
        spin_unlock(&vm->status_lock);
@@ -1786,9 +1787,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        spin_lock(&vm->status_lock);
        list_splice_init(&bo_va->invalids, &bo_va->valids);
-       list_del_init(&bo_va->vm_status);
+       list_del_init(&bo_va->base.vm_status);
        if (clear)
-               list_add(&bo_va->vm_status, &vm->cleared);
+               list_add(&bo_va->base.vm_status, &vm->cleared);
        spin_unlock(&vm->status_lock);
 
        if (vm->use_cpu_for_update) {
@@ -2001,7 +2002,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
        spin_lock(&vm->status_lock);
        while (!list_empty(&vm->invalidated)) {
                bo_va = list_first_entry(&vm->invalidated,
-                       struct amdgpu_bo_va, vm_status);
+                       struct amdgpu_bo_va, base.vm_status);
                spin_unlock(&vm->status_lock);
 
                r = amdgpu_vm_bo_update(adev, bo_va, true);
@@ -2041,16 +2042,17 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
        if (bo_va == NULL) {
                return NULL;
        }
-       bo_va->vm = vm;
-       bo_va->bo = bo;
+       bo_va->base.vm = vm;
+       bo_va->base.bo = bo;
+       INIT_LIST_HEAD(&bo_va->base.bo_list);
+       INIT_LIST_HEAD(&bo_va->base.vm_status);
+
        bo_va->ref_count = 1;
-       INIT_LIST_HEAD(&bo_va->bo_list);
        INIT_LIST_HEAD(&bo_va->valids);
        INIT_LIST_HEAD(&bo_va->invalids);
-       INIT_LIST_HEAD(&bo_va->vm_status);
 
        if (bo)
-               list_add_tail(&bo_va->bo_list, &bo->va);
+               list_add_tail(&bo_va->base.bo_list, &bo->va);
 
        return bo_va;
 }
@@ -2075,7 +2077,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     uint64_t size, uint64_t flags)
 {
        struct amdgpu_bo_va_mapping *mapping, *tmp;
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_bo *bo = bo_va->base.bo;
+       struct amdgpu_vm *vm = bo_va->base.vm;
        uint64_t eaddr;
 
        /* validate the parameters */
@@ -2086,7 +2089,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        /* make sure object fit at this offset */
        eaddr = saddr + size - 1;
        if (saddr >= eaddr ||
-           (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+           (bo && offset + size > amdgpu_bo_size(bo)))
                return -EINVAL;
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2096,7 +2099,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        if (tmp) {
                /* bo and tmp overlap, invalid addr */
                dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
-                       "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+                       "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
                        tmp->start, tmp->last + 1);
                return -EINVAL;
        }
@@ -2141,7 +2144,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
                             uint64_t size, uint64_t flags)
 {
        struct amdgpu_bo_va_mapping *mapping;
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_bo *bo = bo_va->base.bo;
+       struct amdgpu_vm *vm = bo_va->base.vm;
        uint64_t eaddr;
        int r;
@@ -2153,7 +2157,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        /* make sure object fit at this offset */
        eaddr = saddr + size - 1;
        if (saddr >= eaddr ||
-           (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
+           (bo && offset + size > amdgpu_bo_size(bo)))
                return -EINVAL;
 
        /* Allocate all the needed memory */
@@ -2161,7 +2165,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        if (!mapping)
                return -ENOMEM;
 
-       r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
+       r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
        if (r) {
                kfree(mapping);
                return r;
@@ -2201,7 +2205,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       uint64_t saddr)
 {
        struct amdgpu_bo_va_mapping *mapping;
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_vm *vm = bo_va->base.vm;
        bool valid = true;
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2349,12 +2353,12 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va)
 {
        struct amdgpu_bo_va_mapping *mapping, *next;
-       struct amdgpu_vm *vm = bo_va->vm;
+       struct amdgpu_vm *vm = bo_va->base.vm;
 
-       list_del(&bo_va->bo_list);
+       list_del(&bo_va->base.bo_list);
 
        spin_lock(&vm->status_lock);
-       list_del(&bo_va->vm_status);
+       list_del(&bo_va->base.vm_status);
        spin_unlock(&vm->status_lock);
 
        list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
@@ -2386,13 +2390,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo)
 {
-       struct amdgpu_bo_va *bo_va;
+       struct amdgpu_vm_bo_base *bo_base;
 
-       list_for_each_entry(bo_va, &bo->va, bo_list) {
-               spin_lock(&bo_va->vm->status_lock);
-               if (list_empty(&bo_va->vm_status))
-                       list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-               spin_unlock(&bo_va->vm->status_lock);
+       list_for_each_entry(bo_base, &bo->va, bo_list) {
+               spin_lock(&bo_base->vm->status_lock);
+               if (list_empty(&bo_base->vm_status))
+                       list_add(&bo_base->vm_status,
+                                &bo_base->vm->invalidated);
+               spin_unlock(&bo_base->vm->status_lock);
        }
 }
...
@@ -99,6 +99,18 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
 #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
 
+/* base structure for tracking BO usage in a VM */
+struct amdgpu_vm_bo_base {
+       /* constant after initialization */
+       struct amdgpu_vm                *vm;
+       struct amdgpu_bo                *bo;
+
+       /* protected by bo being reserved */
+       struct list_head                bo_list;
+
+       /* protected by spinlock */
+       struct list_head                vm_status;
+};
+
 struct amdgpu_vm_pt {
        struct amdgpu_bo                *bo;
...
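
The commit message notes the split is meant to allow other uses as well. As a hedged sketch under that assumption (nothing in this patch defines such a user; example_vm_bo and example_vm_bo_init are made-up names), another per-VM tracking structure could embed the same base and initialize it the way amdgpu_vm_bo_add() now does:

/* Hypothetical future user of amdgpu_vm_bo_base; not part of this patch. */
struct example_vm_bo {
        struct amdgpu_vm_bo_base base;  /* links into bo->va and the VM status lists */
        unsigned long           flags;  /* user-specific state would go here */
};

static void example_vm_bo_init(struct example_vm_bo *e,
                               struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
        e->base.vm = vm;
        e->base.bo = bo;
        INIT_LIST_HEAD(&e->base.bo_list);
        INIT_LIST_HEAD(&e->base.vm_status);

        if (bo)
                list_add_tail(&e->base.bo_list, &bo->va);
}

Generic code such as amdgpu_vm_bo_invalidate(), which now walks bo->va as a list of amdgpu_vm_bo_base entries, would then handle such a structure without knowing what embeds the base.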