Commit b96f3e7c authored by Gerd Hoffmann

drm/ttm: use gem vma_node

Drop vma_node from ttm_buffer_object, use the gem struct
(base.vma_node) instead.
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190805140119.7337-9-kraxel@redhat.com
parent 1e053b10
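
For orientation, a minimal sketch of the struct relationship this patch relies on: ttm_buffer_object already embeds a drm_gem_object as "base", and the GEM object carries its own drm_vma_offset_node, so TTM's private copy is redundant and the mmap offset can be resolved through the embedded GEM object instead. The struct bodies below are illustrative field subsets, not the real kernel definitions, and the helper names are made up for this sketch.

/* Illustrative subset only -- not the full kernel structs. */
struct drm_gem_object {
	/* ... */
	struct drm_vma_offset_node vma_node;	/* mmap offset node lives here */
};

struct ttm_buffer_object {
	struct drm_gem_object base;		/* embedded GEM object */
	/* ... */
	/* struct drm_vma_offset_node vma_node;	   <- dropped by this patch */
};

/* Before this patch: drivers used TTM's private node (hypothetical helper). */
static inline u64 bo_mmap_offset_old(struct ttm_buffer_object *bo)
{
	return drm_vma_node_offset_addr(&bo->vma_node);
}

/* After this patch: the embedded GEM node is the single source of truth. */
static inline u64 bo_mmap_offset_new(struct ttm_buffer_object *bo)
{
	return drm_vma_node_offset_addr(&bo->base.vma_node);
}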
@@ -191,7 +191,7 @@ static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
  */
 static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
 {
-	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
 /**
...
@@ -168,7 +168,7 @@ EXPORT_SYMBOL(drm_gem_vram_put);
  */
 u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
 {
-	return drm_vma_node_offset_addr(&gbo->bo.vma_node);
+	return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
 }
 EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
...
@@ -675,7 +675,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
 	gem = drm_gem_object_lookup(file_priv, handle);
 	if (gem) {
 		struct nouveau_bo *bo = nouveau_gem_object(gem);
-		*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
+		*poffset = drm_vma_node_offset_addr(&bo->bo.base.vma_node);
 		drm_gem_object_put_unlocked(gem);
 		return 0;
 	}
...
@@ -240,7 +240,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 	}
 
 	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
+	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
 	rep->tile_mode = nvbo->mode;
 	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
 	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
...
@@ -60,7 +60,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
 {
-	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
 static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
...
@@ -116,7 +116,7 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
  */
 static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
 {
-	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
 extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
...
@@ -672,7 +672,7 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
-	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
+	drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node);
 	ttm_mem_io_lock(man, false);
 	ttm_mem_io_free_vm(bo);
 	ttm_mem_io_unlock(man);
@@ -1343,9 +1343,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		 * struct elements we want use regardless.
 		 */
 		reservation_object_init(&bo->base._resv);
+		drm_vma_node_reset(&bo->base.vma_node);
 	}
 	atomic_inc(&bo->bdev->glob->bo_count);
-	drm_vma_node_reset(&bo->vma_node);
 
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1353,7 +1353,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	 */
 	if (bo->type == ttm_bo_type_device ||
 	    bo->type == ttm_bo_type_sg)
-		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node,
 					 bo->mem.num_pages);
 
 	/* passed reservation objects should already be locked,
@@ -1781,7 +1781,7 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 
-	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
+	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
 	ttm_mem_io_free_vm(bo);
 }
...
@@ -510,7 +510,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
 	mutex_init(&fbo->base.wu_mutex);
 	fbo->base.moving = NULL;
-	drm_vma_node_reset(&fbo->base.vma_node);
+	drm_vma_node_reset(&fbo->base.base.vma_node);
 	atomic_set(&fbo->base.cpu_writers, 0);
 
 	kref_init(&fbo->base.list_kref);
...
@@ -211,9 +211,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
-		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
 	page_last = vma_pages(vma) + vma->vm_pgoff -
-		drm_vma_node_start(&bo->vma_node);
+		drm_vma_node_start(&bo->base.vma_node);
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		ret = VM_FAULT_SIGBUS;
@@ -267,7 +267,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 		} else if (unlikely(!page)) {
 			break;
 		}
-		page->index = drm_vma_node_start(&bo->vma_node) +
+		page->index = drm_vma_node_start(&bo->base.vma_node) +
 			page_offset;
 		pfn = page_to_pfn(page);
 	}
@@ -413,7 +413,8 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
 	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
 	if (likely(node)) {
-		bo = container_of(node, struct ttm_buffer_object, vma_node);
+		bo = container_of(node, struct ttm_buffer_object,
+				  base.vma_node);
 		bo = ttm_bo_get_unless_zero(bo);
 	}
...
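
One detail worth noting in the ttm_bo_vm_lookup hunk above: container_of() and offsetof() accept a nested member designator, so container_of(node, struct ttm_buffer_object, base.vma_node) still recovers the enclosing buffer object even though the node now lives one level down in the embedded GEM object. A standalone sketch of the same idiom, with a simplified stand-in macro and made-up struct names, purely for illustration:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int node; };
struct outer { int tag; struct inner base; };

int main(void)
{
	struct outer o = { .tag = 42 };
	struct inner *n = &o.base;

	/* offsetof() works on nested members, so this walks back to &o. */
	struct outer *back = container_of(&n->node, struct outer, base.node);
	printf("tag = %d\n", back->tag);	/* prints: tag = 42 */
	return 0;
}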
@@ -396,7 +396,7 @@ static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
 static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
 {
-	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
 }
 
 static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
...
@@ -68,8 +68,5 @@ void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
 			   struct vm_area_struct *vma)
 {
-	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-
-	bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start;
 	return drm_gem_prime_mmap(obj, vma);
 }
@@ -835,7 +835,7 @@ int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
 		goto out_no_bo;
 
 	rep->handle = handle;
-	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
+	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
 	rep->cur_gmr_id = handle;
 	rep->cur_gmr_offset = 0;
@@ -1077,7 +1077,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 	if (ret != 0)
 		return -EINVAL;
 
-	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
 	vmw_bo_unreference(&out_buf);
 	return 0;
 }
...
@@ -1669,7 +1669,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 	rep->backup_size = res->backup_size;
 	if (res->backup) {
 		rep->buffer_map_handle =
-			drm_vma_node_offset_addr(&res->backup->base.vma_node);
+			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
 		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
 		rep->buffer_handle = backup_handle;
 	} else {
@@ -1745,7 +1745,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
 	rep->crep.backup_size = srf->res.backup_size;
 	rep->crep.buffer_handle = backup_handle;
 	rep->crep.buffer_map_handle =
-		drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+		drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
 	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
 	rep->creq.version = drm_vmw_gb_surface_v1;
...
@@ -152,7 +152,6 @@ struct ttm_tt;
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
  * @moving: Fence set when BO is moving
- * @vma_node: Address space manager node.
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
  * @cur_placement: Hint of current placement.
@@ -219,9 +218,6 @@ struct ttm_buffer_object {
 	 */
 	struct dma_fence *moving;
 
-	struct drm_vma_offset_node vma_node;
-
 	unsigned priority;
 
 	/**
...