Commit 8af8a109 authored by Christian König

drm/ttm: device naming cleanup

Rename ttm_bo_device to ttm_device.
Rename ttm_bo_driver to ttm_device_funcs.
Rename ttm_bo_global to ttm_global.

Move global and device related functions to ttm_device.[ch].

No functional change.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/415222/
parent b99c2c95
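The conversion is mechanical. As a minimal sketch, this is roughly what a driver looks like after the patch (the mydrv_* names are hypothetical; only the ttm_* symbols come from TTM):

/* Hypothetical driver glue against the renamed API. */
static struct ttm_device_funcs mydrv_funcs = {	/* was: struct ttm_bo_driver */
	.ttm_tt_create = &mydrv_ttm_tt_create,	/* hypothetical callback */
};

static int mydrv_ttm_init(struct mydrv_device *mdev)
{
	/* was: ttm_bo_device_init() on a struct ttm_bo_device */
	return ttm_device_init(&mdev->bdev, &mydrv_funcs, mdev->dev,
			       mdev->ddev->anon_inode->i_mapping,
			       mdev->ddev->vma_offset_manager,
			       false, false);
}

static void mydrv_ttm_fini(struct mydrv_device *mdev)
{
	ttm_device_fini(&mdev->bdev);	/* was: ttm_bo_device_release() */
}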
@@ -1053,7 +1053,7 @@ static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
 	return &adev->ddev;
 }

-static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
 {
 	return container_of(bdev, struct amdgpu_device, mman.bdev);
 }
...
@@ -40,13 +40,13 @@ static atomic_t fence_seq = ATOMIC_INIT(0);
 * All the BOs in a process share an eviction fence. When process X wants
 * to map VRAM memory but TTM can't find enough space, TTM will attempt to
 * evict BOs from its LRU list. TTM checks if the BO is valuable to evict
- * by calling ttm_bo_driver->eviction_valuable().
+ * by calling ttm_device_funcs->eviction_valuable().
 *
- * ttm_bo_driver->eviction_valuable() - will return false if the BO belongs
+ * ttm_device_funcs->eviction_valuable() - will return false if the BO belongs
 * to process X. Otherwise, it will return true to indicate BO can be
 * evicted by TTM.
 *
- * If ttm_bo_driver->eviction_valuable returns true, then TTM will continue
+ * If ttm_device_funcs->eviction_valuable returns true, then TTM will continue
 * the evcition process for that BO by calling ttm_bo_evict --> amdgpu_bo_move
 * --> amdgpu_copy_buffer(). This sets up job in GPU scheduler.
 *
...
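As an aside, a sketch of the kind of eviction_valuable() callback this comment describes (the amdkfd implementation itself is not part of this hunk; the mydrv_* helper is hypothetical):

static bool mydrv_eviction_valuable(struct ttm_buffer_object *bo,
				    const struct ttm_place *place)
{
	/* Refuse eviction for BOs owned by the faulting process;
	 * TTM then skips this BO and tries the next one on the LRU. */
	if (mydrv_bo_belongs_to_current_process(bo))	/* hypothetical */
		return false;

	/* Otherwise defer to the generic placement/size check. */
	return ttm_bo_eviction_valuable(bo, place);
}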
@@ -71,7 +71,7 @@
 */
 static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
 {
-	struct page *dummy_page = ttm_bo_glob.dummy_read_page;
+	struct page *dummy_page = ttm_glob.dummy_read_page;

 	if (adev->dummy_page_addr)
 		return 0;
...
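For context: ttm_glob.dummy_read_page is a single shared, zero-filled page; drivers point not-yet-bound GART entries at it so that stray device reads always hit valid memory. Roughly this shape (a sketch; the gart helpers are hypothetical):

static void mydrv_gart_point_at_dummy(struct mydrv_device *mdev)
{
	/* DMA address of the shared ttm_glob.dummy_read_page */
	u64 dummy = mdev->dummy_page_addr;
	unsigned i;

	for (i = 0; i < mdev->gart.num_entries; ++i)
		mydrv_gart_set_page(mdev, i, dummy);	/* hypothetical */
}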
@@ -61,10 +61,10 @@
 #define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128

-static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 				   struct ttm_tt *ttm,
 				   struct ttm_resource *bo_mem);
-static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
 				      struct ttm_tt *ttm);

 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
@@ -646,7 +646,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 *
 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
 */
-static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
+static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct drm_mm_node *mm_node = mem->mm_node;
@@ -893,7 +893,7 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 *
 * Called by amdgpu_ttm_backend_bind()
 **/
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
 				     struct ttm_tt *ttm)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -931,7 +931,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
 /*
 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
 */
-static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
 					struct ttm_tt *ttm)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -1015,7 +1015,7 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
 * This handles binding GTT memory to the device address space.
 */
-static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 				   struct ttm_tt *ttm,
 				   struct ttm_resource *bo_mem)
 {
@@ -1155,7 +1155,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
-static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
 				      struct ttm_tt *ttm)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -1180,7 +1180,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
 	gtt->bound = false;
 }

-static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
 				       struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1234,7 +1234,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
-static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
 				  struct ttm_tt *ttm,
 				  struct ttm_operation_ctx *ctx)
 {
@@ -1278,7 +1278,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
-static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
 				     struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1603,7 +1603,7 @@ amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
 	amdgpu_bo_move_notify(bo, false, NULL);
 }

-static struct ttm_bo_driver amdgpu_bo_driver = {
+static struct ttm_device_funcs amdgpu_bo_driver = {
 	.ttm_tt_create = &amdgpu_ttm_tt_create,
 	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
 	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
@@ -1785,7 +1785,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	mutex_init(&adev->mman.gtt_window_lock);

 	/* No others user of address space so set it to 0 */
-	r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
+	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
 			       adev_to_drm(adev)->anon_inode->i_mapping,
 			       adev_to_drm(adev)->vma_offset_manager,
 			       adev->need_swiotlb,
@@ -1926,7 +1926,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
 	ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
-	ttm_bo_device_release(&adev->mman.bdev);
+	ttm_device_fini(&adev->mman.bdev);
 	adev->mman.initialized = false;

 	DRM_INFO("amdgpu: ttm finalized\n");
 }
...
@@ -60,7 +60,7 @@ struct amdgpu_gtt_mgr {
 };

 struct amdgpu_mman {
-	struct ttm_bo_device		bdev;
+	struct ttm_device		bdev;
 	bool				initialized;
 	void __iomem			*aper_base_kaddr;
...
@@ -638,15 +638,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 	struct amdgpu_vm_bo_base *bo_base;

 	if (vm->bulk_moveable) {
-		spin_lock(&ttm_bo_glob.lru_lock);
+		spin_lock(&ttm_glob.lru_lock);
 		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
-		spin_unlock(&ttm_bo_glob.lru_lock);
+		spin_unlock(&ttm_glob.lru_lock);
 		return;
 	}

 	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));

-	spin_lock(&ttm_bo_glob.lru_lock);
+	spin_lock(&ttm_glob.lru_lock);
 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
 		struct amdgpu_bo *bo = bo_base->bo;

@@ -660,7 +660,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 					   &bo->shadow->tbo.mem,
 					   &vm->lru_bulk_move);
 	}
-	spin_unlock(&ttm_bo_glob.lru_lock);
+	spin_unlock(&ttm_glob.lru_lock);
 	vm->bulk_moveable = true;
 }
...
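The pattern in this hunk recurs in the qxl and execbuf hunks below: every LRU manipulation happens under the single global ttm_glob.lru_lock (formerly ttm_bo_glob.lru_lock). Condensed to its skeleton (a sketch; the list name is hypothetical and per-BO reservation locking is elided):

struct ttm_lru_bulk_move bulk_move;
struct ttm_buffer_object *bo;

/* Slow path: walk the BOs once, recording the moved range in bulk_move. */
memset(&bulk_move, 0, sizeof(bulk_move));
spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(bo, &idle_list, lru)	/* hypothetical list */
	ttm_bo_move_to_lru_tail(bo, &bo->mem, &bulk_move);
spin_unlock(&ttm_glob.lru_lock);

/* Fast path on later calls: replay the recorded bulk in one splice. */
spin_lock(&ttm_glob.lru_lock);
ttm_bo_bulk_move_lru_tail(&bulk_move);
spin_unlock(&ttm_glob.lru_lock);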
@@ -187,7 +187,7 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
 	struct drm_gem_vram_object *gbo;
 	struct drm_gem_object *gem;
 	struct drm_vram_mm *vmm = dev->vram_mm;
-	struct ttm_bo_device *bdev;
+	struct ttm_device *bdev;
 	int ret;
 	size_t acc_size;
@@ -551,7 +551,7 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
 EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);

 /*
- * Helpers for struct ttm_bo_driver
+ * Helpers for struct ttm_device_funcs
 */

 static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
@@ -893,7 +893,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
 * TTM TT
 */
-static void bo_driver_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
+static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
 {
 	ttm_tt_destroy_common(bdev, tt);
 	ttm_tt_fini(tt);
@@ -965,7 +965,7 @@ static int bo_driver_move(struct ttm_buffer_object *bo,
 	return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
 }

-static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
+static int bo_driver_io_mem_reserve(struct ttm_device *bdev,
 				    struct ttm_resource *mem)
 {
 	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
@@ -985,7 +985,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
 	return 0;
 }

-static struct ttm_bo_driver bo_driver = {
+static struct ttm_device_funcs bo_driver = {
 	.ttm_tt_create = bo_driver_ttm_tt_create,
 	.ttm_tt_destroy = bo_driver_ttm_tt_destroy,
 	.eviction_valuable = ttm_bo_eviction_valuable,
@@ -1036,7 +1036,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
 	vmm->vram_base = vram_base;
 	vmm->vram_size = vram_size;

-	ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev,
+	ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev,
 			      dev->anon_inode->i_mapping,
 			      dev->vma_offset_manager,
 			      false, true);
@@ -1054,7 +1054,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
 static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
 {
 	ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM);
-	ttm_bo_device_release(&vmm->bdev);
+	ttm_device_fini(&vmm->bdev);
 }

 /*
...
@@ -43,9 +43,9 @@
 #include <nvif/if500b.h>
 #include <nvif/if900b.h>

-static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
 			       struct ttm_resource *reg);
-static void nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);

 /*
 * NV10-NV40 tiling helpers
@@ -674,7 +674,7 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
 }

 static int
-nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
 		    struct ttm_resource *reg)
 {
 #if IS_ENABLED(CONFIG_AGP)
@@ -690,7 +690,7 @@ nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
 }

 static void
-nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
 {
 #if IS_ENABLED(CONFIG_AGP)
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
@@ -1055,7 +1055,7 @@ nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
 }

 static int
-nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
+nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct nvkm_device *device = nvxx_device(&drm->client.device);
@@ -1163,7 +1163,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 }

 static void
-nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
+nouveau_ttm_io_mem_free(struct ttm_device *bdev, struct ttm_resource *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
@@ -1223,7 +1223,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 }

 static int
-nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
+nouveau_ttm_tt_populate(struct ttm_device *bdev,
 			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct ttm_tt *ttm_dma = (void *)ttm;
@@ -1247,7 +1247,7 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
 }

 static void
-nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
 			  struct ttm_tt *ttm)
 {
 	struct nouveau_drm *drm;
@@ -1264,7 +1264,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
 }

 static void
-nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev,
+nouveau_ttm_tt_destroy(struct ttm_device *bdev,
 		       struct ttm_tt *ttm)
 {
 #if IS_ENABLED(CONFIG_AGP)
@@ -1296,7 +1296,7 @@ nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
 	nouveau_bo_move_ntfy(bo, false, NULL);
 }

-struct ttm_bo_driver nouveau_bo_driver = {
+struct ttm_device_funcs nouveau_bo_driver = {
 	.ttm_tt_create = &nouveau_ttm_tt_create,
 	.ttm_tt_populate = &nouveau_ttm_tt_populate,
 	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
...
@@ -68,7 +68,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
 	return 0;
 }

-extern struct ttm_bo_driver nouveau_bo_driver;
+extern struct ttm_device_funcs nouveau_bo_driver;

 void nouveau_bo_move_init(struct nouveau_drm *);
 struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
...
@@ -151,7 +151,7 @@ struct nouveau_drm {
 	/* TTM interface support */
 	struct {
-		struct ttm_bo_device bdev;
+		struct ttm_device bdev;
 		atomic_t validate_sequence;
 		int (*move)(struct nouveau_channel *,
 			    struct ttm_buffer_object *,
...
@@ -16,7 +16,7 @@ struct nouveau_sgdma_be {
 };

 void
-nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
@@ -29,7 +29,7 @@ nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 }

 int
-nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)
+nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
@@ -56,7 +56,7 @@ nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_re
 }

 void
-nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

 	if (nvbe->mem) {
...
@@ -324,10 +324,10 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 	need_swiotlb = !!swiotlb_nr_tbl();
 #endif

-	ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver,
-				 drm->dev->dev, dev->anon_inode->i_mapping,
-				 dev->vma_offset_manager, need_swiotlb,
-				 drm->client.mmu.dmabits <= 32);
+	ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
+			      dev->anon_inode->i_mapping,
+			      dev->vma_offset_manager, need_swiotlb,
+			      drm->client.mmu.dmabits <= 32);
 	if (ret) {
 		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
 		return ret;
@@ -377,7 +377,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
 	nouveau_ttm_fini_vram(drm);
 	nouveau_ttm_fini_gtt(drm);

-	ttm_bo_device_release(&drm->ttm.bdev);
+	ttm_device_fini(&drm->ttm.bdev);

 	arch_phys_wc_del(drm->ttm.mtrr);
 	drm->ttm.mtrr = 0;
...
@@ -3,7 +3,7 @@
 #define __NOUVEAU_TTM_H__

 static inline struct nouveau_drm *
-nouveau_bdev(struct ttm_bo_device *bd)
+nouveau_bdev(struct ttm_device *bd)
 {
 	return container_of(bd, struct nouveau_drm, ttm.bdev);
 }
@@ -22,7 +22,7 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
 int nouveau_ttm_global_init(struct nouveau_drm *);
 void nouveau_ttm_global_release(struct nouveau_drm *);

-int nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg);
-void nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
-void nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+int nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg);
+void nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
+void nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
 #endif
...
@@ -125,7 +125,7 @@ struct qxl_output {
 #define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)

 struct qxl_mman {
-	struct ttm_bo_device		bdev;
+	struct ttm_device		bdev;
 };

 struct qxl_memslot {
@@ -335,7 +335,7 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
 /* qxl ttm */
 int qxl_ttm_init(struct qxl_device *qdev);
 void qxl_ttm_fini(struct qxl_device *qdev);
-int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
 			   struct ttm_resource *mem);

 /* qxl image */
...
@@ -429,7 +429,7 @@ void qxl_release_unmap(struct qxl_device *qdev,
 void qxl_release_fence_buffer_objects(struct qxl_release *release)
 {
 	struct ttm_buffer_object *bo;
-	struct ttm_bo_device *bdev;
+	struct ttm_device *bdev;
 	struct ttm_validate_buffer *entry;
 	struct qxl_device *qdev;
@@ -450,7 +450,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		       release->id | 0xf0000000, release->base.seqno);
 	trace_dma_fence_emit(&release->base);

-	spin_lock(&ttm_bo_glob.lru_lock);
+	spin_lock(&ttm_glob.lru_lock);

 	list_for_each_entry(entry, &release->bos, head) {
 		bo = entry->bo;
@@ -459,7 +459,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
-	spin_unlock(&ttm_bo_glob.lru_lock);
+	spin_unlock(&ttm_glob.lru_lock);
 	ww_acquire_fini(&release->ticket);
 }
...
@@ -36,7 +36,7 @@
 #include "qxl_drv.h"
 #include "qxl_object.h"

-static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
+static struct qxl_device *qxl_get_qdev(struct ttm_device *bdev)
 {
 	struct qxl_mman *mman;
 	struct qxl_device *qdev;
@@ -69,7 +69,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
 	*placement = qbo->placement;
 }

-int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
 			   struct ttm_resource *mem)
 {
 	struct qxl_device *qdev = qxl_get_qdev(bdev);
@@ -98,8 +98,7 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
 /*
 * TTM backend functions.
 */
-static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev,
-				    struct ttm_tt *ttm)
+static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
 {
 	ttm_tt_destroy_common(bdev, ttm);
 	ttm_tt_fini(ttm);
@@ -170,7 +169,7 @@ static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo)
 	qxl_bo_move_notify(bo, false, NULL);
 }

-static struct ttm_bo_driver qxl_bo_driver = {
+static struct ttm_device_funcs qxl_bo_driver = {
 	.ttm_tt_create = &qxl_ttm_tt_create,
 	.ttm_tt_destroy = &qxl_ttm_backend_destroy,
 	.eviction_valuable = ttm_bo_eviction_valuable,
@@ -193,10 +192,10 @@ int qxl_ttm_init(struct qxl_device *qdev)
 	int num_io_pages; /* != rom->num_io_pages, we include surface0 */

 	/* No others user of address space so set it to 0 */
-	r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
+	r = ttm_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
 			    qdev->ddev.anon_inode->i_mapping,
 			    qdev->ddev.vma_offset_manager,
 			    false, false);
 	if (r) {
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
 		return r;
@@ -227,7 +226,7 @@ void qxl_ttm_fini(struct qxl_device *qdev)
 {
 	ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_VRAM);
 	ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_PRIV);
-	ttm_bo_device_release(&qdev->mman.bdev);
+	ttm_device_fini(&qdev->mman.bdev);
 	DRM_INFO("qxl: ttm finalized\n");
 }
...
@@ -451,7 +451,7 @@ struct radeon_surface_reg {
 * TTM.
 */
 struct radeon_mman {
-	struct ttm_bo_device		bdev;
+	struct ttm_device		bdev;
 	bool				initialized;

 #if defined(CONFIG_DEBUG_FS)
@@ -2822,7 +2822,7 @@ extern int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
 				     uint32_t flags);
 extern bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, struct ttm_tt *ttm);
 extern bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, struct ttm_tt *ttm);
-bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+bool radeon_ttm_tt_is_bound(struct ttm_device *bdev, struct ttm_tt *ttm);
 extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
 extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
@@ -2832,7 +2832,7 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
 extern void radeon_program_register_sequence(struct radeon_device *rdev,
 					     const u32 *registers,
 					     const u32 array_size);
-struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev);
+struct radeon_device *radeon_get_rdev(struct ttm_device *bdev);

 /* KMS */
...
@@ -372,7 +372,7 @@ void radeon_bo_unpin(struct radeon_bo *bo)

 int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
-	struct ttm_bo_device *bdev = &rdev->mman.bdev;
+	struct ttm_device *bdev = &rdev->mman.bdev;
 	struct ttm_resource_manager *man;

 	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
...
@@ -55,13 +55,11 @@
 static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
 static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);

-static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
-			      struct ttm_tt *ttm,
+static int radeon_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
 			      struct ttm_resource *bo_mem);
-static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
-				 struct ttm_tt *ttm);
+static void radeon_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);

-struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
+struct radeon_device *radeon_get_rdev(struct ttm_device *bdev)
 {
 	struct radeon_mman *mman;
 	struct radeon_device *rdev;
@@ -280,7 +278,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 	return 0;
 }

-static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
+static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
 {
 	struct radeon_device *rdev = radeon_get_rdev(bdev);
 	size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
@@ -347,7 +345,7 @@ struct radeon_ttm_tt {
 };

 /* prepare the sg table with the user pages */
-static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
 {
 	struct radeon_device *rdev = radeon_get_rdev(bdev);
 	struct radeon_ttm_tt *gtt = (void *)ttm;
@@ -408,7 +406,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
 	return r;
 }

-static void radeon_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void radeon_ttm_tt_unpin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
 {
 	struct radeon_device *rdev = radeon_get_rdev(bdev);
 	struct radeon_ttm_tt *gtt = (void *)ttm;
@@ -444,7 +442,7 @@ static bool radeon_ttm_backend_is_bound(struct ttm_tt *ttm)
 	return (gtt->bound);
 }

-static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int radeon_ttm_backend_bind(struct ttm_device *bdev,
 				   struct ttm_tt *ttm,
 				   struct ttm_resource *bo_mem)
 {
@@ -480,7 +478,7 @@ static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
 	return 0;
 }

-static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void radeon_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
 {
 	struct radeon_ttm_tt *gtt = (void *)ttm;
 	struct radeon_device *rdev = radeon_get_rdev(bdev);
@@ -495,7 +493,7 @@ static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt
 	gtt->bound = false;
 }

-static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
 {
 	struct radeon_ttm_tt *gtt = (void *)ttm;
@@ -554,7 +552,7 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
 	return container_of(ttm, struct radeon_ttm_tt, ttm);
 }

-static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
+static int radeon_ttm_tt_populate(struct ttm_device *bdev,
 				  struct ttm_tt *ttm,
 				  struct ttm_operation_ctx *ctx)
 {
@@ -580,7 +578,7 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
 	return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx);
 }

-static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
 {
 	struct radeon_device *rdev = radeon_get_rdev(bdev);
 	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
@@ -613,7 +611,7 @@ int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
 	return 0;
 }

-bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev,
+bool radeon_ttm_tt_is_bound(struct ttm_device *bdev,
 			    struct ttm_tt *ttm)
 {
 #if IS_ENABLED(CONFIG_AGP)
@@ -624,7 +622,7 @@ bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev,
 	return radeon_ttm_backend_is_bound(ttm);
 }

-static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
+static int radeon_ttm_tt_bind(struct ttm_device *bdev,
 			      struct ttm_tt *ttm,
 			      struct ttm_resource *bo_mem)
 {
@@ -642,7 +640,7 @@ static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
 	return radeon_ttm_backend_bind(bdev, ttm, bo_mem);
 }

-static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
+static void radeon_ttm_tt_unbind(struct ttm_device *bdev,
 			      struct ttm_tt *ttm)
 {
 #if IS_ENABLED(CONFIG_AGP)
@@ -656,7 +654,7 @@ static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
 	radeon_ttm_backend_unbind(bdev, ttm);
 }

-static void radeon_ttm_tt_destroy(struct ttm_bo_device *bdev,
+static void radeon_ttm_tt_destroy(struct ttm_device *bdev,
 			       struct ttm_tt *ttm)
 {
 #if IS_ENABLED(CONFIG_AGP)
@@ -700,7 +698,7 @@ radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
 	radeon_bo_move_notify(bo, false, NULL);
 }

-static struct ttm_bo_driver radeon_bo_driver = {
+static struct ttm_device_funcs radeon_bo_driver = {
 	.ttm_tt_create = &radeon_ttm_tt_create,
 	.ttm_tt_populate = &radeon_ttm_tt_populate,
 	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
@@ -718,7 +716,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	int r;

 	/* No others user of address space so set it to 0 */
-	r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
+	r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
 			       rdev->ddev->anon_inode->i_mapping,
 			       rdev->ddev->vma_offset_manager,
 			       rdev->need_swiotlb,
@@ -791,7 +789,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
 	}
 	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM);
 	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT);
-	ttm_bo_device_release(&rdev->mman.bdev);
+	ttm_device_fini(&rdev->mman.bdev);
 	radeon_gart_fini(rdev);
 	rdev->mman.initialized = false;

 	DRM_INFO("radeon: ttm finalized\n");
...
@@ -5,7 +5,7 @@
 ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
 	ttm_execbuf_util.o ttm_range_manager.o \
-	ttm_resource.o ttm_pool.o
+	ttm_resource.o ttm_pool.o ttm_device.o
 ttm-$(CONFIG_AGP) += ttm_agp_backend.o

 obj-$(CONFIG_DRM_TTM) += ttm.o
...
@@ -49,7 +49,7 @@ struct ttm_agp_backend {
 int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
 {
 	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
-	struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
+	struct page *dummy_read_page = ttm_glob.dummy_read_page;
 	struct drm_mm_node *node = bo_mem->mm_node;
 	struct agp_memory *mem;
 	int ret, cached = ttm->caching == ttm_cached;
...
This diff is collapsed.
@@ -46,33 +46,33 @@ struct ttm_transfer_obj {
 	struct ttm_buffer_object *bo;
 };

-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+int ttm_mem_io_reserve(struct ttm_device *bdev,
 		       struct ttm_resource *mem)
 {
 	if (mem->bus.offset || mem->bus.addr)
 		return 0;

 	mem->bus.is_iomem = false;
-	if (!bdev->driver->io_mem_reserve)
+	if (!bdev->funcs->io_mem_reserve)
 		return 0;

-	return bdev->driver->io_mem_reserve(bdev, mem);
+	return bdev->funcs->io_mem_reserve(bdev, mem);
 }

-void ttm_mem_io_free(struct ttm_bo_device *bdev,
+void ttm_mem_io_free(struct ttm_device *bdev,
 		     struct ttm_resource *mem)
 {
 	if (!mem->bus.offset && !mem->bus.addr)
 		return;

-	if (bdev->driver->io_mem_free)
-		bdev->driver->io_mem_free(bdev, mem);
+	if (bdev->funcs->io_mem_free)
+		bdev->funcs->io_mem_free(bdev, mem);

 	mem->bus.offset = 0;
 	mem->bus.addr = NULL;
 }

-static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
+static int ttm_resource_ioremap(struct ttm_device *bdev,
 				struct ttm_resource *mem,
 				void **virtual)
 {
@@ -102,7 +102,7 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
 	return 0;
 }

-static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
+static void ttm_resource_iounmap(struct ttm_device *bdev,
 				 struct ttm_resource *mem,
 				 void *virtual)
 {
@@ -172,7 +172,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		       struct ttm_operation_ctx *ctx,
 		       struct ttm_resource *new_mem)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_device *bdev = bo->bdev;
 	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_resource *old_mem = &bo->mem;
@@ -300,7 +300,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 * TODO: Explicit member copy would probably be better here.
 	 */

-	atomic_inc(&ttm_bo_glob.bo_count);
+	atomic_inc(&ttm_glob.bo_count);
 	INIT_LIST_HEAD(&fbo->base.ddestroy);
 	INIT_LIST_HEAD(&fbo->base.lru);
 	INIT_LIST_HEAD(&fbo->base.swap);
@@ -602,7 +602,7 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
 static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
 				       struct dma_fence *fence)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_device *bdev = bo->bdev;
 	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);

 	/**
@@ -628,7 +628,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      bool pipeline,
 			      struct ttm_resource *new_mem)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_device *bdev = bo->bdev;
 	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
 	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
 	int ret = 0;
...
@@ -95,10 +95,10 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
 				       unsigned long page_offset)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_device *bdev = bo->bdev;

-	if (bdev->driver->io_mem_pfn)
-		return bdev->driver->io_mem_pfn(bo, page_offset);
+	if (bdev->funcs->io_mem_pfn)
+		return bdev->funcs->io_mem_pfn(bo, page_offset);

 	return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset;
 }
@@ -216,7 +216,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
 			if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
 				goto out_fallback;
 		}
-	} else if (bo->bdev->driver->io_mem_pfn) {
+	} else if (bo->bdev->funcs->io_mem_pfn) {
 		for (i = 1; i < fault_page_size; ++i) {
 			if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
 				goto out_fallback;
@@ -278,7 +278,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct ttm_buffer_object *bo = vma->vm_private_data;
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_device *bdev = bo->bdev;
 	unsigned long page_offset;
 	unsigned long page_last;
 	unsigned long pfn;
@@ -488,8 +488,8 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
 		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
 		break;
 	default:
-		if (bo->bdev->driver->access_memory)
-			ret = bo->bdev->driver->access_memory(
+		if (bo->bdev->funcs->access_memory)
+			ret = bo->bdev->funcs->access_memory(
 				bo, offset, buf, len, write);
 		else
 			ret = -EIO;
@@ -508,7 +508,7 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.access = ttm_bo_vm_access,
 };

-static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_device *bdev,
 						  unsigned long offset,
 						  unsigned long pages)
 {
@@ -555,9 +555,8 @@ static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_s
 }

 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
-		struct ttm_bo_device *bdev)
+		struct ttm_device *bdev)
 {
-	struct ttm_bo_driver *driver;
 	struct ttm_buffer_object *bo;
 	int ret;
@@ -568,12 +567,11 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 	if (unlikely(!bo))
 		return -EINVAL;

-	driver = bo->bdev->driver;
-	if (unlikely(!driver->verify_access)) {
+	if (unlikely(!bo->bdev->funcs->verify_access)) {
 		ret = -EPERM;
 		goto out_unref;
 	}
-	ret = driver->verify_access(bo, filp);
+	ret = bo->bdev->funcs->verify_access(bo, filp);
 	if (unlikely(ret != 0))
 		goto out_unref;
...
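A note on the bdev->driver to bdev->funcs member rename visible throughout this file: the calling convention is unchanged. Entries in struct ttm_device_funcs are either optional, in which case TTM NULL-checks them and falls back to a generic default (as in ttm_bo_io_mem_pfn() above), or effectively required, in which case their absence is an error (as with verify_access in ttm_bo_mmap()). A sketch of a driver table exploiting that (the mydrv_* names are hypothetical):

static struct ttm_device_funcs mydrv_funcs = {
	.ttm_tt_create = mydrv_ttm_tt_create,	/* hypothetical */
	.verify_access = mydrv_verify_access,	/* needed for ttm_bo_mmap() */
	/* .io_mem_pfn left NULL: ttm_bo_io_mem_pfn() then derives the PFN
	 * from mem.bus.offset instead. */
	/* .access_memory left NULL: ttm_bo_vm_access() returns -EIO for
	 * unhandled placements. */
};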
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#define pr_fmt(fmt) "[TTM DEVICE] " fmt
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_placement.h>
#include "ttm_module.h"
/**
 * ttm_global_mutex - protecting the global state
 */
DEFINE_MUTEX(ttm_global_mutex);
unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);

static void ttm_global_release(void)
{
	struct ttm_global *glob = &ttm_glob;

	mutex_lock(&ttm_global_mutex);
	if (--ttm_glob_use_count > 0)
		goto out;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
	ttm_mem_global_release(&ttm_mem_glob);
	__free_page(glob->dummy_read_page);
	memset(glob, 0, sizeof(*glob));
out:
	mutex_unlock(&ttm_global_mutex);
}

static int ttm_global_init(void)
{
	struct ttm_global *glob = &ttm_glob;
	int ret = 0;
	unsigned i;

	mutex_lock(&ttm_global_mutex);
	if (++ttm_glob_use_count > 1)
		goto out;

	ret = ttm_mem_global_init(&ttm_mem_glob);
	if (ret)
		goto out;

	spin_lock_init(&glob->lru_lock);
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		INIT_LIST_HEAD(&glob->swap_lru[i]);
	INIT_LIST_HEAD(&glob->device_list);
	atomic_set(&glob->bo_count, 0);

	debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
				&glob->bo_count);
out:
	mutex_unlock(&ttm_global_mutex);
	return ret;
}

static void ttm_init_sysman(struct ttm_device *bdev)
{
	struct ttm_resource_manager *man = &bdev->sysman;

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	man->use_tt = true;

	ttm_resource_manager_init(man, 0);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
	ttm_resource_manager_set_used(man, true);
}

static void ttm_device_delayed_workqueue(struct work_struct *work)
{
	struct ttm_device *bdev =
		container_of(work, struct ttm_device, wq.work);

	if (!ttm_bo_delayed_delete(bdev, false))
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_device_init
 *
 * @bdev: A pointer to a struct ttm_device to initialize.
 * @funcs: Function table for the device.
 * @dev: The core kernel device pointer for DMA mappings and allocations.
 * @mapping: The address space to use for this bo.
 * @vma_manager: A pointer to a vma manager.
 * @use_dma_alloc: If coherent DMA allocation API should be used.
 * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
 *
 * Initializes a struct ttm_device:
 * Returns:
 * !0: Failure.
 */
int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32)
{
	struct ttm_global *glob = &ttm_glob;
	int ret;

	if (WARN_ON(vma_manager == NULL))
		return -EINVAL;

	ret = ttm_global_init();
	if (ret)
		return ret;

	bdev->funcs = funcs;

	ttm_init_sysman(bdev);
	ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);

	bdev->vma_manager = vma_manager;
	INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = mapping;
	mutex_lock(&ttm_global_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&ttm_global_mutex);

	return 0;
}
EXPORT_SYMBOL(ttm_device_init);

void ttm_device_fini(struct ttm_device *bdev)
{
	struct ttm_global *glob = &ttm_glob;
	struct ttm_resource_manager *man;
	unsigned i;

	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
	ttm_resource_manager_set_used(man, false);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

	mutex_lock(&ttm_global_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&ttm_global_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	if (ttm_bo_delayed_delete(bdev, true))
		pr_debug("Delayed destroy list was clean\n");

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&man->lru[0]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&glob->lru_lock);

	ttm_pool_fini(&bdev->pool);
	ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);
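Taken together, ttm_device_init() and ttm_device_fini() bracket the lifetime of a device under the new naming. A minimal sketch of the expected driver-side pattern, assuming a hypothetical foo_device structure and foo_bo_driver funcs table (neither is part of TTM):
/* Hypothetical driver embedding a ttm_device; recovered via container_of(). */
struct foo_device {
        struct drm_device drm;
        struct ttm_device bdev;
};
extern struct ttm_device_funcs foo_bo_driver;   /* assumed driver funcs table */
static inline struct foo_device *foo_ttm_dev(struct ttm_device *bdev)
{
        return container_of(bdev, struct foo_device, bdev);
}
static int foo_ttm_init(struct foo_device *fdev)
{
        /* vma_manager must be non-NULL; ttm_device_init() WARNs and fails otherwise */
        return ttm_device_init(&fdev->bdev, &foo_bo_driver, fdev->drm.dev,
                               fdev->drm.anon_inode->i_mapping,
                               fdev->drm.vma_offset_manager,
                               false /* use_dma_alloc */, false /* use_dma32 */);
}
static void foo_ttm_fini(struct foo_device *fdev)
{
        /* flushes the delayed-destroy list and drops the global reference */
        ttm_device_fini(&fdev->bdev);
}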
...@@ -51,14 +51,14 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, ...@@ -51,14 +51,14 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
if (list_empty(list)) if (list_empty(list))
return; return;
spin_lock(&ttm_bo_glob.lru_lock); spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(entry, list, head) { list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo; struct ttm_buffer_object *bo = entry->bo;
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv); dma_resv_unlock(bo->base.resv);
} }
spin_unlock(&ttm_bo_glob.lru_lock); spin_unlock(&ttm_glob.lru_lock);
if (ticket) if (ticket)
ww_acquire_fini(ticket); ww_acquire_fini(ticket);
...@@ -154,7 +154,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, ...@@ -154,7 +154,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
if (list_empty(list)) if (list_empty(list))
return; return;
spin_lock(&ttm_bo_glob.lru_lock); spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(entry, list, head) { list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo; struct ttm_buffer_object *bo = entry->bo;
...@@ -165,7 +165,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, ...@@ -165,7 +165,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv); dma_resv_unlock(bo->base.resv);
} }
spin_unlock(&ttm_bo_glob.lru_lock); spin_unlock(&ttm_glob.lru_lock);
if (ticket) if (ticket)
ww_acquire_fini(ticket); ww_acquire_fini(ticket);
} }
......
...@@ -111,7 +111,7 @@ static void ttm_range_man_free(struct ttm_resource_manager *man, ...@@ -111,7 +111,7 @@ static void ttm_range_man_free(struct ttm_resource_manager *man,
static const struct ttm_resource_manager_func ttm_range_manager_func; static const struct ttm_resource_manager_func ttm_range_manager_func;
int ttm_range_man_init(struct ttm_bo_device *bdev, int ttm_range_man_init(struct ttm_device *bdev,
unsigned type, bool use_tt, unsigned type, bool use_tt,
unsigned long p_size) unsigned long p_size)
{ {
...@@ -138,7 +138,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev, ...@@ -138,7 +138,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev,
} }
EXPORT_SYMBOL(ttm_range_man_init); EXPORT_SYMBOL(ttm_range_man_init);
int ttm_range_man_fini(struct ttm_bo_device *bdev, int ttm_range_man_fini(struct ttm_device *bdev,
unsigned type) unsigned type)
{ {
struct ttm_resource_manager *man = ttm_manager_type(bdev, type); struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
......
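The range manager keeps the same init/fini flow after the rename; only the device type in the signatures changes. A short sketch of how a driver might wrap the renamed API for a GTT domain (the foo_ prefix and gtt_pages are hypothetical, not from any real driver):
/* Hypothetical helpers around the renamed range manager API. */
static int foo_gtt_mgr_init(struct ttm_device *bdev, unsigned long gtt_pages)
{
        /* use_tt = true: resources in this domain are backed by a ttm_tt */
        return ttm_range_man_init(bdev, TTM_PL_TT, true, gtt_pages);
}
static int foo_gtt_mgr_fini(struct ttm_device *bdev)
{
        /* evicts any remaining BOs before the manager is destroyed */
        return ttm_range_man_fini(bdev, TTM_PL_TT);
}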
...@@ -83,7 +83,7 @@ EXPORT_SYMBOL(ttm_resource_manager_init); ...@@ -83,7 +83,7 @@ EXPORT_SYMBOL(ttm_resource_manager_init);
* Evict all the objects out of a memory manager until it is empty. * Evict all the objects out of a memory manager until it is empty.
* Part of memory manager cleanup sequence. * Part of memory manager cleanup sequence.
*/ */
int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev, int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_resource_manager *man) struct ttm_resource_manager *man)
{ {
struct ttm_operation_ctx ctx = { struct ttm_operation_ctx ctx = {
...@@ -91,7 +91,7 @@ int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev, ...@@ -91,7 +91,7 @@ int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
.no_wait_gpu = false, .no_wait_gpu = false,
.force_alloc = true .force_alloc = true
}; };
struct ttm_bo_global *glob = &ttm_bo_glob; struct ttm_global *glob = &ttm_glob;
struct dma_fence *fence; struct dma_fence *fence;
int ret; int ret;
unsigned i; unsigned i;
......
...@@ -43,7 +43,7 @@ ...@@ -43,7 +43,7 @@
*/ */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{ {
struct ttm_bo_device *bdev = bo->bdev; struct ttm_device *bdev = bo->bdev;
uint32_t page_flags = 0; uint32_t page_flags = 0;
dma_resv_assert_held(bo->base.resv); dma_resv_assert_held(bo->base.resv);
...@@ -66,7 +66,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) ...@@ -66,7 +66,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
return -EINVAL; return -EINVAL;
} }
bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags); bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
if (unlikely(bo->ttm == NULL)) if (unlikely(bo->ttm == NULL))
return -ENOMEM; return -ENOMEM;
...@@ -108,7 +108,7 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm) ...@@ -108,7 +108,7 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
return 0; return 0;
} }
void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm) void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
{ {
ttm_tt_unpopulate(bdev, ttm); ttm_tt_unpopulate(bdev, ttm);
...@@ -119,9 +119,9 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm) ...@@ -119,9 +119,9 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
} }
EXPORT_SYMBOL(ttm_tt_destroy_common); EXPORT_SYMBOL(ttm_tt_destroy_common);
void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{ {
bdev->driver->ttm_tt_destroy(bdev, ttm); bdev->funcs->ttm_tt_destroy(bdev, ttm);
} }
static void ttm_tt_init_fields(struct ttm_tt *ttm, static void ttm_tt_init_fields(struct ttm_tt *ttm,
...@@ -223,7 +223,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm) ...@@ -223,7 +223,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
return ret; return ret;
} }
int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm) int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm)
{ {
struct address_space *swap_space; struct address_space *swap_space;
struct file *swap_storage; struct file *swap_storage;
...@@ -271,7 +271,7 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm) ...@@ -271,7 +271,7 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
return ret; return ret;
} }
static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm) static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm)
{ {
pgoff_t i; pgoff_t i;
...@@ -282,7 +282,7 @@ static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm) ...@@ -282,7 +282,7 @@ static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
ttm->pages[i]->mapping = bdev->dev_mapping; ttm->pages[i]->mapping = bdev->dev_mapping;
} }
int ttm_tt_populate(struct ttm_bo_device *bdev, int ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{ {
int ret; int ret;
...@@ -293,8 +293,8 @@ int ttm_tt_populate(struct ttm_bo_device *bdev, ...@@ -293,8 +293,8 @@ int ttm_tt_populate(struct ttm_bo_device *bdev,
if (ttm_tt_is_populated(ttm)) if (ttm_tt_is_populated(ttm))
return 0; return 0;
if (bdev->driver->ttm_tt_populate) if (bdev->funcs->ttm_tt_populate)
ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx); ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
else else
ret = ttm_pool_alloc(&bdev->pool, ttm, ctx); ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
if (ret) if (ret)
...@@ -328,15 +328,15 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm) ...@@ -328,15 +328,15 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
} }
} }
void ttm_tt_unpopulate(struct ttm_bo_device *bdev, void ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm) struct ttm_tt *ttm)
{ {
if (!ttm_tt_is_populated(ttm)) if (!ttm_tt_is_populated(ttm))
return; return;
ttm_tt_clear_mapping(ttm); ttm_tt_clear_mapping(ttm);
if (bdev->driver->ttm_tt_unpopulate) if (bdev->funcs->ttm_tt_unpopulate)
bdev->driver->ttm_tt_unpopulate(bdev, ttm); bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
else else
ttm_pool_free(&bdev->pool, ttm); ttm_pool_free(&bdev->pool, ttm);
ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED; ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
......
...@@ -466,13 +466,13 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, ...@@ -466,13 +466,13 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
dma_resv_assert_held(src->base.resv); dma_resv_assert_held(src->base.resv);
if (!ttm_tt_is_populated(dst->ttm)) { if (!ttm_tt_is_populated(dst->ttm)) {
ret = dst->bdev->driver->ttm_tt_populate(dst->bdev, dst->ttm, &ctx); ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);
if (ret) if (ret)
return ret; return ret;
} }
if (!ttm_tt_is_populated(src->ttm)) { if (!ttm_tt_is_populated(src->ttm)) {
ret = src->bdev->driver->ttm_tt_populate(src->bdev, src->ttm, &ctx); ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, &ctx);
if (ret) if (ret)
return ret; return ret;
} }
......
...@@ -545,7 +545,7 @@ int vmw_bo_init(struct vmw_private *dev_priv, ...@@ -545,7 +545,7 @@ int vmw_bo_init(struct vmw_private *dev_priv,
void (*bo_free)(struct ttm_buffer_object *bo)) void (*bo_free)(struct ttm_buffer_object *bo))
{ {
struct ttm_operation_ctx ctx = { interruptible, false }; struct ttm_operation_ctx ctx = { interruptible, false };
struct ttm_bo_device *bdev = &dev_priv->bdev; struct ttm_device *bdev = &dev_priv->bdev;
size_t acc_size; size_t acc_size;
int ret; int ret;
bool user = (bo_free == &vmw_user_bo_destroy); bool user = (bo_free == &vmw_user_bo_destroy);
...@@ -1058,7 +1058,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile, ...@@ -1058,7 +1058,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile,
void vmw_bo_fence_single(struct ttm_buffer_object *bo, void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence) struct vmw_fence_obj *fence)
{ {
struct ttm_bo_device *bdev = bo->bdev; struct ttm_device *bdev = bo->bdev;
struct vmw_private *dev_priv = struct vmw_private *dev_priv =
container_of(bdev, struct vmw_private, bdev); container_of(bdev, struct vmw_private, bdev);
......
...@@ -884,12 +884,12 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) ...@@ -884,12 +884,12 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
drm_vma_offset_manager_init(&dev_priv->vma_manager, drm_vma_offset_manager_init(&dev_priv->vma_manager,
DRM_FILE_PAGE_OFFSET_START, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE); DRM_FILE_PAGE_OFFSET_SIZE);
ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver, ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
dev_priv->drm.dev, dev_priv->drm.dev,
dev_priv->drm.anon_inode->i_mapping, dev_priv->drm.anon_inode->i_mapping,
&dev_priv->vma_manager, &dev_priv->vma_manager,
dev_priv->map_mode == vmw_dma_alloc_coherent, dev_priv->map_mode == vmw_dma_alloc_coherent,
false); false);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n"); DRM_ERROR("Failed initializing TTM buffer object driver.\n");
goto out_no_bdev; goto out_no_bdev;
...@@ -1006,7 +1006,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) ...@@ -1006,7 +1006,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
vmw_vram_manager_fini(dev_priv); vmw_vram_manager_fini(dev_priv);
out_no_vram: out_no_vram:
(void)ttm_bo_device_release(&dev_priv->bdev); ttm_device_fini(&dev_priv->bdev);
out_no_bdev: out_no_bdev:
vmw_fence_manager_takedown(dev_priv->fman); vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman: out_no_fman:
...@@ -1053,7 +1053,7 @@ static void vmw_driver_unload(struct drm_device *dev) ...@@ -1053,7 +1053,7 @@ static void vmw_driver_unload(struct drm_device *dev)
if (dev_priv->has_mob) if (dev_priv->has_mob)
vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
vmw_vram_manager_fini(dev_priv); vmw_vram_manager_fini(dev_priv);
(void) ttm_bo_device_release(&dev_priv->bdev); ttm_device_fini(&dev_priv->bdev);
drm_vma_offset_manager_destroy(&dev_priv->vma_manager); drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
vmw_release_device_late(dev_priv); vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman); vmw_fence_manager_takedown(dev_priv->fman);
......
...@@ -484,7 +484,7 @@ enum vmw_sm_type { ...@@ -484,7 +484,7 @@ enum vmw_sm_type {
struct vmw_private { struct vmw_private {
struct drm_device drm; struct drm_device drm;
struct ttm_bo_device bdev; struct ttm_device bdev;
struct vmw_fifo_state fifo; struct vmw_fifo_state fifo;
...@@ -999,7 +999,7 @@ extern struct ttm_placement vmw_evictable_placement; ...@@ -999,7 +999,7 @@ extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement; extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement; extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_nonfixed_placement; extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver; extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table * extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo); vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv, extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
......
...@@ -856,7 +856,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo, ...@@ -856,7 +856,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem) struct ttm_resource *mem)
{ {
struct vmw_buffer_object *dx_query_mob; struct vmw_buffer_object *dx_query_mob;
struct ttm_bo_device *bdev = bo->bdev; struct ttm_device *bdev = bo->bdev;
struct vmw_private *dev_priv; struct vmw_private *dev_priv;
......
...@@ -483,7 +483,7 @@ const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) ...@@ -483,7 +483,7 @@ const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
} }
static int vmw_ttm_bind(struct ttm_bo_device *bdev, static int vmw_ttm_bind(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_resource *bo_mem) struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{ {
struct vmw_ttm_tt *vmw_be = struct vmw_ttm_tt *vmw_be =
...@@ -527,7 +527,7 @@ static int vmw_ttm_bind(struct ttm_bo_device *bdev, ...@@ -527,7 +527,7 @@ static int vmw_ttm_bind(struct ttm_bo_device *bdev,
return ret; return ret;
} }
static void vmw_ttm_unbind(struct ttm_bo_device *bdev, static void vmw_ttm_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm) struct ttm_tt *ttm)
{ {
struct vmw_ttm_tt *vmw_be = struct vmw_ttm_tt *vmw_be =
...@@ -553,7 +553,7 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev, ...@@ -553,7 +553,7 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
} }
static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{ {
struct vmw_ttm_tt *vmw_be = struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm); container_of(ttm, struct vmw_ttm_tt, dma_ttm);
...@@ -573,7 +573,7 @@ static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) ...@@ -573,7 +573,7 @@ static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
} }
static int vmw_ttm_populate(struct ttm_bo_device *bdev, static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{ {
/* TODO: maybe completely drop this ? */ /* TODO: maybe completely drop this ? */
...@@ -583,7 +583,7 @@ static int vmw_ttm_populate(struct ttm_bo_device *bdev, ...@@ -583,7 +583,7 @@ static int vmw_ttm_populate(struct ttm_bo_device *bdev,
return ttm_pool_alloc(&bdev->pool, ttm, ctx); return ttm_pool_alloc(&bdev->pool, ttm, ctx);
} }
static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev, static void vmw_ttm_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm) struct ttm_tt *ttm)
{ {
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt, struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
...@@ -640,7 +640,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) ...@@ -640,7 +640,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
return vmw_user_bo_verify_access(bo, tfile); return vmw_user_bo_verify_access(bo, tfile);
} }
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{ {
struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
...@@ -744,7 +744,7 @@ vmw_delete_mem_notify(struct ttm_buffer_object *bo) ...@@ -744,7 +744,7 @@ vmw_delete_mem_notify(struct ttm_buffer_object *bo)
vmw_move_notify(bo, false, NULL); vmw_move_notify(bo, false, NULL);
} }
struct ttm_bo_driver vmw_bo_driver = { struct ttm_device_funcs vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create, .ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate, .ttm_tt_populate = &vmw_ttm_populate,
.ttm_tt_unpopulate = &vmw_ttm_unpopulate, .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
......
...@@ -172,19 +172,19 @@ struct drm_vram_mm { ...@@ -172,19 +172,19 @@ struct drm_vram_mm {
uint64_t vram_base; uint64_t vram_base;
size_t vram_size; size_t vram_size;
struct ttm_bo_device bdev; struct ttm_device bdev;
}; };
/** /**
* drm_vram_mm_of_bdev() - \ * drm_vram_mm_of_bdev() - \
Returns the container of type &struct ttm_bo_device for field bdev. Returns the container of type &struct ttm_device for field bdev.
* @bdev: the TTM BO device * @bdev: the TTM BO device
* *
* Returns: * Returns:
* The containing instance of &struct drm_vram_mm * The containing instance of &struct drm_vram_mm
*/ */
static inline struct drm_vram_mm *drm_vram_mm_of_bdev( static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
struct ttm_bo_device *bdev) struct ttm_device *bdev)
{ {
return container_of(bdev, struct drm_vram_mm, bdev); return container_of(bdev, struct drm_vram_mm, bdev);
} }
......
...@@ -44,9 +44,9 @@ ...@@ -44,9 +44,9 @@
#include "ttm_resource.h" #include "ttm_resource.h"
struct ttm_bo_global; struct ttm_global;
struct ttm_bo_device; struct ttm_device;
struct dma_buf_map; struct dma_buf_map;
...@@ -122,7 +122,7 @@ struct ttm_buffer_object { ...@@ -122,7 +122,7 @@ struct ttm_buffer_object {
* Members constant at init. * Members constant at init.
*/ */
struct ttm_bo_device *bdev; struct ttm_device *bdev;
enum ttm_bo_type type; enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *); void (*destroy) (struct ttm_buffer_object *);
size_t acc_size; size_t acc_size;
...@@ -313,7 +313,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo); ...@@ -313,7 +313,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo);
* @bulk: optional bulk move structure to remember BO positions * @bulk: optional bulk move structure to remember BO positions
* *
* Move this BO to the tail of all lru lists used to lookup and reserve an * Move this BO to the tail of all lru lists used to lookup and reserve an
* object. This function must be called with struct ttm_bo_global::lru_lock * object. This function must be called with struct ttm_global::lru_lock
* held, and is used to make a BO less likely to be considered for eviction. * held, and is used to make a BO less likely to be considered for eviction.
*/ */
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
...@@ -326,7 +326,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, ...@@ -326,7 +326,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
* @bulk: bulk move structure * @bulk: bulk move structure
* *
* Bulk move BOs to the LRU tail, only valid to use when driver makes sure that * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
* BO order never changes. Should be called with ttm_bo_global::lru_lock held. * BO order never changes. Should be called with ttm_global::lru_lock held.
*/ */
void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk); void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
...@@ -337,14 +337,14 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk); ...@@ -337,14 +337,14 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
* Returns * Returns
* True if the workqueue was queued at the time * True if the workqueue was queued at the time
*/ */
int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev); int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
/** /**
* ttm_bo_unlock_delayed_workqueue * ttm_bo_unlock_delayed_workqueue
* *
* Allows the delayed workqueue to run. * Allows the delayed workqueue to run.
*/ */
void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched); void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
/** /**
* ttm_bo_eviction_valuable * ttm_bo_eviction_valuable
...@@ -357,14 +357,14 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched); ...@@ -357,14 +357,14 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place); const struct ttm_place *place);
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, size_t ttm_bo_dma_acc_size(struct ttm_device *bdev,
unsigned long bo_size, unsigned long bo_size,
unsigned struct_size); unsigned struct_size);
/** /**
* ttm_bo_init_reserved * ttm_bo_init_reserved
* *
* @bdev: Pointer to a ttm_bo_device struct. * @bdev: Pointer to a ttm_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized. * @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object. * @size: Requested size of buffer object.
* @type: Requested type of buffer object. * @type: Requested type of buffer object.
...@@ -396,7 +396,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, ...@@ -396,7 +396,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/ */
int ttm_bo_init_reserved(struct ttm_bo_device *bdev, int ttm_bo_init_reserved(struct ttm_device *bdev,
struct ttm_buffer_object *bo, struct ttm_buffer_object *bo,
size_t size, enum ttm_bo_type type, size_t size, enum ttm_bo_type type,
struct ttm_placement *placement, struct ttm_placement *placement,
...@@ -409,7 +409,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, ...@@ -409,7 +409,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
/** /**
* ttm_bo_init * ttm_bo_init
* *
* @bdev: Pointer to a ttm_bo_device struct. * @bdev: Pointer to a ttm_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized. * @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object. * @size: Requested size of buffer object.
* @type: Requested type of buffer object. * @type: Requested type of buffer object.
...@@ -443,7 +443,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, ...@@ -443,7 +443,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* -EINVAL: Invalid placement flags. * -EINVAL: Invalid placement flags.
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources. * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/ */
int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, int ttm_bo_init(struct ttm_device *bdev, struct ttm_buffer_object *bo,
size_t size, enum ttm_bo_type type, size_t size, enum ttm_bo_type type,
struct ttm_placement *placement, struct ttm_placement *placement,
uint32_t page_alignment, bool interruptible, size_t acc_size, uint32_t page_alignment, bool interruptible, size_t acc_size,
...@@ -537,18 +537,18 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo); ...@@ -537,18 +537,18 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
* *
* @filp: filp as input from the mmap method. * @filp: filp as input from the mmap method.
* @vma: vma as input from the mmap method. * @vma: vma as input from the mmap method.
* @bdev: Pointer to the ttm_bo_device with the address space manager. * @bdev: Pointer to the ttm_device with the address space manager.
* *
* This function is intended to be called by the device mmap method * This function is intended to be called by the device mmap method
* if the device address space is to be backed by the bo manager. * if the device address space is to be backed by the bo manager.
*/ */
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev); struct ttm_device *bdev);
/** /**
* ttm_bo_io * ttm_bo_io
* *
* @bdev: Pointer to the struct ttm_bo_device. * @bdev: Pointer to the struct ttm_device.
* @filp: Pointer to the struct file attempting to read / write. * @filp: Pointer to the struct file attempting to read / write.
* @wbuf: User-space pointer to address of buffer to write. NULL on read. * @wbuf: User-space pointer to address of buffer to write. NULL on read.
* @rbuf: User-space pointer to address of buffer to read into. * @rbuf: User-space pointer to address of buffer to read into.
...@@ -565,7 +565,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, ...@@ -565,7 +565,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
* the function may return -ERESTARTSYS if * the function may return -ERESTARTSYS if
* interrupted by a signal. * interrupted by a signal.
*/ */
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp, ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp,
const char __user *wbuf, char __user *rbuf, const char __user *wbuf, char __user *rbuf,
size_t count, loff_t *f_pos, bool write); size_t count, loff_t *f_pos, bool write);
...@@ -617,7 +617,7 @@ static inline void ttm_bo_unpin(struct ttm_buffer_object *bo) ...@@ -617,7 +617,7 @@ static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
--bo->pin_count; --bo->pin_count;
} }
int ttm_mem_evict_first(struct ttm_bo_device *bdev, int ttm_mem_evict_first(struct ttm_device *bdev,
struct ttm_resource_manager *man, struct ttm_resource_manager *man,
const struct ttm_place *place, const struct ttm_place *place,
struct ttm_operation_ctx *ctx, struct ttm_operation_ctx *ctx,
...@@ -642,5 +642,6 @@ void ttm_bo_vm_close(struct vm_area_struct *vma); ...@@ -642,5 +642,6 @@ void ttm_bo_vm_close(struct vm_area_struct *vma);
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write); void *buf, int len, int write);
bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all);
#endif #endif
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#ifndef _TTM_DEVICE_H_
#define _TTM_DEVICE_H_
#include <linux/types.h>
#include <linux/workqueue.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_pool.h>
#define TTM_NUM_MEM_TYPES 8
struct ttm_device;
struct ttm_placement;
struct ttm_buffer_object;
struct ttm_operation_ctx;
/**
* struct ttm_global - Buffer object driver global data.
*
* @kobj: Kobject for the sysfs representation of the global state.
* @dummy_read_page: Pointer to a dummy page used for mapping requests
* of unpopulated pages.
* @lru_lock: Spinlock protecting the bo subsystem lru lists.
* @device_list: List of buffer object devices, protected by ttm_global_mutex.
* @swap_lru: Per-priority lru lists of buffer objects used for swapping.
* @bo_count: Number of buffer objects currently allocated.
*/
extern struct ttm_global {
/**
* Constant after init.
*/
struct kobject kobj;
struct page *dummy_read_page;
spinlock_t lru_lock;
/**
* Protected by ttm_global_mutex.
*/
struct list_head device_list;
/**
* Protected by the lru_lock.
*/
struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
/**
* Internal protection.
*/
atomic_t bo_count;
} ttm_glob;
struct ttm_device_funcs {
/**
* ttm_tt_create
*
* @bo: The buffer object to create the ttm for.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
* Returns:
* NULL: Out of memory.
*/
struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
uint32_t page_flags);
/**
* ttm_tt_populate
*
* @ttm: The struct ttm_tt to contain the backing pages.
*
* Allocate all backing pages.
* Returns:
* -ENOMEM: Out of memory.
*/
int (*ttm_tt_populate)(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_operation_ctx *ctx);
/**
* ttm_tt_unpopulate
*
* @ttm: The struct ttm_tt to contain the backing pages.
*
* Free all backing pages.
*/
void (*ttm_tt_unpopulate)(struct ttm_device *bdev,
struct ttm_tt *ttm);
/**
* ttm_tt_destroy
*
* @bdev: Pointer to a ttm device
* @ttm: Pointer to a struct ttm_tt.
*
* Destroy the backend. This is called back from ttm_tt_destroy, so
* don't call ttm_tt_destroy from the callback or you will get an
* infinite loop.
*/
void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
* struct ttm_device_funcs member eviction_valuable
*
* @bo: the buffer object to be evicted
* @place: placement we need room for
*
* Check with the driver if it is valuable to evict a BO to make room
* for a certain placement.
*/
bool (*eviction_valuable)(struct ttm_buffer_object *bo,
const struct ttm_place *place);
/**
* struct ttm_device_funcs member evict_flags:
*
* @bo: the buffer object to be evicted
*
* Return the bo flags for a buffer which is not mapped to the hardware.
* These will be placed in proposed_flags so that when the move is
* finished, they'll end up in bo->mem.flags
* This should not cause multihop evictions, and the core will warn
* if one is proposed.
*/
void (*evict_flags)(struct ttm_buffer_object *bo,
struct ttm_placement *placement);
/**
* struct ttm_device_funcs member move:
*
* @bo: the buffer to move
* @evict: whether this motion is evicting the buffer from
* the graphics address space
* @ctx: context for this move with parameters
* @new_mem: the new memory region receiving the buffer
* @hop: placement for driver directed intermediate hop
*
* Move a buffer between two memory regions.
* Returns -EMULTIHOP if the driver requests a hop.
*/
int (*move)(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem,
struct ttm_place *hop);
/**
* struct ttm_device_funcs member verify_access
*
* @bo: Pointer to a buffer object.
* @filp: Pointer to a struct file trying to access the object.
*
* Called from the map / write / read methods to verify that the
* caller is permitted to access the buffer object.
* This member may be set to NULL, which will refuse this kind of
* access for all buffer objects.
* This function should return 0 if access is granted, -EPERM otherwise.
*/
int (*verify_access)(struct ttm_buffer_object *bo,
struct file *filp);
/**
* Hook to notify the driver about a resource delete.
*/
void (*delete_mem_notify)(struct ttm_buffer_object *bo);
/**
* notify the driver that we're about to swap out this bo
*/
void (*swap_notify)(struct ttm_buffer_object *bo);
/**
* Driver callback for when mapping io memory (for bo_move_memcpy
* for instance). TTM will take care to call io_mem_free whenever
* the mapping is no longer in use. io_mem_reserve & io_mem_free
* are balanced.
*/
int (*io_mem_reserve)(struct ttm_device *bdev,
struct ttm_resource *mem);
void (*io_mem_free)(struct ttm_device *bdev,
struct ttm_resource *mem);
/**
* Return the pfn for a given page_offset inside the BO.
*
* @bo: the BO to look up the pfn for
* @page_offset: the offset to look up
*/
unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
unsigned long page_offset);
/**
* Read/write memory buffers for ptrace access
*
* @bo: the BO to access
* @offset: the offset from the start of the BO
* @buf: pointer to source/destination buffer
* @len: number of bytes to copy
* @write: whether to read (0) from or write (non-0) to BO
*
* If successful, this function should return the number of
* bytes copied, -EIO otherwise. If the number of bytes
* returned is < len, the function may be called again with
* the remainder of the buffer to copy.
*/
int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
void *buf, int len, int write);
/**
* struct ttm_device_funcs member del_from_lru_notify
*
* @bo: the buffer object deleted from lru
*
* Notify the driver that a BO was deleted from the LRU.
*/
void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
/**
* Notify the driver that we're about to release a BO
*
* @bo: BO that is about to be released
*
* Gives the driver a chance to do any cleanup, including
* adding fences that may force a delayed delete
*/
void (*release_notify)(struct ttm_buffer_object *bo);
};
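For reference, a driver's table only has to fill in the callbacks it needs: ttm_tt_create and ttm_tt_destroy are mandatory since TTM calls them unconditionally, while leaving ttm_tt_populate/ttm_tt_unpopulate NULL falls back to the device page pool (see ttm_tt_populate() above). A minimal sketch with hypothetical foo_* callbacks, not taken from any real driver:
/* Hypothetical minimal funcs table for a system-memory-only driver. */
static struct ttm_tt *foo_tt_create(struct ttm_buffer_object *bo,
                                    uint32_t page_flags)
{
        struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
        if (!tt)
                return NULL;
        if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
                kfree(tt);
                return NULL;
        }
        return tt;
}
static void foo_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
        ttm_tt_destroy_common(bdev, tt);
        ttm_tt_fini(tt);
        kfree(tt);
}
static struct ttm_device_funcs foo_bo_driver = {
        .ttm_tt_create = foo_tt_create,
        .ttm_tt_destroy = foo_tt_destroy,
        /* TTM exports this default; most drivers reuse it unchanged */
        .eviction_valuable = ttm_bo_eviction_valuable,
};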
/**
* struct ttm_device - Buffer object driver device-specific data.
*
* @device_list: Our entry in the global device list.
* @funcs: Function table for the device.
* @sysman: Resource manager for the system domain.
* @man_drv: An array of resource_managers.
* @vma_manager: Address space manager.
* @pool: page pool for the device.
* @dev_mapping: A pointer to the struct address_space representing the
* device address space.
* @wq: Work queue structure for the delayed delete workqueue.
*/
struct ttm_device {
/*
* Constant after bo device init
*/
struct list_head device_list;
struct ttm_device_funcs *funcs;
/*
* Access via ttm_manager_type.
*/
struct ttm_resource_manager sysman;
struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
/*
* Protected by internal locks.
*/
struct drm_vma_offset_manager *vma_manager;
struct ttm_pool pool;
/*
* Protected by the global lru_lock.
*/
struct list_head ddestroy;
/*
* Protected by load / firstopen / lastclose / unload sync.
*/
struct address_space *dev_mapping;
/*
* Internal protection.
*/
struct delayed_work wq;
};
static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device *bdev, int mem_type)
{
return bdev->man_drv[mem_type];
}
static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
struct ttm_resource_manager *manager)
{
bdev->man_drv[type] = manager;
}
int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
struct device *dev, struct address_space *mapping,
struct drm_vma_offset_manager *vma_manager,
bool use_dma_alloc, bool use_dma32);
void ttm_device_fini(struct ttm_device *bdev);
#endif
...@@ -33,7 +33,7 @@ ...@@ -33,7 +33,7 @@
#define TTM_MAX_BO_PRIORITY 4U #define TTM_MAX_BO_PRIORITY 4U
struct ttm_bo_device; struct ttm_device;
struct ttm_resource_manager; struct ttm_resource_manager;
struct ttm_resource; struct ttm_resource;
struct ttm_place; struct ttm_place;
...@@ -233,7 +233,7 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res); ...@@ -233,7 +233,7 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
void ttm_resource_manager_init(struct ttm_resource_manager *man, void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size); unsigned long p_size);
int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev, int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_resource_manager *man); struct ttm_resource_manager *man);
void ttm_resource_manager_debug(struct ttm_resource_manager *man, void ttm_resource_manager_debug(struct ttm_resource_manager *man,
......
...@@ -118,14 +118,14 @@ void ttm_tt_fini(struct ttm_tt *ttm); ...@@ -118,14 +118,14 @@ void ttm_tt_fini(struct ttm_tt *ttm);
* *
* Unbind, unpopulate and destroy common struct ttm_tt. * Unbind, unpopulate and destroy common struct ttm_tt.
*/ */
void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm); void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
/** /**
* ttm_tt_destroy_common: * ttm_tt_destroy_common:
* *
* Called from the driver to destroy the common ttm_tt state. * Called from the driver to destroy the common ttm_tt state.
*/ */
void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm); void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm);
/** /**
* ttm_tt_swapin: * ttm_tt_swapin:
...@@ -135,7 +135,7 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm); ...@@ -135,7 +135,7 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
* Swap in a previously swapped out ttm_tt. * Swap in a previously swapped out ttm_tt.
*/ */
int ttm_tt_swapin(struct ttm_tt *ttm); int ttm_tt_swapin(struct ttm_tt *ttm);
int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm); int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm);
/** /**
* ttm_tt_populate - allocate pages for a ttm * ttm_tt_populate - allocate pages for a ttm
...@@ -144,7 +144,7 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm); ...@@ -144,7 +144,7 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
* *
* Calls the driver method to allocate pages for a ttm * Calls the driver method to allocate pages for a ttm
*/ */
int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx); int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
/** /**
* ttm_tt_unpopulate - free pages from a ttm * ttm_tt_unpopulate - free pages from a ttm
...@@ -153,7 +153,7 @@ int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_o ...@@ -153,7 +153,7 @@ int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_o
* *
* Calls the driver method to free all pages from a ttm * Calls the driver method to free all pages from a ttm
*/ */
void ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm); void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
#if IS_ENABLED(CONFIG_AGP) #if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h> #include <linux/agp_backend.h>
......