Commit d0cef9fa authored by Roger He, committed by Alex Deucher

drm/ttm: use an operation ctx for ttm_tt_populate in ttm_bo_driver (v2)

forward the operation context to ttm_tt_populate as well;
the ultimate goal is to enable swapout for reserved BOs.

v2: squash in fix for vboxvideo
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 9de2fb99
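
Before reading the diff: this commit changes the ttm_bo_driver callback from
int (*ttm_tt_populate)(struct ttm_tt *ttm) to a form that also receives a
struct ttm_operation_ctx, and every backend now forwards that context to the
page-pool allocators instead of those allocators fabricating a default one
internally. A minimal sketch of the pattern, assuming the v4.15-era TTM API
shown in the hunks below (the driver name "foo" is hypothetical):

/* hypothetical backend: forward the caller's context downward */
static int foo_ttm_tt_populate(struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx)
{
	if (ttm->state != tt_unpopulated)
		return 0;

	return ttm_pool_populate(ttm, ctx);
}

/*
 * A caller with no context of its own builds a default on the stack,
 * exactly as ttm_tt_bind() does in this commit.
 */
static int foo_populate_now(struct ttm_tt *ttm)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	return ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
}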
@@ -990,7 +990,8 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
 	return &gtt->ttm.ttm;
 }
 
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
+static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1018,11 +1019,11 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate(&gtt->ttm, adev->dev);
+		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
 	}
 #endif
 
-	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
+	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
 }
 
 static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)

@@ -216,9 +216,10 @@ static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
 	return tt;
 }
 
-static int ast_ttm_tt_populate(struct ttm_tt *ttm)
+static int ast_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 
 static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)

@@ -216,9 +216,10 @@ static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
 	return tt;
 }
 
-static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
+static int cirrus_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 
 static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)

@@ -223,9 +223,10 @@ static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
 	return tt;
 }
 
-static int hibmc_ttm_tt_populate(struct ttm_tt *ttm)
+static int hibmc_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 
 static void hibmc_ttm_tt_unpopulate(struct ttm_tt *ttm)

@@ -216,9 +216,10 @@ static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
 	return tt;
 }
 
-static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
+static int mgag200_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 
 static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)

@@ -1547,7 +1547,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 }
 
 static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
@@ -1572,17 +1572,17 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
-		return ttm_agp_tt_populate(ttm);
+		return ttm_agp_tt_populate(ttm, ctx);
 	}
 #endif
 
 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate((void *)ttm, dev);
+		return ttm_dma_populate((void *)ttm, dev, ctx);
 	}
 #endif
 
-	r = ttm_pool_populate(ttm);
+	r = ttm_pool_populate(ttm, ctx);
 	if (r) {
 		return r;
 	}

@@ -291,14 +291,15 @@ static struct ttm_backend_func qxl_backend_func = {
 	.destroy = &qxl_ttm_backend_destroy,
 };
 
-static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
+static int qxl_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
 	int r;
 
 	if (ttm->state != tt_unpopulated)
 		return 0;
 
-	r = ttm_pool_populate(ttm);
+	r = ttm_pool_populate(ttm, ctx);
 	if (r)
 		return r;

@@ -721,7 +721,8 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
 	return (struct radeon_ttm_tt *)ttm;
 }
 
-static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
 	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
 	struct radeon_device *rdev;
@@ -750,17 +751,17 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 	rdev = radeon_get_rdev(ttm->bdev);
 #if IS_ENABLED(CONFIG_AGP)
 	if (rdev->flags & RADEON_IS_AGP) {
-		return ttm_agp_tt_populate(ttm);
+		return ttm_agp_tt_populate(ttm, ctx);
 	}
 #endif
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate(&gtt->ttm, rdev->dev);
+		return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
 	}
 #endif
 
-	return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm);
+	return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
 }
 
 static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)

@@ -133,12 +133,12 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
 }
 EXPORT_SYMBOL(ttm_agp_tt_create);
 
-int ttm_agp_tt_populate(struct ttm_tt *ttm)
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	if (ttm->state != tt_unpopulated)
 		return 0;
 
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 EXPORT_SYMBOL(ttm_agp_tt_populate);

@@ -376,7 +376,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	 * TTM might be null for moves within the same region.
 	 */
 	if (ttm && ttm->state == tt_unpopulated) {
-		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
 		if (ret)
 			goto out1;
 	}
@@ -545,14 +545,19 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 			   unsigned long num_pages,
 			   struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
+	struct ttm_mem_reg *mem = &bo->mem;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
 	struct ttm_tt *ttm = bo->ttm;
+	pgprot_t prot;
 	int ret;
 
 	BUG_ON(!ttm);
 
 	if (ttm->state == tt_unpopulated) {
-		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
 		if (ret)
 			return ret;
 	}

@@ -226,12 +226,17 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
 						cvma.vm_page_prot);
 	} else {
+		struct ttm_operation_ctx ctx = {
+			.interruptible = false,
+			.no_wait_gpu = false
+		};
+
 		ttm = bo->ttm;
 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
 						cvma.vm_page_prot);
 
 		/* Allocate all page at once, most common usage */
-		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+		if (ttm->bdev->driver->ttm_tt_populate(ttm, &ctx)) {
 			retval = VM_FAULT_OOM;
 			goto out_io_unlock;
 		}

@@ -1058,13 +1058,9 @@ void ttm_page_alloc_fini(void)
 	_manager = NULL;
 }
 
-int ttm_pool_populate(struct ttm_tt *ttm)
+int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-	struct ttm_operation_ctx ctx = {
-		.interruptible = false,
-		.no_wait_gpu = false
-	};
 	unsigned i;
 	int ret;
@@ -1080,7 +1076,7 @@ int ttm_pool_populate(struct ttm_tt *ttm)
 	for (i = 0; i < ttm->num_pages; ++i) {
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-						PAGE_SIZE, &ctx);
+						PAGE_SIZE, ctx);
 		if (unlikely(ret != 0)) {
 			ttm_pool_unpopulate(ttm);
 			return -ENOMEM;
@@ -1117,12 +1113,13 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
 }
 EXPORT_SYMBOL(ttm_pool_unpopulate);
 
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+				struct ttm_operation_ctx *ctx)
 {
 	unsigned i, j;
 	int r;
 
-	r = ttm_pool_populate(&tt->ttm);
+	r = ttm_pool_populate(&tt->ttm, ctx);
 	if (r)
 		return r;

@@ -923,14 +923,11 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
  * On success pages list will hold count number of correctly
  * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
  */
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+			struct ttm_operation_ctx *ctx)
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-	struct ttm_operation_ctx ctx = {
-		.interruptible = false,
-		.no_wait_gpu = false
-	};
 	unsigned long num_pages = ttm->num_pages;
 	struct dma_pool *pool;
 	enum pool_type type;
@@ -966,7 +963,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 			break;
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-						pool->size, &ctx);
+						pool->size, ctx);
 		if (unlikely(ret != 0)) {
 			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
@@ -1002,7 +999,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 		}
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-						pool->size, &ctx);
+						pool->size, ctx);
 		if (unlikely(ret != 0)) {
 			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;

@@ -263,6 +263,10 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
 
 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
 	int ret = 0;
 
 	if (!ttm)
@@ -271,7 +275,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 	if (ttm->state == tt_bound)
 		return 0;
 
-	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+	ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
 	if (ret)
 		return ret;

@@ -124,13 +124,17 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
 	int ret;
 	struct page **pages = bo->tbo.ttm->pages;
 	int nr_pages = bo->tbo.num_pages;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
 
 	/* wtf swapping */
 	if (bo->pages)
 		return 0;
 
 	if (bo->tbo.ttm->state == tt_unpopulated)
-		bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm);
+		bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
 	bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!bo->pages)
 		goto out;

@@ -324,12 +324,13 @@ static struct ttm_backend_func virtio_gpu_backend_func = {
 	.destroy = &virtio_gpu_ttm_backend_destroy,
 };
 
-static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm)
+static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
 	if (ttm->state != tt_unpopulated)
 		return 0;
 
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 
 static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)

@@ -635,16 +635,12 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
 }
 
-static int vmw_ttm_populate(struct ttm_tt *ttm)
+static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct vmw_ttm_tt *vmw_tt =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
-	struct ttm_operation_ctx ctx = {
-		.interruptible = true,
-		.no_wait_gpu = false
-	};
 	int ret;
 
 	if (ttm->state != tt_unpopulated)
@@ -653,15 +649,16 @@ static int vmw_ttm_populate(struct ttm_tt *ttm)
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
 		size_t size =
 			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
-		ret = ttm_mem_global_alloc(glob, size, &ctx);
+		ret = ttm_mem_global_alloc(glob, size, ctx);
 		if (unlikely(ret != 0))
 			return ret;
 
-		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
+				       ctx);
 		if (unlikely(ret != 0))
 			ttm_mem_global_free(glob, size);
 	} else
-		ret = ttm_pool_populate(ttm);
+		ret = ttm_pool_populate(ttm, ctx);
 
 	return ret;
 }

@@ -240,6 +240,10 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
 	unsigned long offset;
 	unsigned long bo_size;
 	struct vmw_otable *otables = batch->otables;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
 	SVGAOTableType i;
 	int ret;
@@ -264,7 +268,7 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
 	ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
 	BUG_ON(ret != 0);
-	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
+	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
 	if (unlikely(ret != 0))
 		goto out_unreserve;
 	ret = vmw_bo_map_dma(batch->otable_bo);
@@ -430,6 +434,11 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 			       struct vmw_mob *mob)
 {
 	int ret;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
+
 	BUG_ON(mob->pt_bo != NULL);
 
 	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
@@ -442,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 	ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
 	BUG_ON(ret != 0);
-	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
+	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
 	if (unlikely(ret != 0))
 		goto out_unreserve;
 	ret = vmw_bo_map_dma(mob->pt_bo);

@@ -213,9 +213,10 @@ static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
 	return tt;
 }
 
-static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
+static int vbox_ttm_tt_populate(struct ttm_tt *ttm,
+				struct ttm_operation_ctx *ctx)
 {
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 
 static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)

@@ -352,7 +352,8 @@ struct ttm_bo_driver {
 	 * Returns:
 	 * -ENOMEM: Out of memory.
 	 */
-	int (*ttm_tt_populate)(struct ttm_tt *ttm);
+	int (*ttm_tt_populate)(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx);
 
 	/**
 	 * ttm_tt_unpopulate
@@ -1077,7 +1078,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
 				 struct agp_bridge_data *bridge,
 				 unsigned long size, uint32_t page_flags,
 				 struct page *dummy_read_page);
-int ttm_agp_tt_populate(struct ttm_tt *ttm);
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
 void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
 #endif

@@ -47,7 +47,7 @@ void ttm_page_alloc_fini(void);
 *
 * Add backing pages to all of @ttm
 */
-int ttm_pool_populate(struct ttm_tt *ttm);
+int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
 
 /**
  * ttm_pool_unpopulate:
@@ -61,7 +61,8 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm);
 /**
  * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
  */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+				struct ttm_operation_ctx *ctx);
 
 /**
  * Unpopulates and DMA unmaps pages as part of a
@@ -89,7 +90,8 @@ void ttm_dma_page_alloc_fini(void);
 */
 int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
 
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+			struct ttm_operation_ctx *ctx);
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 
 #else
@@ -106,7 +108,8 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
 	return 0;
 }
 
 static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
-				   struct device *dev)
+				   struct device *dev,
+				   struct ttm_operation_ctx *ctx)
 {
 	return -ENOMEM;
 }
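
Taken together, the hunks above thread a single struct ttm_operation_ctx from
the topmost caller down to ttm_mem_global_alloc_page(), which is what will
later let the memory-accounting path swap out reserved BOs. A hedged
caller-side sketch, assuming ttm_bo_validate() was already converted to take
a context earlier in this series (foo_pin_buffer is illustrative only):

static int foo_pin_buffer(struct ttm_buffer_object *bo,
			  struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};

	/* this ctx now reaches ttm_tt_populate() and the pool allocators */
	return ttm_bo_validate(bo, placement, &ctx);
}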