Commit e46f468f authored by Dave Airlie

drm/ttm: drop special pipeline accel cleanup function.

The two accel cleanup paths were mostly the same once refactored.

Just pass a bool to say if the evictions are to be pipelined.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200917064132.148521-2-airlied@gmail.com
parent 92afce90
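
To make the API change concrete before the hunks, here is a minimal sketch of the call-site migration, using only the signatures visible in the diff below (surrounding driver code elided):

	/* Before: two entry points, one blocking and one pipelined. */
	r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);

	/* After: one entry point; the new bool selects pipelining. */
	r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
	r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);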
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -500,9 +500,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	/* Always block for VM page tables before committing the new location */
 	if (bo->type == ttm_bo_type_kernel)
-		r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
+		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
 	else
-		r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
+		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 	dma_fence_put(fence);
 	return r;
drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -824,7 +824,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	if (ret == 0) {
 		ret = ttm_bo_move_accel_cleanup(bo,
 						&fence->base,
-						evict,
+						evict, false,
 						new_reg);
 		nouveau_fence_unref(&fence);
 	}
drivers/gpu/drm/radeon/radeon_ttm.c
@@ -200,7 +200,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	if (IS_ERR(fence))
 		return PTR_ERR(fence);
-	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
+	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem);
 	radeon_fence_unref(&fence);
 	return r;
 }
drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -580,81 +580,56 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
 	return 0;
 }
 
-int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
-			      struct dma_fence *fence,
-			      bool evict,
-			      struct ttm_resource *new_mem)
+static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
+				       struct dma_fence *fence)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
-	int ret;
+	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
 
-	dma_resv_add_excl_fence(bo->base.resv, fence);
-	if (evict)
-		ret = ttm_bo_wait_free_node(bo, man->use_tt);
-	else
-		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
-	if (ret)
-		return ret;
+	/**
+	 * BO doesn't have a TTM we need to bind/unbind. Just remember
+	 * this eviction and free up the allocation
+	 */
+	spin_lock(&from->move_lock);
+	if (!from->move || dma_fence_is_later(fence, from->move)) {
+		dma_fence_put(from->move);
+		from->move = dma_fence_get(fence);
+	}
+	spin_unlock(&from->move_lock);
 
-	ttm_bo_assign_mem(bo, new_mem);
+	ttm_bo_free_old_node(bo);
 
-	return 0;
+	dma_fence_put(bo->moving);
+	bo->moving = dma_fence_get(fence);
 }
-EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
 
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-			 struct dma_fence *fence, bool evict,
-			 struct ttm_resource *new_mem)
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+			      struct dma_fence *fence,
+			      bool evict,
+			      bool pipeline,
+			      struct ttm_resource *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
-	struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type);
-	int ret;
+	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+	int ret = 0;
 
 	dma_resv_add_excl_fence(bo->base.resv, fence);
+	if (!evict)
+		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
+	else if (!from->use_tt && pipeline)
+		ttm_bo_move_pipeline_evict(bo, fence);
+	else
+		ret = ttm_bo_wait_free_node(bo, man->use_tt);
 
-	if (!evict) {
-		ret = ttm_bo_move_to_ghost(bo, fence, to->use_tt);
-		if (ret)
-			return ret;
-	} else if (!from->use_tt) {
-		/**
-		 * BO doesn't have a TTM we need to bind/unbind. Just remember
-		 * this eviction and free up the allocation
-		 */
-		spin_lock(&from->move_lock);
-		if (!from->move || dma_fence_is_later(fence, from->move)) {
-			dma_fence_put(from->move);
-			from->move = dma_fence_get(fence);
-		}
-		spin_unlock(&from->move_lock);
-
-		ttm_bo_free_old_node(bo);
-
-		dma_fence_put(bo->moving);
-		bo->moving = dma_fence_get(fence);
-	} else {
-		/**
-		 * Last resort, wait for the move to be completed.
-		 *
-		 * Should never happen in pratice.
-		 */
-		ret = ttm_bo_wait_free_node(bo, to->use_tt);
-		if (ret)
-			return ret;
-	}
+	if (ret)
+		return ret;
 
 	ttm_bo_assign_mem(bo, new_mem);
 
 	return 0;
 }
-EXPORT_SYMBOL(ttm_bo_pipeline_move);
+EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
 
 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 {
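
The merged function now picks one of three cleanup strategies from (evict, pipeline, from->use_tt). A condensed restatement of the branch in the hunk above, with explanatory comments added as my gloss:

	if (!evict)
		/* Regular move: hang the old node on a ghost BO until the fence signals. */
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		/* Pipelined eviction: remember the fence on the source manager and free the node now. */
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		/* Fallback: block until the move completes, then free the old node. */
		ret = ttm_bo_wait_free_node(bo, man->use_tt);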
include/drm/ttm/ttm_bo_driver.h
@@ -642,6 +642,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  * @bo: A pointer to a struct ttm_buffer_object.
  * @fence: A fence object that signals when moving is complete.
  * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @pipeline: evictions are to be pipelined.
  * @new_mem: struct ttm_resource indicating where to move.
  *
  * Accelerated move function to be called when an accelerated move
@@ -653,23 +654,9 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  */
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      struct dma_fence *fence, bool evict,
+			      bool pipeline,
 			      struct ttm_resource *new_mem);
 
-/**
- * ttm_bo_pipeline_move.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @fence: A fence object that signals when moving is complete.
- * @evict: This is an evict move. Don't return until the buffer is idle.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Function for pipelining accelerated moves. Either free the memory
- * immediately or hang it on a temporary buffer object.
- */
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-			 struct dma_fence *fence, bool evict,
-			 struct ttm_resource *new_mem);
-
 /**
  * ttm_bo_pipeline_gutting.
  *
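
For reference, a sketch of how a driver move path would call the updated prototype. The copy helper name is hypothetical; the cleanup pattern mirrors the radeon hunk above, and a driver that cannot pipeline evictions passes pipeline = false:

	/* Kick off the blit on the copy engine, then hand cleanup to TTM.
	 * mydrv_copy_buffer() is a hypothetical helper returning a dma_fence.
	 */
	fence = mydrv_copy_buffer(bo, &bo->mem, new_mem);
	if (IS_ERR(fence))
		return PTR_ERR(fence);
	r = ttm_bo_move_accel_cleanup(bo, fence, evict, false, new_mem);
	dma_fence_put(fence);
	return r;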