Commit 9e9a153b authored by Dave Airlie

drm/ttm: move ttm binding/unbinding out of ttm_tt paths.

Move these up to the bo level, reducing ttm_tt to just being a backing
store. The next step is to move the bound flag out.
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200915024007.67163-6-airlied@gmail.com
parent 2040ec97
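
A brief note on the shape of the change before the per-file hunks: ttm_tt_bind()/ttm_tt_unbind() used to live on struct ttm_tt and track the bound state there; this patch replaces them with ttm_bo_tt_bind()/ttm_bo_tt_unbind() wrappers that take the buffer object and flip the state via the new ttm_bo_tt_set_bound()/ttm_bo_tt_set_unbound() helpers, leaving ttm_tt as plain backing store. The standalone C sketch below only models that state handling so it can be compiled and run outside the kernel; everything prefixed demo_ or fake_ is an invented stand-in for illustration and is not part of the TTM API.

/* Userspace model of the bo-level bind/unbind helpers introduced by this
 * patch. All demo_/fake_ names are invented for illustration only. */
#include <stdio.h>

enum demo_tt_state { TT_UNPOPULATED, TT_UNBOUND, TT_BOUND };

struct demo_ttm {
	enum demo_tt_state state;          /* stands in for ttm_tt->_state */
};

struct demo_bo;

struct demo_driver_ops {
	/* stand-ins for the driver's ttm_tt_bind/ttm_tt_unbind hooks */
	int  (*tt_bind)(struct demo_bo *bo, int placement);
	void (*tt_unbind)(struct demo_bo *bo);
};

struct demo_bo {
	struct demo_ttm *ttm;              /* backing store only */
	const struct demo_driver_ops *ops; /* driver callbacks */
};

/* bo-level bind: validate, skip if already bound, call the driver hook,
 * then record the bound state -- mirroring ttm_bo_tt_bind() further down. */
static int demo_bo_tt_bind(struct demo_bo *bo, int placement)
{
	int ret;

	if (!bo->ttm)
		return -1;                 /* the kernel helper returns -EINVAL */
	if (bo->ttm->state == TT_BOUND)
		return 0;                  /* already bound: nothing to do */

	ret = bo->ops->tt_bind(bo, placement);
	if (ret)
		return ret;

	bo->ttm->state = TT_BOUND;
	return 0;
}

/* bo-level unbind: only call the driver hook if actually bound. */
static void demo_bo_tt_unbind(struct demo_bo *bo)
{
	if (bo->ttm && bo->ttm->state == TT_BOUND) {
		bo->ops->tt_unbind(bo);
		bo->ttm->state = TT_UNBOUND;
	}
}

static int fake_bind(struct demo_bo *bo, int placement)
{
	(void)bo;
	printf("driver bind to placement %d\n", placement);
	return 0;
}

static void fake_unbind(struct demo_bo *bo)
{
	(void)bo;
	printf("driver unbind\n");
}

int main(void)
{
	const struct demo_driver_ops ops = { fake_bind, fake_unbind };
	struct demo_ttm tt = { TT_UNBOUND };
	struct demo_bo bo = { &tt, &ops };

	demo_bo_tt_bind(&bo, 1);   /* binds, driver hook runs once */
	demo_bo_tt_bind(&bo, 1);   /* no-op: state is already TT_BOUND */
	demo_bo_tt_unbind(&bo);    /* unbinds, state returns to TT_UNBOUND */
	return 0;
}

The call-site hunks below show the same pattern in the drivers: they now pass only the buffer object, and the bound-state bookkeeping stays in one place in ttm_bo.c.
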
@@ -552,7 +552,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
goto out_cleanup;
/* Bind the memory to the GTT space */
- r = ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem);
+ r = ttm_bo_tt_bind(bo, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -924,7 +924,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
+ ret = ttm_bo_tt_bind(bo, &tmp_reg);
if (ret)
goto out;
@@ -53,7 +53,7 @@ static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
struct ttm_operation_ctx ctx = { false, false };
long r;
- if (!bo->tbo.ttm || !ttm_tt_is_bound(bo->tbo.ttm))
+ if (!bo->tbo.ttm || !ttm_bo_tt_is_bound(&bo->tbo))
return true;
if (!mmu_notifier_range_blockable(range))
@@ -238,7 +238,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
goto out_cleanup;
}
- r = ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem);
+ r = ttm_bo_tt_bind(bo, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -264,7 +264,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (ret)
goto out_err;
- ret = ttm_tt_bind(bdev, bo->ttm, mem);
+ ret = ttm_bo_tt_bind(bo, mem);
if (ret)
goto out_err;
}
@@ -1619,6 +1619,35 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
if (bo->ttm == NULL)
return;
+ ttm_bo_tt_unbind(bo);
ttm_tt_destroy(bo->bdev, bo->ttm);
bo->ttm = NULL;
}
+ int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem)
+ {
+ int ret;
+ if (!bo->ttm)
+ return -EINVAL;
+ if (ttm_bo_tt_is_bound(bo))
+ return 0;
+ ret = bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem);
+ if (unlikely(ret != 0))
+ return ret;
+ ttm_bo_tt_set_bound(bo);
+ return 0;
+ }
+ EXPORT_SYMBOL(ttm_bo_tt_bind);
+ void ttm_bo_tt_unbind(struct ttm_buffer_object *bo)
+ {
+ if (ttm_bo_tt_is_bound(bo)) {
+ bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm);
+ ttm_bo_tt_set_unbound(bo);
+ }
+ }
@@ -67,7 +67,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
return ret;
}
- ttm_tt_unbind(bo->bdev, ttm);
+ ttm_bo_tt_unbind(bo);
ttm_bo_free_old_node(bo);
old_mem->mem_type = TTM_PL_SYSTEM;
}
@@ -82,7 +82,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
if (unlikely(ret != 0))
return ret;
- ret = ttm_tt_bind(bo->bdev, ttm, new_mem);
+ ret = ttm_bo_tt_bind(bo, new_mem);
if (unlikely(ret != 0))
return ret;
}
@@ -701,4 +701,3 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
return 0;
}
@@ -209,8 +209,6 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
- ttm_tt_unbind(bdev, ttm);
ttm_tt_unpopulate(bdev, ttm);
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
@@ -303,35 +301,6 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
- void ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
- {
- if (ttm_tt_is_bound(ttm)) {
- bdev->driver->ttm_tt_unbind(bdev, ttm);
- ttm_tt_set_unbound(ttm);
- }
- }
- int ttm_tt_bind(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm, struct ttm_resource *bo_mem)
- {
- int ret = 0;
- if (!ttm)
- return -EINVAL;
- if (ttm_tt_is_bound(ttm))
- return 0;
- ret = bdev->driver->ttm_tt_bind(bdev, ttm, bo_mem);
- if (unlikely(ret != 0))
- return ret;
- ttm_tt_set_bound(ttm);
- return 0;
- }
- EXPORT_SYMBOL(ttm_tt_bind);
int ttm_tt_swapin(struct ttm_tt *ttm)
{
struct address_space *swap_space;
@@ -684,6 +684,34 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
*/
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
+ /**
+ * ttm_bo_tt_bind
+ *
+ * Bind the object tt to a memory resource.
+ */
+ int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
+ /**
+ * ttm_bo_tt_unbind
+ *
+ * Unbind the object tt from a memory resource.
+ */
+ void ttm_bo_tt_unbind(struct ttm_buffer_object *bo);
+ static inline bool ttm_bo_tt_is_bound(struct ttm_buffer_object *bo)
+ {
+ return bo->ttm->_state == tt_bound;
+ }
+ static inline void ttm_bo_tt_set_unbound(struct ttm_buffer_object *bo)
+ {
+ bo->ttm->_state = tt_unbound;
+ }
+ static inline void ttm_bo_tt_set_bound(struct ttm_buffer_object *bo)
+ {
+ bo->ttm->_state = tt_bound;
+ }
/**
* ttm_bo_tt_destroy.
*/
@@ -82,11 +82,6 @@ static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
return tt->_state != tt_unpopulated;
}
- static inline bool ttm_tt_is_bound(struct ttm_tt *tt)
- {
- return tt->_state == tt_bound;
- }
static inline void ttm_tt_set_unpopulated(struct ttm_tt *tt)
{
tt->_state = tt_unpopulated;
@@ -97,16 +92,6 @@ static inline void ttm_tt_set_populated(struct ttm_tt *tt)
tt->_state = tt_unbound;
}
- static inline void ttm_tt_set_unbound(struct ttm_tt *tt)
- {
- tt->_state = tt_unbound;
- }
- static inline void ttm_tt_set_bound(struct ttm_tt *tt)
- {
- tt->_state = tt_bound;
- }
/**
* struct ttm_dma_tt
*
@@ -164,17 +149,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
void ttm_tt_fini(struct ttm_tt *ttm);
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
- /**
- * ttm_ttm_bind:
- *
- * @ttm: The struct ttm_tt containing backing pages.
- * @bo_mem: The struct ttm_resource identifying the binding location.
- *
- * Bind the pages of @ttm to an aperture location identified by @bo_mem
- */
- int ttm_tt_bind(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm, struct ttm_resource *bo_mem);
/**
* ttm_ttm_destroy:
*
@@ -184,15 +158,6 @@ int ttm_tt_bind(struct ttm_bo_device *bdev,
*/
void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
- /**
- * ttm_ttm_unbind:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind a struct ttm_tt.
- */
- void ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
/**
* ttm_tt_swapin:
*