Commit 98a7f88c authored by Christian König, committed by Alex Deucher

drm/amdgpu: bind BOs with GTT space allocated directly v2

This avoids having to bind them later on in amdgpu_ttm_bind().

v2: fix typo in function name
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
parent 92c60d9c
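
Taken together, the three files below do one thing: amdgpu_ttm_backend_bind() now binds a BO into the GART immediately whenever the GTT manager has already assigned it an address, and both this eager path and the deferred path (amdgpu_ttm_bind()) funnel through the new shared amdgpu_ttm_do_bind() helper. What follows is a minimal, self-contained C model of that control flow; every type, constant, and function in it is an illustrative stand-in for the kernel symbols named in the hunks, not the kernel code itself.

/*
 * Minimal model of the bind flow this patch creates. All names here
 * are illustrative stand-ins, not the kernel's own symbols.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BO_INVALID_OFFSET UINT64_MAX	/* models AMDGPU_BO_INVALID_OFFSET */

struct mem_reg {
	uint64_t start;	/* GART offset, or the "not allocated yet" sentinel */
};

/* Models amdgpu_gtt_mgr_is_allocated(): has address space been assigned? */
static bool gtt_is_allocated(const struct mem_reg *mem)
{
	return mem->start != BO_INVALID_OFFSET;
}

/* Models amdgpu_ttm_do_bind(): the one shared GART-binding helper. */
static int do_bind(const struct mem_reg *mem)
{
	printf("bound at GART offset %llu\n", (unsigned long long)mem->start);
	return 0;
}

/* Models amdgpu_ttm_backend_bind(): after this patch it binds eagerly,
 * but only if the GTT manager already picked an address. */
static int backend_bind(const struct mem_reg *mem)
{
	int r = 0;

	if (gtt_is_allocated(mem))
		r = do_bind(mem);
	return r;
}

/* Models amdgpu_ttm_bind(): the deferred path allocates an address on
 * first GPU use, then reuses the same helper instead of duplicating it. */
static int deferred_bind(struct mem_reg *mem)
{
	if (!gtt_is_allocated(mem))
		mem->start = 42;	/* stand-in for amdgpu_gtt_mgr_alloc() */
	return do_bind(mem);
}

int main(void)
{
	struct mem_reg eager = { .start = 7 };	/* address known up front */
	struct mem_reg lazy = { .start = BO_INVALID_OFFSET };	/* deferred */

	backend_bind(&eager);	/* binds immediately */
	backend_bind(&lazy);	/* no-op: no address assigned yet */
	deferred_bind(&lazy);	/* allocates, then binds */
	return 0;
}

The payoff of the shared helper shows up in the last amdgpu_ttm.c hunk, where fifteen lines of duplicated lock/bind/error handling in amdgpu_ttm_bind() collapse into a single call to amdgpu_ttm_do_bind().
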
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c

@@ -80,6 +80,20 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
 	return 0;
 }
 
+/**
+ * amdgpu_gtt_mgr_is_allocated - Check if mem has address space
+ *
+ * @mem: the mem object to check
+ *
+ * Check if a mem object has already address space allocated.
+ */
+bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
+{
+	struct drm_mm_node *node = mem->mm_node;
+
+	return (node->start != AMDGPU_BO_INVALID_OFFSET);
+}
+
 /**
  * amdgpu_gtt_mgr_alloc - allocate new ranges
  *
@@ -101,7 +115,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 	unsigned long fpfn, lpfn;
 	int r;
 
-	if (node->start != AMDGPU_BO_INVALID_OFFSET)
+	if (amdgpu_gtt_mgr_is_allocated(mem))
 		return 0;
 
 	if (place)
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -681,6 +681,31 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 	sg_free_table(ttm->sg);
 }
 
+static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+	uint64_t flags;
+	int r;
+
+	spin_lock(&gtt->adev->gtt_list_lock);
+	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem);
+	gtt->offset = (u64)mem->start << PAGE_SHIFT;
+	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
+			     ttm->pages, gtt->ttm.dma_address, flags);
+
+	if (r) {
+		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+			  ttm->num_pages, gtt->offset);
+		goto error_gart_bind;
+	}
+
+	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+error_gart_bind:
+	spin_unlock(&gtt->adev->gtt_list_lock);
+	return r;
+}
+
 static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 				   struct ttm_mem_reg *bo_mem)
 {
@@ -704,7 +729,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 	    bo_mem->mem_type == AMDGPU_PL_OA)
 		return -EINVAL;
 
-	return 0;
+	if (amdgpu_gtt_mgr_is_allocated(bo_mem))
+		r = amdgpu_ttm_do_bind(ttm, bo_mem);
+
+	return r;
 }
 
 bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
@@ -717,8 +745,6 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
 int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
-	struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
-	uint64_t flags;
 	int r;
 
 	if (!ttm || amdgpu_ttm_is_bound(ttm))
@@ -731,22 +757,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
 		return r;
 	}
 
-	spin_lock(&gtt->adev->gtt_list_lock);
-	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
-	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
-	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
-			     ttm->pages, gtt->ttm.dma_address, flags);
-
-	if (r) {
-		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
-			  ttm->num_pages, gtt->offset);
-		goto error_gart_bind;
-	}
-
-	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
-	return r;
+	return amdgpu_ttm_do_bind(ttm, bo_mem);
 }
 
 int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

@@ -56,6 +56,7 @@ struct amdgpu_mman {
 extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
 extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
 
+bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem);
 int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *tbo,
 			 const struct ttm_place *place,