Commit 1e691e24 authored by Christian König

drm/amdgpu: stop allocating dummy GTT nodes

Now that TTM is fixed up we can finally stop that nonsense.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Madhav Chauhan <madhav.chauhan@amd.com>
Link: https://patchwork.freedesktop.org/patch/375620
parent e04be231
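In short: before this patch every GTT placement was given a struct amdgpu_gtt_node, even when the buffer did not need a GART address yet; "no address" was encoded by setting the dummy node's start to AMDGPU_BO_INVALID_OFFSET. After it, only constrained placements (place->lpfn != 0) receive a drm_mm node, and an absent node means "no GART address". A minimal sketch of the caller-visible invariant, using a hypothetical helper that is not part of the commit:

/* Hypothetical helper, simplified for illustration: after this commit a
 * GTT buffer either has a real drm_mm node (and thus a GART address) or
 * mem->mm_node == NULL with mem->start == AMDGPU_BO_INVALID_OFFSET.
 */
static bool gtt_needs_bind(struct ttm_mem_reg *mem)
{
	return !amdgpu_gtt_mgr_has_gart_addr(mem);
}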
...
@@ -150,60 +150,7 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
  */
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
 {
-	struct amdgpu_gtt_node *node = mem->mm_node;
-
-	return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
-}
-
-/**
- * amdgpu_gtt_mgr_alloc - allocate new ranges
- *
- * @man: TTM memory type manager
- * @tbo: TTM BO we need this range for
- * @place: placement flags and restrictions
- * @mem: the resulting mem object
- *
- * Allocate the address space for a node.
- */
-static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
-				struct ttm_buffer_object *tbo,
-				const struct ttm_place *place,
-				struct ttm_mem_reg *mem)
-{
-	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
-	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct amdgpu_gtt_node *node = mem->mm_node;
-	enum drm_mm_insert_mode mode;
-	unsigned long fpfn, lpfn;
-	int r;
-
-	if (amdgpu_gtt_mgr_has_gart_addr(mem))
-		return 0;
-
-	if (place)
-		fpfn = place->fpfn;
-	else
-		fpfn = 0;
-
-	if (place && place->lpfn)
-		lpfn = place->lpfn;
-	else
-		lpfn = adev->gart.num_cpu_pages;
-
-	mode = DRM_MM_INSERT_BEST;
-	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
-		mode = DRM_MM_INSERT_HIGH;
-
-	spin_lock(&mgr->lock);
-	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
-					mem->page_alignment, 0, fpfn, lpfn,
-					mode);
-	spin_unlock(&mgr->lock);
-
-	if (!r)
-		mem->start = node->node.start;
-
-	return r;
+	return mem->mm_node != NULL;
 }
 
 /**
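The removed amdgpu_gtt_mgr_alloc() mostly computed defaults for fpfn, lpfn and the insert mode before calling drm_mm_insert_node_in_range(); that call now happens directly in amdgpu_gtt_mgr_new() (next hunk). For reference, a standalone sketch of the drm_mm API involved, with illustrative sizes that are not taken from the patch:

#include <drm/drm_mm.h>

/* Illustration only: insert a 16-page node anywhere in [0, 1 << 20)
 * pages, preferring the smallest suitable hole, then release it.
 */
static int example_drm_mm_usage(struct drm_mm *mm)
{
	struct drm_mm_node node = {};
	int r;

	r = drm_mm_insert_node_in_range(mm, &node, 16, 0, 0,
					0, 1 << 20, DRM_MM_INSERT_BEST);
	if (r)
		return r;	/* -ENOSPC when no hole is large enough */

	drm_mm_remove_node(&node);
	return 0;
}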
...
@@ -234,29 +181,37 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 	atomic64_sub(mem->num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
 
+	if (!place->lpfn) {
+		mem->mm_node = NULL;
+		mem->start = AMDGPU_BO_INVALID_OFFSET;
+		return 0;
+	}
+
 	node = kzalloc(sizeof(*node), GFP_KERNEL);
 	if (!node) {
 		r = -ENOMEM;
 		goto err_out;
 	}
 
-	node->node.start = AMDGPU_BO_INVALID_OFFSET;
-	node->node.size = mem->num_pages;
 	node->tbo = tbo;
-	mem->mm_node = node;
-	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
-		r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
-		if (unlikely(r)) {
-			kfree(node);
-			mem->mm_node = NULL;
-			goto err_out;
-		}
-	} else {
-		mem->start = node->node.start;
-	}
+
+	spin_lock(&mgr->lock);
+	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
+					mem->page_alignment, 0, place->fpfn,
+					place->lpfn, DRM_MM_INSERT_BEST);
+	spin_unlock(&mgr->lock);
+
+	if (unlikely(r))
+		goto err_free;
+
+	mem->mm_node = node;
+	mem->start = node->node.start;
 
 	return 0;
 
+err_free:
+	kfree(node);
+
 err_out:
 	atomic64_add(mem->num_pages, &mgr->available);
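The reworked error path follows the usual kernel unwind idiom: labels in reverse order of acquisition, so err_free drops the node and falls through to err_out, which returns the pages reserved at the top of the function. A generic, self-contained sketch of that pattern (identifiers hypothetical, not from the patch):

#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical example mirroring the err_free/err_out structure above. */
static int example_unwind(void **obj)
{
	int r;

	*obj = kzalloc(64, GFP_KERNEL);
	if (!*obj)
		return -ENOMEM;		/* nothing acquired yet, plain return */

	r = -EINVAL;			/* pretend a later step failed */
	if (r)
		goto err_free;		/* unwind in reverse order */

	return 0;

err_free:
	kfree(*obj);
	*obj = NULL;
	return r;
}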
...
@@ -279,17 +234,14 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
 	struct amdgpu_gtt_mgr *mgr = man->priv;
 	struct amdgpu_gtt_node *node = mem->mm_node;
 
-	if (!node)
-		return;
-
-	spin_lock(&mgr->lock);
-	if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
-		drm_mm_remove_node(&node->node);
-	spin_unlock(&mgr->lock);
-	atomic64_add(mem->num_pages, &mgr->available);
-
-	kfree(node);
-	mem->mm_node = NULL;
+	if (node) {
+		spin_lock(&mgr->lock);
+		drm_mm_remove_node(&node->node);
+		spin_unlock(&mgr->lock);
+		kfree(node);
+	}
+
+	atomic64_add(mem->num_pages, &mgr->available);
 }
 
 /**
...
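Note that the page accounting is now symmetric with the node-less case: amdgpu_gtt_mgr_new() subtracts num_pages from mgr->available before it decides whether to allocate a node, so amdgpu_gtt_mgr_del() adds them back unconditionally, node or not. A simplified sketch of that reserve/release pairing (hypothetical counter, without the manager's actual locking):

#include <linux/atomic.h>

/* Hypothetical illustration of the reserve/release pairing on an
 * atomic64 counter, as done with mgr->available above.
 */
static atomic64_t available = ATOMIC64_INIT(1024);

static bool reserve_pages(s64 num)
{
	if (atomic64_read(&available) < num)
		return false;	/* the real code checks this under mgr->lock */
	atomic64_sub(num, &available);
	return true;
}

static void release_pages(s64 num)
{
	atomic64_add(num, &available);
}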
@@ -429,12 +429,22 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 	}
 
 	src_offset = src->offset;
-	src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
-	src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
+	if (src->mem->mm_node) {
+		src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
+		src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
+	} else {
+		src_mm = NULL;
+		src_node_size = ULLONG_MAX;
+	}
 
 	dst_offset = dst->offset;
-	dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
-	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
+	if (dst->mem->mm_node) {
+		dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
+		dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
+	} else {
+		dst_mm = NULL;
+		dst_node_size = ULLONG_MAX;
+	}
 
 	mutex_lock(&adev->mman.gtt_window_lock);
...
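The ULLONG_MAX sentinel works because the copy loop only advances to the next drm_mm node once the current node's remaining size hits zero; a node-less buffer therefore behaves like one contiguous node that never runs out. A simplified model of that loop (hypothetical function, not the driver's actual code):

#include <linux/kernel.h>	/* min3() */

/* Hypothetical model of the node walk in amdgpu_ttm_copy_mem_to_mem():
 * each pass copies min3(remaining, src_node_size, dst_node_size), and a
 * node size that starts at ULLONG_MAX can never reach zero first.
 */
static void model_copy(u64 size, u64 src_node_size, u64 dst_node_size)
{
	while (size) {
		u64 cur = min3(size, src_node_size, dst_node_size);

		size -= cur;
		src_node_size -= cur;	/* stays huge for a node-less src */
		dst_node_size -= cur;
		/* the real code advances src_mm/dst_mm here when a
		 * node's remaining size reaches zero */
	}
}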