Commit d624e1bf authored by Christian König

drm/amdgpu: revert "drm/amdgpu: stop allocating dummy GTT nodes"

TTM is going to need this again since we are moving the resource
allocation into the backend.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602100914.46246-4-christian.koenig@amd.com
parent db734953
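
Editor's note: the key piece this revert brings back is the amdgpu_gtt_node wrapper around the drm_mm_node that TTM tracks in ttm_resource::mm_node, together with a to_amdgpu_gtt_node() helper that uses container_of() to get from the embedded node back to the wrapper. Below is a minimal standalone sketch of that idiom, not driver code: plain C with simplified stand-in structs and the bare container_of() macro (the kernel's version also type-checks the member), and the helper here takes the drm_mm_node pointer directly instead of going through ttm_resource::mm_node.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures touched by this patch. */
struct drm_mm_node {
        unsigned long start;
        unsigned long size;
};

struct ttm_buffer_object {
        int placeholder;
};

struct amdgpu_gtt_node {
        struct drm_mm_node node;        /* embedded member TTM points at */
        struct ttm_buffer_object *tbo;  /* extra driver-private state */
};

/* The usual container_of() idiom: member pointer -> enclosing structure. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct amdgpu_gtt_node *to_amdgpu_gtt_node(struct drm_mm_node *mm_node)
{
        return container_of(mm_node, struct amdgpu_gtt_node, node);
}

int main(void)
{
        struct ttm_buffer_object tbo;
        struct amdgpu_gtt_node gtt = {
                .node = { .start = 42, .size = 8 },
                .tbo = &tbo,
        };
        /* TTM only ever sees the embedded node... */
        struct drm_mm_node *mm_node = &gtt.node;

        /* ...yet the manager can recover its wrapper and private data. */
        printf("start=%lu tbo=%p\n",
               to_amdgpu_gtt_node(mm_node)->node.start,
               (void *)to_amdgpu_gtt_node(mm_node)->tbo);
        return 0;
}

Because TTM only stores a pointer to the embedded drm_mm_node, the wrapper can carry additional per-allocation state (here the owning ttm_buffer_object) without TTM needing to know about it.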
@@ -24,16 +24,22 @@
 
 #include "amdgpu.h"
 
+struct amdgpu_gtt_node {
+        struct drm_mm_node node;
+        struct ttm_buffer_object *tbo;
+};
+
 static inline struct amdgpu_gtt_mgr *
 to_gtt_mgr(struct ttm_resource_manager *man)
 {
         return container_of(man, struct amdgpu_gtt_mgr, manager);
 }
 
-struct amdgpu_gtt_node {
-        struct drm_mm_node node;
-        struct ttm_buffer_object *tbo;
-};
+static inline struct amdgpu_gtt_node *
+to_amdgpu_gtt_node(struct ttm_resource *res)
+{
+        return container_of(res->mm_node, struct amdgpu_gtt_node, node);
+}
 
 /**
  * DOC: mem_info_gtt_total
@@ -99,7 +105,9 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
  */
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem)
 {
-        return mem->mm_node != NULL;
+        struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
+
+        return drm_mm_node_allocated(&node->node);
 }
 
 /**
@@ -130,12 +138,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
         atomic64_sub(mem->num_pages, &mgr->available);
         spin_unlock(&mgr->lock);
 
-        if (!place->lpfn) {
-                mem->mm_node = NULL;
-                mem->start = AMDGPU_BO_INVALID_OFFSET;
-                return 0;
-        }
-
         node = kzalloc(sizeof(*node), GFP_KERNEL);
         if (!node) {
                 r = -ENOMEM;
@@ -143,19 +145,25 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
         }
 
         node->tbo = tbo;
+        if (place->lpfn) {
+                spin_lock(&mgr->lock);
+                r = drm_mm_insert_node_in_range(&mgr->mm, &node->node,
+                                                mem->num_pages,
+                                                tbo->page_alignment, 0,
+                                                place->fpfn, place->lpfn,
+                                                DRM_MM_INSERT_BEST);
+                spin_unlock(&mgr->lock);
+                if (unlikely(r))
+                        goto err_free;
 
-        spin_lock(&mgr->lock);
-        r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
-                                        tbo->page_alignment, 0, place->fpfn,
-                                        place->lpfn, DRM_MM_INSERT_BEST);
-        spin_unlock(&mgr->lock);
-
-        if (unlikely(r))
-                goto err_free;
-
-        mem->mm_node = node;
-        mem->start = node->node.start;
+                mem->start = node->node.start;
+        } else {
+                node->node.start = 0;
+                node->node.size = mem->num_pages;
+                mem->start = AMDGPU_BO_INVALID_OFFSET;
+        }
 
+        mem->mm_node = &node->node;
         return 0;
 
 err_free:
@@ -178,17 +186,19 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
                                struct ttm_resource *mem)
 {
+        struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(mem);
         struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-        struct amdgpu_gtt_node *node = mem->mm_node;
 
-        if (node) {
-                spin_lock(&mgr->lock);
-                drm_mm_remove_node(&node->node);
-                spin_unlock(&mgr->lock);
-                kfree(node);
-        }
+        if (!node)
+                return;
 
+        spin_lock(&mgr->lock);
+        if (drm_mm_node_allocated(&node->node))
+                drm_mm_remove_node(&node->node);
+        spin_unlock(&mgr->lock);
         atomic64_add(mem->num_pages, &mgr->available);
+
+        kfree(node);
 }
 
 /**
...
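
Editor's note: with the dummy nodes restored, every GTT resource owns an amdgpu_gtt_node again; whether the allocation actually has a GART address is encoded by whether the embedded drm_mm_node was ever inserted into the drm_mm, which is what drm_mm_node_allocated() and therefore amdgpu_gtt_mgr_has_gart_addr() report. Below is a rough userspace sketch of that distinction, under the assumption that a plain bool can stand in for drm_mm_node_allocated() and a fixed offset for the real allocator; the toy_* names are invented for illustration and are not driver code.

#include <stdbool.h>
#include <stdio.h>

#define AMDGPU_BO_INVALID_OFFSET (~0ul)

/* Toy model: 'inserted' plays the role of drm_mm_node_allocated(). */
struct toy_mm_node {
        unsigned long start;
        unsigned long size;
        bool inserted;
};

struct toy_gtt_node {
        struct toy_mm_node node;
};

/* Mirrors the flow of the patched amdgpu_gtt_mgr_new(): only requests
 * constrained to a GART window (lpfn != 0) get a real range; everything
 * else keeps a dummy, never-inserted node. */
static unsigned long toy_gtt_new(struct toy_gtt_node *n, unsigned long pages,
                                 unsigned long lpfn)
{
        if (lpfn) {
                n->node.start = 0;      /* pretend the allocator chose offset 0 */
                n->node.size = pages;
                n->node.inserted = true;
                return n->node.start;
        }
        n->node.start = 0;              /* dummy node, as in the revert */
        n->node.size = pages;
        n->node.inserted = false;
        return AMDGPU_BO_INVALID_OFFSET;
}

/* Equivalent of amdgpu_gtt_mgr_has_gart_addr() in this toy model. */
static bool toy_has_gart_addr(const struct toy_gtt_node *n)
{
        return n->node.inserted;
}

int main(void)
{
        struct toy_gtt_node bound, unbound;

        toy_gtt_new(&bound, 16, 256);   /* constrained -> gets a GART range */
        toy_gtt_new(&unbound, 16, 0);   /* unconstrained -> dummy node only */

        printf("bound has GART addr: %d, unbound has GART addr: %d\n",
               toy_has_gart_addr(&bound), toy_has_gart_addr(&unbound));
        return 0;
}

This is also what lets both kinds of resources go through the same amdgpu_gtt_mgr_del() path: the drm_mm node is only removed when it was actually allocated, and the wrapper is kfree()'d unconditionally.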