Commit 7db47b83 authored by Christian König

drm/amdgpu: remove VRAM accounting v2

This is provided by TTM now.

Also switch man->size to bytes instead of pages and fix the double
printing of size and usage in debugfs.

v2: fix size checking as well
Signed-off-by: Christian König <christian.koenig@amd.com>
Tested-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220214093439.2989-8-christian.koenig@amd.com
parent 3fc2b087
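
The driver-side atomic usage counter goes away because struct ttm_resource_manager now carries a usage field maintained by TTM itself. As a rough, paraphrased sketch (assumed from the drm/ttm side of this series, not part of this diff), the helper used throughout the hunks below simply reads that field under the device LRU lock:

#include <drm/ttm/ttm_resource.h>

/* Paraphrased sketch of the TTM helper that replaces amdgpu_vram_mgr_usage();
 * see drm/ttm for the authoritative implementation.
 */
uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
	uint64_t usage;

	/* man->usage is updated by ttm_resource_init()/ttm_resource_fini()
	 * and protected by the device-wide LRU lock.
	 */
	spin_lock(&man->bdev->lru_lock);
	usage = man->usage;
	spin_unlock(&man->bdev->lru_lock);
	return usage;
}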
@@ -314,7 +314,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 	}
 
 	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
-	used_vram = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
+	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
 	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 
 	spin_lock(&adev->mm_stats.lock);
...
@@ -678,7 +678,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_VRAM_USAGE:
-		ui64 = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
+		ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_VIS_VRAM_USAGE:
 		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
@@ -717,6 +717,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		struct drm_amdgpu_memory_info mem;
 		struct ttm_resource_manager *gtt_man =
 			&adev->mman.gtt_mgr.manager;
+		struct ttm_resource_manager *vram_man =
+			&adev->mman.vram_mgr.manager;
 
 		memset(&mem, 0, sizeof(mem));
 		mem.vram.total_heap_size = adev->gmc.real_vram_size;
@@ -724,7 +726,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			atomic64_read(&adev->vram_pin_size) -
 			AMDGPU_VM_RESERVED_VRAM;
 		mem.vram.heap_usage =
-			amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
+			ttm_resource_manager_usage(vram_man);
 		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
 
 		mem.cpu_accessible_vram.total_heap_size =
...
@@ -460,7 +460,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
 	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
 		man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 
-		if (size < (man->size << PAGE_SHIFT))
+		if (size < man->size)
 			return true;
 		else
 			goto fail;
...
@@ -1884,7 +1884,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 		size = adev->gmc.real_vram_size;
 	else
 		size = adev->gmc.visible_vram_size;
-	man->size = size >> PAGE_SHIFT;
+	man->size = size;
 	adev->mman.buffer_funcs_enabled = enable;
 }
...
@@ -44,7 +44,6 @@ struct amdgpu_vram_mgr {
 	spinlock_t lock;
 	struct list_head reservations_pending;
 	struct list_head reserved_pages;
-	atomic64_t usage;
 	atomic64_t vis_usage;
 };
@@ -122,7 +121,6 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 void amdgpu_vram_mgr_free_sgt(struct device *dev,
 			      enum dma_data_direction dir,
 			      struct sg_table *sgt);
-uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr);
 uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr);
 int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
 				  uint64_t start, uint64_t size);
...
@@ -575,8 +575,10 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
 	vf2pf_info->driver_cert = 0;
 	vf2pf_info->os_info.all = 0;
 
-	vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr) >> 20;
-	vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
+	vf2pf_info->fb_usage =
+		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
+	vf2pf_info->fb_vis_usage =
+		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
 	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
 	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
...
@@ -96,9 +96,9 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = drm_to_adev(ddev);
+	struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager;
 
-	return sysfs_emit(buf, "%llu\n",
-			  amdgpu_vram_mgr_usage(&adev->mman.vram_mgr));
+	return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
 }
 
 /**
@@ -253,7 +253,9 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
 
 		vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
 		atomic64_add(vis_usage, &mgr->vis_usage);
-		atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage);
+		spin_lock(&man->bdev->lru_lock);
+		man->usage += rsv->mm_node.size << PAGE_SHIFT;
+		spin_unlock(&man->bdev->lru_lock);
 		list_move(&rsv->node, &mgr->reserved_pages);
 	}
 }
@@ -378,19 +380,13 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 
 	lpfn = place->lpfn;
 	if (!lpfn)
-		lpfn = man->size;
+		lpfn = man->size >> PAGE_SHIFT;
 
 	max_bytes = adev->gmc.mc_vram_size;
 	if (tbo->type != ttm_bo_type_kernel)
 		max_bytes -= AMDGPU_VM_RESERVED_VRAM;
 
-	/* bail out quickly if there's likely not enough VRAM for this BO */
 	mem_bytes = tbo->base.size;
-	if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
-		r = -ENOSPC;
-		goto error_sub;
-	}
-
 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
 		pages_per_node = ~0ul;
 		num_nodes = 1;
@@ -408,13 +404,17 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
 			GFP_KERNEL | __GFP_ZERO);
-	if (!node) {
-		r = -ENOMEM;
-		goto error_sub;
-	}
+	if (!node)
+		return -ENOMEM;
 
 	ttm_resource_init(tbo, place, &node->base);
 
+	/* bail out quickly if there's likely not enough VRAM for this BO */
+	if (ttm_resource_manager_usage(man) > max_bytes) {
+		r = -ENOSPC;
+		goto error_fini;
+	}
+
 	mode = DRM_MM_INSERT_BEST;
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		mode = DRM_MM_INSERT_HIGH;
@@ -472,11 +472,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	while (i--)
 		drm_mm_remove_node(&node->mm_nodes[i]);
 	spin_unlock(&mgr->lock);
+error_fini:
 	ttm_resource_fini(man, &node->base);
 	kvfree(node);
-error_sub:
-	atomic64_sub(mem_bytes, &mgr->usage);
 	return r;
 }
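
The v2 note ("fix size checking as well") shows up in the two hunks above: the -ENOSPC check now runs after ttm_resource_init(), because TTM accounts the new resource into man->usage during init, and the error path has to undo that through ttm_resource_fini() (hence the new error_fini label). A rough, paraphrased sketch of the assumed TTM-side bookkeeping this ordering relies on (not part of this patch):

/* Paraphrased sketch: drm/ttm adds the resource size to man->usage when a
 * resource is initialized and subtracts it again when the resource is
 * finalized, both under the device-wide bdev->lru_lock.
 */
void ttm_resource_init(struct ttm_buffer_object *bo,
		       const struct ttm_place *place,
		       struct ttm_resource *res)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, place->mem_type);

	/* ... fill in res->start, res->num_pages, res->mem_type, ... */

	spin_lock(&bo->bdev->lru_lock);
	man->usage += res->num_pages << PAGE_SHIFT;
	spin_unlock(&bo->bdev->lru_lock);
}

void ttm_resource_fini(struct ttm_resource_manager *man,
		       struct ttm_resource *res)
{
	spin_lock(&man->bdev->lru_lock);
	man->usage -= res->num_pages << PAGE_SHIFT;
	spin_unlock(&man->bdev->lru_lock);
}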
@@ -494,7 +493,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
 	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
-	uint64_t usage = 0, vis_usage = 0;
+	uint64_t vis_usage = 0;
 	unsigned i, pages;
 
 	spin_lock(&mgr->lock);
@@ -503,13 +502,11 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
 		struct drm_mm_node *mm = &node->mm_nodes[i];
 
 		drm_mm_remove_node(mm);
-		usage += mm->size << PAGE_SHIFT;
 		vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
 	}
 	amdgpu_vram_mgr_do_reserve(man);
 	spin_unlock(&mgr->lock);
 
-	atomic64_sub(usage, &mgr->usage);
 	atomic64_sub(vis_usage, &mgr->vis_usage);
 
 	ttm_resource_fini(man, res);
@@ -627,18 +624,6 @@ void amdgpu_vram_mgr_free_sgt(struct device *dev,
 	kfree(sgt);
 }
 
-/**
- * amdgpu_vram_mgr_usage - how many bytes are used in this domain
- *
- * @mgr: amdgpu_vram_mgr pointer
- *
- * Returns how many bytes are used in this domain.
- */
-uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr)
-{
-	return atomic64_read(&mgr->usage);
-}
-
 /**
  * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
  *
@@ -664,13 +649,12 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
 {
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 
+	drm_printf(printer, "  vis usage:%llu\n",
+		   amdgpu_vram_mgr_vis_usage(mgr));
+
 	spin_lock(&mgr->lock);
 	drm_mm_print(&mgr->mm, printer);
 	spin_unlock(&mgr->lock);
-
-	drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
-		   man->size, amdgpu_vram_mgr_usage(mgr) >> 20,
-		   amdgpu_vram_mgr_vis_usage(mgr) >> 20);
 }
 
 static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
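
The removed drm_printf() above is the "double printing" mentioned in the commit message: the common TTM debug path already reports the manager's size and usage before invoking the per-driver callback, so only the AMD-specific visible-VRAM number stays in the driver. Roughly, as a paraphrased sketch of the assumed TTM debug output (not taken from this diff, exact format strings are an assumption):

/* Paraphrased sketch of ttm_resource_manager_debug(): size and usage are
 * printed by TTM before the driver's func->debug callback
 * (amdgpu_vram_mgr_debug) runs.
 */
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
				struct drm_printer *p)
{
	drm_printf(p, "  use_type: %d\n", man->use_type);
	drm_printf(p, "  use_tt: %d\n", man->use_tt);
	drm_printf(p, "  size: %llu\n", man->size);
	drm_printf(p, "  usage: %llu\n", ttm_resource_manager_usage(man));
	if (man->func->debug)
		man->func->debug(man, p);
}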
@@ -692,11 +676,11 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
 	struct ttm_resource_manager *man = &mgr->manager;
 
 	ttm_resource_manager_init(man, &adev->mman.bdev,
-				  adev->gmc.real_vram_size >> PAGE_SHIFT);
+				  adev->gmc.real_vram_size);
 
 	man->func = &amdgpu_vram_mgr_func;
 
-	drm_mm_init(&mgr->mm, 0, man->size);
+	drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT);
 	spin_lock_init(&mgr->lock);
 	INIT_LIST_HEAD(&mgr->reservations_pending);
 	INIT_LIST_HEAD(&mgr->reserved_pages);
...