Commit 9255d77d authored by Christian König, committed by Alex Deucher

drm/amdgpu: move gtt usage tracking into the gtt manager v2

It doesn't make much sense to count those numbers twice.

v2: use an atomic64_t instead
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 97cbb284
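Before the diff, a short standalone sketch may help: the patch drops the separately maintained adev->gtt_usage counter and lets the GTT manager do the bookkeeping itself, tracking free pages in one atomic "available" counter and deriving byte usage on demand. The userspace C sketch below mirrors that model only; GTT_PAGE_SIZE, gtt_mgr_sketch and the C11 stdatomic calls are illustrative stand-ins for the kernel's atomic64_t helpers and man->size, not kernel code.

/*
 * Minimal userspace sketch (not part of the patch) of the accounting model
 * introduced here: one atomic "available" page counter set at init,
 * decremented on allocation, incremented on free, with byte usage derived
 * on demand the way amdgpu_gtt_mgr_usage() does.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SIZE 4096ull

struct gtt_mgr_sketch {
        uint64_t size;                   /* manager size in pages (man->size) */
        atomic_uint_fast64_t available;  /* pages still available */
};

static void mgr_init(struct gtt_mgr_sketch *mgr, uint64_t pages)
{
        mgr->size = pages;
        atomic_store(&mgr->available, pages);
}

static int mgr_alloc(struct gtt_mgr_sketch *mgr, uint64_t pages)
{
        /* the kernel keeps this check+sub under mgr->lock; skipped for brevity */
        if (atomic_load(&mgr->available) < pages)
                return -1;               /* not enough space */
        atomic_fetch_sub(&mgr->available, pages);
        return 0;
}

static void mgr_free(struct gtt_mgr_sketch *mgr, uint64_t pages)
{
        atomic_fetch_add(&mgr->available, pages);
}

/* usage in bytes = (size - available) * page size, as in amdgpu_gtt_mgr_usage() */
static uint64_t mgr_usage(struct gtt_mgr_sketch *mgr)
{
        return (mgr->size - atomic_load(&mgr->available)) * GTT_PAGE_SIZE;
}

int main(void)
{
        struct gtt_mgr_sketch mgr;

        mgr_init(&mgr, 1024);            /* 4 MiB worth of GTT pages */
        mgr_alloc(&mgr, 256);
        printf("usage: %llu bytes\n", (unsigned long long)mgr_usage(&mgr));
        mgr_free(&mgr, 256);
        return 0;
}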
@@ -1484,7 +1484,6 @@ struct amdgpu_device {
         struct amdgpu_wb                wb;
         atomic64_t                      vram_usage;
         atomic64_t                      vram_vis_usage;
-        atomic64_t                      gtt_usage;
         atomic64_t                      num_bytes_moved;
         atomic64_t                      num_evictions;
         atomic64_t                      num_vram_cpu_page_faults;
......
......@@ -28,7 +28,7 @@
struct amdgpu_gtt_mgr {
struct drm_mm mm;
spinlock_t lock;
uint64_t available;
atomic64_t available;
};
/**
@@ -54,7 +54,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
         size = (adev->mc.gart_size >> PAGE_SHIFT) - start;
         drm_mm_init(&mgr->mm, start, size);
         spin_lock_init(&mgr->lock);
-        mgr->available = p_size;
+        atomic64_set(&mgr->available, p_size);
         man->priv = mgr;
         return 0;
 }
@@ -173,11 +173,11 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
         int r;
         spin_lock(&mgr->lock);
-        if (mgr->available < mem->num_pages) {
+        if (atomic64_read(&mgr->available) < mem->num_pages) {
                 spin_unlock(&mgr->lock);
                 return 0;
         }
-        mgr->available -= mem->num_pages;
+        atomic64_sub(mem->num_pages, &mgr->available);
         spin_unlock(&mgr->lock);
         node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -204,9 +204,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
         return 0;
 err_out:
-        spin_lock(&mgr->lock);
-        mgr->available += mem->num_pages;
-        spin_unlock(&mgr->lock);
+        atomic64_add(mem->num_pages, &mgr->available);
         return r;
 }
@@ -233,13 +231,27 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
         spin_lock(&mgr->lock);
         if (node->start != AMDGPU_BO_INVALID_OFFSET)
                 drm_mm_remove_node(node);
-        mgr->available += mem->num_pages;
         spin_unlock(&mgr->lock);
+        atomic64_add(mem->num_pages, &mgr->available);
         kfree(node);
         mem->mm_node = NULL;
 }
 /**
+ * amdgpu_gtt_mgr_usage - return usage of GTT domain
+ *
+ * @man: TTM memory type manager
+ *
+ * Return how many bytes are used in the GTT domain
+ */
+uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
+{
+        struct amdgpu_gtt_mgr *mgr = man->priv;
+        return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE;
+}
+
+/**
  * amdgpu_gtt_mgr_debug - dump VRAM table
  *
@@ -251,7 +263,6 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
 static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
                                  struct drm_printer *printer)
 {
-        struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
         struct amdgpu_gtt_mgr *mgr = man->priv;
         spin_lock(&mgr->lock);
@@ -259,8 +270,8 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
         spin_unlock(&mgr->lock);
         drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
-                   man->size, mgr->available,
-                   (u64)atomic64_read(&adev->gtt_usage) >> 20);
+                   man->size, (u64)atomic64_read(&mgr->available),
+                   amdgpu_gtt_mgr_usage(man) >> 20);
 }
 const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
......
@@ -461,7 +461,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                 ui64 = atomic64_read(&adev->vram_vis_usage);
                 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
         case AMDGPU_INFO_GTT_USAGE:
-                ui64 = atomic64_read(&adev->gtt_usage);
+                ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
                 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
         case AMDGPU_INFO_GDS_CONFIG: {
                 struct drm_amdgpu_info_gds gds_info;
@@ -514,7 +514,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                 mem.gtt.total_heap_size *= PAGE_SIZE;
                 mem.gtt.usable_heap_size = mem.gtt.total_heap_size
                         - adev->gart_pin_size;
-                mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+                mem.gtt.heap_usage =
+                        amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
                 mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
                 return copy_to_user(out, &mem,
......
@@ -62,7 +62,6 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
         if (new_mem) {
                 switch (new_mem->mem_type) {
                 case TTM_PL_TT:
-                        atomic64_add(new_mem->size, &adev->gtt_usage);
                         break;
                 case TTM_PL_VRAM:
                         atomic64_add(new_mem->size, &adev->vram_usage);
@@ -75,7 +74,6 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
         if (old_mem) {
                 switch (old_mem->mem_type) {
                 case TTM_PL_TT:
-                        atomic64_sub(old_mem->size, &adev->gtt_usage);
                         break;
                 case TTM_PL_VRAM:
                         atomic64_sub(old_mem->size, &adev->vram_usage);
......
@@ -66,6 +66,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
                          struct ttm_buffer_object *tbo,
                          const struct ttm_place *place,
                          struct ttm_mem_reg *mem);
+uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                        uint64_t dst_offset, uint32_t byte_count,
......