Commit c1c7ce8f authored by Christian König, committed by Alex Deucher

drm/amdgpu: move GART recovery into GTT manager v2

The GTT manager handles the GART address space anyway, so it is
completely pointless to keep the same information around twice.

v2: rebased
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 3da917b6
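
In short: each GTT allocation now carries a back pointer to the buffer object it belongs to, so after a GPU reset the GTT manager can walk its own drm_mm and rebind every GART mapping itself, and amdgpu_device no longer needs to maintain a separate gtt_list. The sketch below condenses the new recovery path from the hunks that follow; it is not a verbatim copy of the patch (locking and error reporting are trimmed), all identifiers are the ones introduced or changed by the diff:

/* Each GTT node now remembers its buffer object. */
struct amdgpu_gtt_node {
	struct drm_mm_node node;
	struct ttm_buffer_object *tbo;
};

/* After a reset the manager walks its own address space ... */
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;
	struct drm_mm_node *mm_node;
	struct amdgpu_gtt_node *node;
	int r = 0;

	drm_mm_for_each_node(mm_node, &mgr->mm) {
		node = container_of(mm_node, struct amdgpu_gtt_node, node);
		r = amdgpu_ttm_recover_gart(node->tbo);
		if (r)
			break;
	}
	return r;
}

/* ... and each buffer object simply re-runs the normal GART bind. */
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
	uint64_t flags;

	if (!gtt)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
	return amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
				gtt->ttm.ttm.pages, gtt->ttm.dma_address,
				flags);
}

The reset paths then call amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]) instead of the old device-wide amdgpu_ttm_recover_gart(adev), as the first two file sections below show.
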
@@ -1630,9 +1630,6 @@ struct amdgpu_device {
 	/* link all shadow bo */
 	struct list_head		shadow_list;
 	struct mutex			shadow_list_lock;
-	/* link all gtt */
-	spinlock_t			gtt_list_lock;
-	struct list_head		gtt_list;
 	/* keep an lru list of rings by HW IP */
 	struct list_head		ring_lru_list;
 	spinlock_t			ring_lru_list_lock;
...
@@ -2180,9 +2180,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&adev->shadow_list);
 	mutex_init(&adev->shadow_list_lock);
-	INIT_LIST_HEAD(&adev->gtt_list);
-	spin_lock_init(&adev->gtt_list_lock);
 
 	INIT_LIST_HEAD(&adev->ring_lru_list);
 	spin_lock_init(&adev->ring_lru_list_lock);
@@ -2877,7 +2874,8 @@ static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags)
 			atomic_inc(&adev->vram_lost_counter);
 		}
 
-		r = amdgpu_ttm_recover_gart(adev);
+		r = amdgpu_gtt_mgr_recover(
+			&adev->mman.bdev.man[TTM_PL_TT]);
 		if (r)
 			goto out;
@@ -2939,7 +2937,7 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags,
 		goto error;
 
 	/* we need recover gart prior to run SMC/CP/SDMA resume */
-	amdgpu_ttm_recover_gart(adev);
+	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
 
 	/* now we are okay to resume SMC/CP/SDMA */
 	r = amdgpu_sriov_reinit_late(adev);
...
@@ -31,6 +31,11 @@ struct amdgpu_gtt_mgr {
 	atomic64_t available;
 };
 
+struct amdgpu_gtt_node {
+	struct drm_mm_node node;
+	struct ttm_buffer_object *tbo;
+};
+
 /**
  * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
  *
@@ -87,9 +92,9 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
  */
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
 {
-	struct drm_mm_node *node = mem->mm_node;
+	struct amdgpu_gtt_node *node = mem->mm_node;
 
-	return (node->start != AMDGPU_BO_INVALID_OFFSET);
+	return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
 }
 
 /**
@@ -109,7 +114,7 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
 	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct drm_mm_node *node = mem->mm_node;
+	struct amdgpu_gtt_node *node = mem->mm_node;
 	enum drm_mm_insert_mode mode;
 	unsigned long fpfn, lpfn;
 	int r;
@@ -132,13 +137,13 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 		mode = DRM_MM_INSERT_HIGH;
 
 	spin_lock(&mgr->lock);
-	r = drm_mm_insert_node_in_range(&mgr->mm, node,
-					mem->num_pages, mem->page_alignment, 0,
-					fpfn, lpfn, mode);
+	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
+					mem->page_alignment, 0, fpfn, lpfn,
+					mode);
 	spin_unlock(&mgr->lock);
 
 	if (!r)
-		mem->start = node->start;
+		mem->start = node->node.start;
 
 	return r;
 }
@@ -159,7 +164,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 			     struct ttm_mem_reg *mem)
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct drm_mm_node *node;
+	struct amdgpu_gtt_node *node;
 	int r;
 
 	spin_lock(&mgr->lock);
@@ -177,8 +182,9 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 		goto err_out;
 	}
 
-	node->start = AMDGPU_BO_INVALID_OFFSET;
-	node->size = mem->num_pages;
+	node->node.start = AMDGPU_BO_INVALID_OFFSET;
+	node->node.size = mem->num_pages;
+	node->tbo = tbo;
 	mem->mm_node = node;
 
 	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
@@ -190,7 +196,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 			goto err_out;
 		}
 	} else {
-		mem->start = node->start;
+		mem->start = node->node.start;
 	}
 
 	return 0;
@@ -214,14 +220,14 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
 			       struct ttm_mem_reg *mem)
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct drm_mm_node *node = mem->mm_node;
+	struct amdgpu_gtt_node *node = mem->mm_node;
 
 	if (!node)
 		return;
 
 	spin_lock(&mgr->lock);
-	if (node->start != AMDGPU_BO_INVALID_OFFSET)
-		drm_mm_remove_node(node);
+	if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
+		drm_mm_remove_node(&node->node);
 	spin_unlock(&mgr->lock);
 
 	atomic64_add(mem->num_pages, &mgr->available);
@@ -244,6 +250,25 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
 	return (result > 0 ? result : 0) * PAGE_SIZE;
 }
 
+int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+	struct amdgpu_gtt_node *node;
+	struct drm_mm_node *mm_node;
+	int r = 0;
+
+	spin_lock(&mgr->lock);
+	drm_mm_for_each_node(mm_node, &mgr->mm) {
+		node = container_of(mm_node, struct amdgpu_gtt_node, node);
+		r = amdgpu_ttm_recover_gart(node->tbo);
+		if (r)
+			break;
+	}
+	spin_unlock(&mgr->lock);
+
+	return r;
+}
+
 /**
  * amdgpu_gtt_mgr_debug - dump VRAM table
  *
...
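
Why the wrapper struct rather than the bare drm_mm_node: drm_mm_for_each_node() only hands back the embedded struct drm_mm_node, so the recovery walk needs a way from that node back to the owning buffer object. Embedding the node in amdgpu_gtt_node and recovering the wrapper with container_of() is the usual kernel idiom for this; a minimal illustration, using the same names as the hunk above with locking omitted:

	struct drm_mm_node *mm_node;	/* what drm_mm iterates over */
	struct amdgpu_gtt_node *node;	/* what the manager actually needs */

	drm_mm_for_each_node(mm_node, &mgr->mm) {
		/* Step from the embedded member back to its wrapper ... */
		node = container_of(mm_node, struct amdgpu_gtt_node, node);
		/* ... and from there to the BO whose GART entries get rebound. */
		amdgpu_ttm_recover_gart(node->tbo);
	}
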
@@ -689,7 +689,6 @@ struct amdgpu_ttm_tt {
 	struct list_head	guptasks;
 	atomic_t		mmu_invalidations;
 	uint32_t		last_set_pages;
-	struct list_head	list;
 };
 
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
@@ -865,21 +864,14 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 		return 0;
 	}
 
-	spin_lock(&gtt->adev->gtt_list_lock);
 	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
 	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
 		ttm->pages, gtt->ttm.dma_address, flags);
-	if (r) {
+	if (r)
 		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
 			  ttm->num_pages, gtt->offset);
-		goto error_gart_bind;
-	}
-
-	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
 	return r;
 }
@@ -920,29 +912,23 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo)
 	return r;
 }
 
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 {
-	struct amdgpu_ttm_tt *gtt, *tmp;
-	struct ttm_mem_reg bo_mem;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+	struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
 	uint64_t flags;
 	int r;
 
-	bo_mem.mem_type = TTM_PL_TT;
-	spin_lock(&adev->gtt_list_lock);
-	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
-		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
-		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
-				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
-				     flags);
-		if (r) {
-			spin_unlock(&adev->gtt_list_lock);
-			DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
-				  gtt->ttm.ttm.num_pages, gtt->offset);
-			return r;
-		}
-	}
-	spin_unlock(&adev->gtt_list_lock);
-	return 0;
+	if (!gtt)
+		return 0;
+
+	flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
+	r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+			     gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
+	if (r)
+		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+			  gtt->ttm.ttm.num_pages, gtt->offset);
+	return r;
 }
 
 static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
@@ -957,16 +943,10 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 		return 0;
 
 	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
-	spin_lock(&gtt->adev->gtt_list_lock);
 	r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
-	if (r) {
+	if (r)
 		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
 			  gtt->ttm.ttm.num_pages, gtt->offset);
-		goto error_unbind;
-	}
-	list_del_init(&gtt->list);
-error_unbind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
 	return r;
 }
@@ -1003,7 +983,6 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
 		kfree(gtt);
 		return NULL;
 	}
-	INIT_LIST_HEAD(&gtt->list);
 	return &gtt->ttm.ttm;
 }
...
@@ -69,6 +69,7 @@ extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
@@ -91,7 +92,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 int amdgpu_ttm_bind(struct ttm_buffer_object *bo);
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
+int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);