Commit b61857b5 authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: set bulk_moveable to false when lru changed v2

If the LRU is changed, we cannot do bulk moving.

v2:
The root BO isn't part of the bulk move, so skip its changes.
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent ae6d3435
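For context: amdgpu keeps the BOs that belong to one VM grouped together on the TTM LRU and records that grouping in vm->bulk_moveable so the whole range can be shifted to the LRU tail at once. Once any of those BOs is taken off the LRU, the cached grouping is stale and the next move-to-tail has to be rebuilt BO by BO; the v2 note reflects that the root page-directory BO is not part of the bulk move, which appears to be what the !abo->parent early return in the new helper skips. Below is a minimal user-space sketch of that bookkeeping; toy_vm, toy_bo and the helpers are invented for illustration and are not amdgpu or TTM code.

/*
 * Minimal model of the invariant this patch enforces: removing a BO from
 * the LRU invalidates the cached "everything is grouped" knowledge, so the
 * next move-to-tail must rebuild the range per BO.  Illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_BOS 4

struct toy_bo {
	int id;
	bool on_lru;
};

struct toy_vm {
	struct toy_bo bos[MAX_BOS];
	/* Cached knowledge that all per-VM BOs sit together on the LRU. */
	bool bulk_moveable;
};

/* Analogous role to del_from_lru_notify: once a BO leaves the LRU, the
 * cached bulk-move range no longer covers the VM, so drop the fast path. */
static void toy_del_from_lru(struct toy_vm *vm, struct toy_bo *bo)
{
	bo->on_lru = false;
	vm->bulk_moveable = false;
}

/* Analogous role to moving a VM's BOs to the LRU tail: either reuse the
 * cached bulk range or walk the BOs one by one and rebuild it. */
static void toy_move_to_lru_tail(struct toy_vm *vm)
{
	int i;

	if (vm->bulk_moveable) {
		printf("bulk move: shift the whole cached range at once\n");
		return;
	}

	printf("LRU changed: rebuild the range BO by BO\n");
	for (i = 0; i < MAX_BOS; i++)
		if (vm->bos[i].on_lru)
			printf("  move BO %d to LRU tail\n", vm->bos[i].id);

	vm->bulk_moveable = true;
}

int main(void)
{
	struct toy_vm vm = { .bulk_moveable = false };
	int i;

	for (i = 0; i < MAX_BOS; i++)
		vm.bos[i] = (struct toy_bo){ .id = i, .on_lru = true };

	toy_move_to_lru_tail(&vm);		/* rebuilds, sets bulk_moveable */
	toy_move_to_lru_tail(&vm);		/* fast path: bulk move */

	toy_del_from_lru(&vm, &vm.bos[2]);	/* LRU changed */
	toy_move_to_lru_tail(&vm);		/* must rebuild again */
	return 0;
}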
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1546,7 +1546,8 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
 	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
 	.io_mem_free = &amdgpu_ttm_io_mem_free,
 	.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
-	.access_memory = &amdgpu_ttm_access_memory
+	.access_memory = &amdgpu_ttm_access_memory,
+	.del_from_lru_notify = &amdgpu_vm_del_from_lru_notify
 };
 
 /*
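The hunk above only registers the new hook in amdgpu's ttm_bo_driver table. As a rough illustration of this "optional notify callback in a driver ops table" pattern (purely hypothetical names, not TTM code), the core side that owns the LRU would check for the hook and call it when it unlinks an object, which is how the driver gets a chance to clear its cached state:

/*
 * Hypothetical sketch of an optional notify hook in a driver ops table.
 * This is not TTM code; every name here is invented for illustration.
 */
#include <stdio.h>

struct example_bo;

struct example_driver_ops {
	/* Optional: called by the core when a BO leaves the LRU. */
	void (*del_from_lru_notify)(struct example_bo *bo);
};

struct example_bo {
	const struct example_driver_ops *ops;
	int id;
};

/* Core-side helper: unlink the BO, then let the driver update its own
 * bookkeeping (for amdgpu, this is where bulk_moveable would be cleared). */
static void example_del_from_lru(struct example_bo *bo)
{
	/* ...unlink bo from the core LRU list here... */
	if (bo->ops && bo->ops->del_from_lru_notify)
		bo->ops->del_from_lru_notify(bo);
}

/* Driver-side implementation, registered through the ops table. */
static void example_driver_notify(struct example_bo *bo)
{
	printf("BO %d left the LRU, invalidate cached state\n", bo->id);
}

static const struct example_driver_ops example_ops = {
	.del_from_lru_notify = example_driver_notify,
};

int main(void)
{
	struct example_bo bo = { .ops = &example_ops, .id = 42 };

	example_del_from_lru(&bo);
	return 0;
}

Keeping the hook optional means drivers that do not track bulk moves are unaffected; only amdgpu wires it up here.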
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -623,6 +623,28 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 	list_add(&entry->tv.head, validated);
 }
 
+void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
+{
+	struct amdgpu_bo *abo;
+	struct amdgpu_vm_bo_base *bo_base;
+
+	if (!amdgpu_bo_is_amdgpu_bo(bo))
+		return;
+
+	if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
+		return;
+
+	abo = ttm_to_amdgpu_bo(bo);
+	if (!abo->parent)
+		return;
+	for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
+		struct amdgpu_vm *vm = bo_base->vm;
+
+		if (abo->tbo.resv == vm->root.base.bo->tbo.resv)
+			vm->bulk_moveable = false;
+	}
+
+}
 /**
  * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
  *
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -363,4 +363,6 @@ int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
 
 void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);
 
+void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);
+
 #endif