Commit 7978c4d4 authored by Alex Deucher

drm/amdkfd: simplify APU VRAM handling

With commit 89773b85
("drm/amdkfd: Let VRAM allocations go to GTT domain on small APUs"),
big and small APU "VRAM" handling in KFD was unified.  Since AMD_IS_APU
is set for both big and small APUs, we can simplify the checks in
the code.

v2: clean up a few more places (Lang)
Acked-by: Felix Kuehling <felix.kuehling@amd.com>
Reviewed-by: Lang Yu <Lang.Yu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent d4ab6c40
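
The simplification rests on the invariant called out in the commit message:
AMD_IS_APU is set in adev->flags for both big and small APUs, so
adev->gmc.is_app_apu never holds on a device that lacks the flag, and
(is_app_apu || (flags & AMD_IS_APU)) reduces to (flags & AMD_IS_APU). That
reduction is the only thing each hunk below changes. A minimal stand-alone C
sketch of the equivalence follows; struct dev, the AMD_IS_APU bit value, and
the two check helpers are illustrative stand-ins, not kernel code.

/*
 * Sketch: the old and new predicates agree on all three device classes,
 * assuming (per the commit message) big APUs always set AMD_IS_APU.
 */
#include <stdbool.h>
#include <stdio.h>

#define AMD_IS_APU (1u << 0)    /* stand-in for the real flag bit */

struct dev {
        unsigned int flags;     /* may contain AMD_IS_APU */
        bool is_app_apu;        /* big APU; by the invariant, implies the flag */
};

static bool old_check(const struct dev *d)
{
        return d->is_app_apu || (d->flags & AMD_IS_APU);
}

static bool new_check(const struct dev *d)
{
        return d->flags & AMD_IS_APU;
}

int main(void)
{
        const struct dev devs[] = {
                { .flags = AMD_IS_APU, .is_app_apu = true },    /* big APU */
                { .flags = AMD_IS_APU, .is_app_apu = false },   /* small APU */
                { .flags = 0, .is_app_apu = false },            /* dGPU */
        };

        /* Both checks print the same value for every device class. */
        for (unsigned int i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
                printf("dev %u: old=%d new=%d\n", i,
                       old_check(&devs[i]), new_check(&devs[i]));
        return 0;
}

Note that dGPUs set neither indicator, so both predicates evaluate false
there and the change is behavior-neutral for them as well.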
@@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
 			return -EINVAL;

 		vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
-		if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+		if (adev->flags & AMD_IS_APU) {
 			system_mem_needed = size;
 			ttm_mem_needed = size;
 		}
@@ -233,7 +233,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
 	if (adev && xcp_id >= 0) {
 		adev->kfd.vram_used[xcp_id] += vram_needed;
 		adev->kfd.vram_used_aligned[xcp_id] +=
-			(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+			(adev->flags & AMD_IS_APU) ?
 			vram_needed :
 			ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
 	}
@@ -261,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
 		if (adev) {
 			adev->kfd.vram_used[xcp_id] -= size;
-			if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+			if (adev->flags & AMD_IS_APU) {
 				adev->kfd.vram_used_aligned[xcp_id] -= size;
 				kfd_mem_limit.system_mem_used -= size;
 				kfd_mem_limit.ttm_mem_used -= size;
@@ -894,7 +894,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 	 * if peer device has large BAR. In contrast, access over xGMI is
 	 * allowed for both small and large BAR configurations of peer device
 	 */
-	if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) &&
+	if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
 	    ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
 	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
 	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
@@ -1682,7 +1682,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
 		- atomic64_read(&adev->vram_pin_size)
 		- reserved_for_pt;

-	if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		system_mem_available = no_system_mem_limit ?
 			kfd_mem_limit.max_system_mem_limit :
 			kfd_mem_limit.max_system_mem_limit -
@@ -1730,7 +1730,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;

-		if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+		if (adev->flags & AMD_IS_APU) {
 			domain = AMDGPU_GEM_DOMAIN_GTT;
 			alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
 			alloc_flags = 0;
@@ -1981,7 +1981,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	if (size) {
 		if (!is_imported &&
 		    (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
-		    ((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) &&
+		    ((adev->flags & AMD_IS_APU) &&
 		     mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
 			*size = bo_size;
 		else
@@ -2404,7 +2404,7 @@ static int import_obj_create(struct amdgpu_device *adev,
 	(*mem)->bo = bo;
 	(*mem)->va = va;
 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
-			 !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+			 !(adev->flags & AMD_IS_APU) ?
 			 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
 	(*mem)->mapped_to_gpu_memory = 0;
@@ -1023,7 +1023,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
 	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
 		return -EINVAL;

-	if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return 0;

 	pgmap = &kfddev->pgmap;
@@ -2634,8 +2634,7 @@ svm_range_best_restore_location(struct svm_range *prange,
 		return -1;
 	}

-	if (node->adev->gmc.is_app_apu ||
-	    node->adev->flags & AMD_IS_APU)
+	if (node->adev->flags & AMD_IS_APU)
 		return 0;

 	if (prange->preferred_loc == gpuid ||
@@ -3353,8 +3352,7 @@ svm_range_best_prefetch_location(struct svm_range *prange)
 		goto out;
 	}

-	if (bo_node->adev->gmc.is_app_apu ||
-	    bo_node->adev->flags & AMD_IS_APU) {
+	if (bo_node->adev->flags & AMD_IS_APU) {
 		best_loc = 0;
 		goto out;
 	}
@@ -201,7 +201,6 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
  * is initialized to not 0 when page migration register device memory.
  */
 #define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
-					(adev)->gmc.is_app_apu ||\
 					((adev)->flags & AMD_IS_APU))

 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);