Commit c86ad391 authored by Philip Yang's avatar Philip Yang Committed by Alex Deucher

drm/amdkfd: amdkfd_free_gtt_mem clear the correct pointer

Pass a pointer reference to amdgpu_bo_unref so that it clears the correct pointer;
otherwise amdgpu_bo_unref clears only the local variable and the original pointer
is not set to NULL, which could cause a use-after-free bug.
Signed-off-by: default avatarPhilip Yang <Philip.Yang@amd.com>
Reviewed-by: default avatarFelix Kuehling <felix.kuehling@amd.com>
Acked-by: default avatarChristian König <christian.koenig@amd.com>
Signed-off-by: default avatarAlex Deucher <alexander.deucher@amd.com>
parent f9e292cb
...@@ -364,15 +364,15 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size, ...@@ -364,15 +364,15 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
return r; return r;
} }
void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj) void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{ {
struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj; struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
amdgpu_bo_reserve(bo, true); amdgpu_bo_reserve(*bo, true);
amdgpu_bo_kunmap(bo); amdgpu_bo_kunmap(*bo);
amdgpu_bo_unpin(bo); amdgpu_bo_unpin(*bo);
amdgpu_bo_unreserve(bo); amdgpu_bo_unreserve(*bo);
amdgpu_bo_unref(&(bo)); amdgpu_bo_unref(bo);
} }
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size, int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
......
...@@ -235,7 +235,7 @@ int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo, ...@@ -235,7 +235,7 @@ int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size, int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
void **mem_obj, uint64_t *gpu_addr, void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr, bool mqd_gfx9); void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj); void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj);
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size, int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
void **mem_obj); void **mem_obj);
void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj); void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
......
...@@ -423,7 +423,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, ...@@ -423,7 +423,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
err_create_queue: err_create_queue:
if (wptr_bo) if (wptr_bo)
amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo); amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&wptr_bo);
err_wptr_map_gart: err_wptr_map_gart:
err_bind_process: err_bind_process:
err_pdd: err_pdd:
......
...@@ -907,7 +907,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, ...@@ -907,7 +907,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd_doorbell_error: kfd_doorbell_error:
kfd_gtt_sa_fini(kfd); kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error: kfd_gtt_sa_init_error:
amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
alloc_gtt_mem_failure: alloc_gtt_mem_failure:
dev_err(kfd_device, dev_err(kfd_device,
"device %x:%x NOT added due to errors\n", "device %x:%x NOT added due to errors\n",
...@@ -925,7 +925,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) ...@@ -925,7 +925,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfd_doorbell_fini(kfd); kfd_doorbell_fini(kfd);
ida_destroy(&kfd->doorbell_ida); ida_destroy(&kfd->doorbell_ida);
kfd_gtt_sa_fini(kfd); kfd_gtt_sa_fini(kfd);
amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem);
} }
kfree(kfd); kfree(kfd);
......
...@@ -2621,7 +2621,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_node *dev, ...@@ -2621,7 +2621,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
{ {
WARN(!mqd, "No hiq sdma mqd trunk to free"); WARN(!mqd, "No hiq sdma mqd trunk to free");
amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem); amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
} }
void device_queue_manager_uninit(struct device_queue_manager *dqm) void device_queue_manager_uninit(struct device_queue_manager *dqm)
......
...@@ -225,7 +225,7 @@ void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd, ...@@ -225,7 +225,7 @@ void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj) struct kfd_mem_obj *mqd_mem_obj)
{ {
if (mqd_mem_obj->gtt_mem) { if (mqd_mem_obj->gtt_mem) {
amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, mqd_mem_obj->gtt_mem); amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, &mqd_mem_obj->gtt_mem);
kfree(mqd_mem_obj); kfree(mqd_mem_obj);
} else { } else {
kfd_gtt_sa_free(mm->dev, mqd_mem_obj); kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
......
...@@ -1048,7 +1048,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) ...@@ -1048,7 +1048,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
if (pdd->dev->kfd->shared_resources.enable_mes) if (pdd->dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev, amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
pdd->proc_ctx_bo); &pdd->proc_ctx_bo);
/* /*
* before destroying pdd, make sure to report availability * before destroying pdd, make sure to report availability
* for auto suspend * for auto suspend
......
...@@ -204,9 +204,9 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm, ...@@ -204,9 +204,9 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
} }
if (dev->kfd->shared_resources.enable_mes) { if (dev->kfd->shared_resources.enable_mes) {
amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo); amdgpu_amdkfd_free_gtt_mem(dev->adev, &pqn->q->gang_ctx_bo);
if (pqn->q->wptr_bo) if (pqn->q->wptr_bo)
amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo); amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&pqn->q->wptr_bo);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment