Commit a86aa3ca authored by Oded Gabbay's avatar Oded Gabbay

drm/amdkfd: Using new gtt sa in amdkfd

This patch changes the calls throughout the amdkfd driver from the old kfd-->kgd
interface to the new kfd gtt sa inside amdkfd

v2: change the new call in sdma code that appeared because of the sdma feature
Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
Reviewed-by: Alexey Skidanov <Alexey.skidanov@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent 73a1da0b
...@@ -519,11 +519,8 @@ static int init_pipelines(struct device_queue_manager *dqm, ...@@ -519,11 +519,8 @@ static int init_pipelines(struct device_queue_manager *dqm,
* because it contains no data when there are no active queues. * because it contains no data when there are no active queues.
*/ */
err = kfd2kgd->allocate_mem(dqm->dev->kgd, err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num,
CIK_HPD_EOP_BYTES * pipes_num, &dqm->pipeline_mem);
PAGE_SIZE,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &dqm->pipeline_mem);
if (err) { if (err) {
pr_err("kfd: error allocate vidmem num pipes: %d\n", pr_err("kfd: error allocate vidmem num pipes: %d\n",
...@@ -538,8 +535,7 @@ static int init_pipelines(struct device_queue_manager *dqm, ...@@ -538,8 +535,7 @@ static int init_pipelines(struct device_queue_manager *dqm,
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE); mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
if (mqd == NULL) { if (mqd == NULL) {
kfd2kgd->free_mem(dqm->dev->kgd, kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
(struct kgd_mem *) dqm->pipeline_mem);
return -ENOMEM; return -ENOMEM;
} }
...@@ -614,8 +610,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm) ...@@ -614,8 +610,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm)
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++) for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
kfree(dqm->mqds[i]); kfree(dqm->mqds[i]);
mutex_destroy(&dqm->lock); mutex_destroy(&dqm->lock);
kfd2kgd->free_mem(dqm->dev->kgd, kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
(struct kgd_mem *) dqm->pipeline_mem);
} }
static int start_nocpsch(struct device_queue_manager *dqm) static int start_nocpsch(struct device_queue_manager *dqm)
...@@ -773,11 +768,8 @@ static int start_cpsch(struct device_queue_manager *dqm) ...@@ -773,11 +768,8 @@ static int start_cpsch(struct device_queue_manager *dqm)
pr_debug("kfd: allocating fence memory\n"); pr_debug("kfd: allocating fence memory\n");
/* allocate fence memory on the gart */ /* allocate fence memory on the gart */
retval = kfd2kgd->allocate_mem(dqm->dev->kgd, retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
sizeof(*dqm->fence_addr), &dqm->fence_mem);
32,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &dqm->fence_mem);
if (retval != 0) if (retval != 0)
goto fail_allocate_vidmem; goto fail_allocate_vidmem;
...@@ -812,8 +804,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) ...@@ -812,8 +804,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
pdd = qpd_to_pdd(node->qpd); pdd = qpd_to_pdd(node->qpd);
pdd->bound = false; pdd->bound = false;
} }
kfd2kgd->free_mem(dqm->dev->kgd, kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
(struct kgd_mem *) dqm->fence_mem);
pm_uninit(&dqm->packets); pm_uninit(&dqm->packets);
return 0; return 0;
......
...@@ -72,11 +72,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -72,11 +72,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
if (prop.doorbell_ptr == NULL) if (prop.doorbell_ptr == NULL)
goto err_get_kernel_doorbell; goto err_get_kernel_doorbell;
retval = kfd2kgd->allocate_mem(dev->kgd, retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
queue_size,
PAGE_SIZE,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &kq->pq);
if (retval != 0) if (retval != 0)
goto err_pq_allocate_vidmem; goto err_pq_allocate_vidmem;
...@@ -84,11 +80,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -84,11 +80,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->pq_kernel_addr = kq->pq->cpu_ptr; kq->pq_kernel_addr = kq->pq->cpu_ptr;
kq->pq_gpu_addr = kq->pq->gpu_addr; kq->pq_gpu_addr = kq->pq->gpu_addr;
retval = kfd2kgd->allocate_mem(dev->kgd, retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
sizeof(*kq->rptr_kernel), &kq->rptr_mem);
32,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &kq->rptr_mem);
if (retval != 0) if (retval != 0)
goto err_rptr_allocate_vidmem; goto err_rptr_allocate_vidmem;
...@@ -96,11 +89,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -96,11 +89,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->rptr_kernel = kq->rptr_mem->cpu_ptr; kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr; kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
retval = kfd2kgd->allocate_mem(dev->kgd, retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->wptr_kernel),
sizeof(*kq->wptr_kernel), &kq->wptr_mem);
32,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &kq->wptr_mem);
if (retval != 0) if (retval != 0)
goto err_wptr_allocate_vidmem; goto err_wptr_allocate_vidmem;
...@@ -145,11 +135,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -145,11 +135,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
} else { } else {
/* allocate fence for DIQ */ /* allocate fence for DIQ */
retval = kfd2kgd->allocate_mem(dev->kgd, retval = kfd_gtt_sa_allocate(dev, sizeof(uint32_t),
sizeof(uint32_t), &kq->fence_mem_obj);
32,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &kq->fence_mem_obj);
if (retval != 0) if (retval != 0)
goto err_alloc_fence; goto err_alloc_fence;
...@@ -165,11 +152,11 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -165,11 +152,11 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
err_init_mqd: err_init_mqd:
uninit_queue(kq->queue); uninit_queue(kq->queue);
err_init_queue: err_init_queue:
kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->wptr_mem); kfd_gtt_sa_free(dev, kq->wptr_mem);
err_wptr_allocate_vidmem: err_wptr_allocate_vidmem:
kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->rptr_mem); kfd_gtt_sa_free(dev, kq->rptr_mem);
err_rptr_allocate_vidmem: err_rptr_allocate_vidmem:
kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->pq); kfd_gtt_sa_free(dev, kq->pq);
err_pq_allocate_vidmem: err_pq_allocate_vidmem:
pr_err("kfd: error init pq\n"); pr_err("kfd: error init pq\n");
kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
...@@ -190,10 +177,12 @@ static void uninitialize(struct kernel_queue *kq) ...@@ -190,10 +177,12 @@ static void uninitialize(struct kernel_queue *kq)
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS, QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
kq->queue->pipe, kq->queue->pipe,
kq->queue->queue); kq->queue->queue);
else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->rptr_mem); kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->wptr_mem); kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->pq); kfd_gtt_sa_free(kq->dev, kq->pq);
kfd_release_kernel_doorbell(kq->dev, kfd_release_kernel_doorbell(kq->dev,
kq->queue->properties.doorbell_ptr); kq->queue->properties.doorbell_ptr);
uninit_queue(kq->queue); uninit_queue(kq->queue);
......
...@@ -52,11 +52,8 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, ...@@ -52,11 +52,8 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
pr_debug("kfd: In func %s\n", __func__); pr_debug("kfd: In func %s\n", __func__);
retval = kfd2kgd->allocate_mem(mm->dev->kgd, retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
sizeof(struct cik_mqd), mqd_mem_obj);
256,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) mqd_mem_obj);
if (retval != 0) if (retval != 0)
return -ENOMEM; return -ENOMEM;
...@@ -121,11 +118,9 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd, ...@@ -121,11 +118,9 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
BUG_ON(!mm || !mqd || !mqd_mem_obj); BUG_ON(!mm || !mqd || !mqd_mem_obj);
retval = kfd2kgd->allocate_mem(mm->dev->kgd, retval = kfd_gtt_sa_allocate(mm->dev,
sizeof(struct cik_sdma_rlc_registers), sizeof(struct cik_sdma_rlc_registers),
256, mqd_mem_obj);
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) mqd_mem_obj);
if (retval != 0) if (retval != 0)
return -ENOMEM; return -ENOMEM;
...@@ -147,14 +142,14 @@ static void uninit_mqd(struct mqd_manager *mm, void *mqd, ...@@ -147,14 +142,14 @@ static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj) struct kfd_mem_obj *mqd_mem_obj)
{ {
BUG_ON(!mm || !mqd); BUG_ON(!mm || !mqd);
kfd2kgd->free_mem(mm->dev->kgd, (struct kgd_mem *) mqd_mem_obj); kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
} }
static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd, static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj) struct kfd_mem_obj *mqd_mem_obj)
{ {
BUG_ON(!mm || !mqd); BUG_ON(!mm || !mqd);
kfd2kgd->free_mem(mm->dev->kgd, (struct kgd_mem *) mqd_mem_obj); kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
} }
static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
...@@ -306,11 +301,8 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, ...@@ -306,11 +301,8 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
pr_debug("kfd: In func %s\n", __func__); pr_debug("kfd: In func %s\n", __func__);
retval = kfd2kgd->allocate_mem(mm->dev->kgd, retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
sizeof(struct cik_mqd), mqd_mem_obj);
256,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) mqd_mem_obj);
if (retval != 0) if (retval != 0)
return -ENOMEM; return -ENOMEM;
......
...@@ -97,11 +97,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm, ...@@ -97,11 +97,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription); pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
retval = kfd2kgd->allocate_mem(pm->dqm->dev->kgd, retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
*rl_buffer_size, &pm->ib_buffer_obj);
PAGE_SIZE,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &pm->ib_buffer_obj);
if (retval != 0) { if (retval != 0) {
pr_err("kfd: failed to allocate runlist IB\n"); pr_err("kfd: failed to allocate runlist IB\n");
...@@ -557,8 +554,7 @@ void pm_release_ib(struct packet_manager *pm) ...@@ -557,8 +554,7 @@ void pm_release_ib(struct packet_manager *pm)
mutex_lock(&pm->lock); mutex_lock(&pm->lock);
if (pm->allocated) { if (pm->allocated) {
kfd2kgd->free_mem(pm->dqm->dev->kgd, kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
(struct kgd_mem *) pm->ib_buffer_obj);
pm->allocated = false; pm->allocated = false;
} }
mutex_unlock(&pm->lock); mutex_unlock(&pm->lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment