Commit a5a4d68c authored by Yong Zhao's avatar Yong Zhao Committed by Alex Deucher

drm/amdkfd: Eliminate unnecessary kernel queue function pointers

Up to this point, those functions are all the same for all ASICs, so
no need to call them by functions pointers. Removing the function
pointers will greatly increase the code readability. If there is ever
need for those function pointers, we can add it back then.
Signed-off-by: Yong Zhao <Yong.Zhao@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f83f5a1e
...@@ -72,11 +72,11 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, ...@@ -72,11 +72,11 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
* The receive packet buff will be sitting on the Indirect Buffer * The receive packet buff will be sitting on the Indirect Buffer
* and in the PQ we put the IB packet + sync packet(s). * and in the PQ we put the IB packet + sync packet(s).
*/ */
status = kq->ops.acquire_packet_buffer(kq, status = kq_acquire_packet_buffer(kq,
pq_packets_size_in_bytes / sizeof(uint32_t), pq_packets_size_in_bytes / sizeof(uint32_t),
&ib_packet_buff); &ib_packet_buff);
if (status) { if (status) {
pr_err("acquire_packet_buffer failed\n"); pr_err("kq_acquire_packet_buffer failed\n");
return status; return status;
} }
...@@ -115,7 +115,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, ...@@ -115,7 +115,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
if (status) { if (status) {
pr_err("Failed to allocate GART memory\n"); pr_err("Failed to allocate GART memory\n");
kq->ops.rollback_packet(kq); kq_rollback_packet(kq);
return status; return status;
} }
...@@ -151,7 +151,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, ...@@ -151,7 +151,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
rm_packet->data_lo = QUEUESTATE__ACTIVE; rm_packet->data_lo = QUEUESTATE__ACTIVE;
kq->ops.submit_packet(kq); kq_submit_packet(kq);
/* Wait till CP writes sync code: */ /* Wait till CP writes sync code: */
status = amdkfd_fence_wait_timeout( status = amdkfd_fence_wait_timeout(
......
...@@ -34,7 +34,10 @@ ...@@ -34,7 +34,10 @@
#define PM4_COUNT_ZERO (((1 << 15) - 1) << 16) #define PM4_COUNT_ZERO (((1 << 15) - 1) << 16)
static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, /* Initialize a kernel queue, including allocations of GART memory
* needed for the queue.
*/
static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
enum kfd_queue_type type, unsigned int queue_size) enum kfd_queue_type type, unsigned int queue_size)
{ {
struct queue_properties prop; struct queue_properties prop;
...@@ -88,7 +91,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -88,7 +91,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->pq_gpu_addr = kq->pq->gpu_addr; kq->pq_gpu_addr = kq->pq->gpu_addr;
/* For CIK family asics, kq->eop_mem is not needed */ /* For CIK family asics, kq->eop_mem is not needed */
if (dev->device_info->asic_family > CHIP_HAWAII) { if (dev->device_info->asic_family > CHIP_MULLINS) {
retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
if (retval != 0) if (retval != 0)
goto err_eop_allocate_vidmem; goto err_eop_allocate_vidmem;
...@@ -191,7 +194,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -191,7 +194,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
} }
static void uninitialize(struct kernel_queue *kq) /* Uninitialize a kernel queue and free all its memory usages. */
static void kq_uninitialize(struct kernel_queue *kq)
{ {
if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
kq->mqd_mgr->destroy_mqd(kq->mqd_mgr, kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
...@@ -220,7 +224,7 @@ static void uninitialize(struct kernel_queue *kq) ...@@ -220,7 +224,7 @@ static void uninitialize(struct kernel_queue *kq)
uninit_queue(kq->queue); uninit_queue(kq->queue);
} }
static int acquire_packet_buffer(struct kernel_queue *kq, int kq_acquire_packet_buffer(struct kernel_queue *kq,
size_t packet_size_in_dwords, unsigned int **buffer_ptr) size_t packet_size_in_dwords, unsigned int **buffer_ptr)
{ {
size_t available_size; size_t available_size;
...@@ -281,7 +285,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq, ...@@ -281,7 +285,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
return -ENOMEM; return -ENOMEM;
} }
static void submit_packet(struct kernel_queue *kq) void kq_submit_packet(struct kernel_queue *kq)
{ {
#ifdef DEBUG #ifdef DEBUG
int i; int i;
...@@ -304,7 +308,7 @@ static void submit_packet(struct kernel_queue *kq) ...@@ -304,7 +308,7 @@ static void submit_packet(struct kernel_queue *kq)
} }
} }
static void rollback_packet(struct kernel_queue *kq) void kq_rollback_packet(struct kernel_queue *kq)
{ {
if (kq->dev->device_info->doorbell_size == 8) { if (kq->dev->device_info->doorbell_size == 8) {
kq->pending_wptr64 = *kq->wptr64_kernel; kq->pending_wptr64 = *kq->wptr64_kernel;
...@@ -324,13 +328,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, ...@@ -324,13 +328,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
if (!kq) if (!kq)
return NULL; return NULL;
kq->ops.initialize = initialize; if (kq_initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
kq->ops.uninitialize = uninitialize;
kq->ops.acquire_packet_buffer = acquire_packet_buffer;
kq->ops.submit_packet = submit_packet;
kq->ops.rollback_packet = rollback_packet;
if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
return kq; return kq;
pr_err("Failed to init kernel queue\n"); pr_err("Failed to init kernel queue\n");
...@@ -341,7 +339,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, ...@@ -341,7 +339,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
void kernel_queue_uninit(struct kernel_queue *kq) void kernel_queue_uninit(struct kernel_queue *kq)
{ {
kq->ops.uninitialize(kq); kq_uninitialize(kq);
kfree(kq); kfree(kq);
} }
...@@ -361,7 +359,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) ...@@ -361,7 +359,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
return; return;
} }
retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer); retval = kq_acquire_packet_buffer(kq, 5, &buffer);
if (unlikely(retval != 0)) { if (unlikely(retval != 0)) {
pr_err(" Failed to acquire packet buffer\n"); pr_err(" Failed to acquire packet buffer\n");
pr_err("Kernel queue test failed\n"); pr_err("Kernel queue test failed\n");
...@@ -369,7 +367,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) ...@@ -369,7 +367,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
} }
for (i = 0; i < 5; i++) for (i = 0; i < 5; i++)
buffer[i] = kq->nop_packet; buffer[i] = kq->nop_packet;
kq->ops.submit_packet(kq); kq_submit_packet(kq);
pr_err("Ending kernel queue test\n"); pr_err("Ending kernel queue test\n");
} }
......
...@@ -29,44 +29,28 @@ ...@@ -29,44 +29,28 @@
#include "kfd_priv.h" #include "kfd_priv.h"
/** /**
* struct kernel_queue_ops * kq_acquire_packet_buffer: Returns a pointer to the location in the kernel
*
* @initialize: Initialize a kernel queue, including allocations of GART memory
* needed for the queue.
*
* @uninitialize: Uninitialize a kernel queue and free all its memory usages.
*
* @acquire_packet_buffer: Returns a pointer to the location in the kernel
* queue ring buffer where the calling function can write its packet. It is * queue ring buffer where the calling function can write its packet. It is
* Guaranteed that there is enough space for that packet. It also updates the * Guaranteed that there is enough space for that packet. It also updates the
* pending write pointer to that location so subsequent calls to * pending write pointer to that location so subsequent calls to
* acquire_packet_buffer will get a correct write pointer * acquire_packet_buffer will get a correct write pointer
* *
* @submit_packet: Update the write pointer and doorbell of a kernel queue. * kq_submit_packet: Update the write pointer and doorbell of a kernel queue.
*
* @sync_with_hw: Wait until the write pointer and the read pointer of a kernel
* queue are equal, which means the CP has read all the submitted packets.
* *
* @rollback_packet: This routine is called if we failed to build an acquired * kq_rollback_packet: This routine is called if we failed to build an acquired
* packet for some reason. It just overwrites the pending wptr with the current * packet for some reason. It just overwrites the pending wptr with the current
* one * one
* *
*/ */
struct kernel_queue_ops {
bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev, int kq_acquire_packet_buffer(struct kernel_queue *kq,
enum kfd_queue_type type, unsigned int queue_size);
void (*uninitialize)(struct kernel_queue *kq);
int (*acquire_packet_buffer)(struct kernel_queue *kq,
size_t packet_size_in_dwords, size_t packet_size_in_dwords,
unsigned int **buffer_ptr); unsigned int **buffer_ptr);
void kq_submit_packet(struct kernel_queue *kq);
void kq_rollback_packet(struct kernel_queue *kq);
void (*submit_packet)(struct kernel_queue *kq);
void (*rollback_packet)(struct kernel_queue *kq);
};
struct kernel_queue { struct kernel_queue {
struct kernel_queue_ops ops;
/* data */ /* data */
struct kfd_dev *dev; struct kfd_dev *dev;
struct mqd_manager *mqd_mgr; struct mqd_manager *mqd_mgr;
......
...@@ -278,7 +278,7 @@ int pm_send_set_resources(struct packet_manager *pm, ...@@ -278,7 +278,7 @@ int pm_send_set_resources(struct packet_manager *pm,
size = pm->pmf->set_resources_size; size = pm->pmf->set_resources_size;
mutex_lock(&pm->lock); mutex_lock(&pm->lock);
pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), size / sizeof(uint32_t),
(unsigned int **)&buffer); (unsigned int **)&buffer);
if (!buffer) { if (!buffer) {
...@@ -289,9 +289,9 @@ int pm_send_set_resources(struct packet_manager *pm, ...@@ -289,9 +289,9 @@ int pm_send_set_resources(struct packet_manager *pm,
retval = pm->pmf->set_resources(pm, buffer, res); retval = pm->pmf->set_resources(pm, buffer, res);
if (!retval) if (!retval)
pm->priv_queue->ops.submit_packet(pm->priv_queue); kq_submit_packet(pm->priv_queue);
else else
pm->priv_queue->ops.rollback_packet(pm->priv_queue); kq_rollback_packet(pm->priv_queue);
out: out:
mutex_unlock(&pm->lock); mutex_unlock(&pm->lock);
...@@ -316,7 +316,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) ...@@ -316,7 +316,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t); packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
mutex_lock(&pm->lock); mutex_lock(&pm->lock);
retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, retval = kq_acquire_packet_buffer(pm->priv_queue,
packet_size_dwords, &rl_buffer); packet_size_dwords, &rl_buffer);
if (retval) if (retval)
goto fail_acquire_packet_buffer; goto fail_acquire_packet_buffer;
...@@ -326,14 +326,14 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) ...@@ -326,14 +326,14 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
if (retval) if (retval)
goto fail_create_runlist; goto fail_create_runlist;
pm->priv_queue->ops.submit_packet(pm->priv_queue); kq_submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock); mutex_unlock(&pm->lock);
return retval; return retval;
fail_create_runlist: fail_create_runlist:
pm->priv_queue->ops.rollback_packet(pm->priv_queue); kq_rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer: fail_acquire_packet_buffer:
mutex_unlock(&pm->lock); mutex_unlock(&pm->lock);
fail_create_runlist_ib: fail_create_runlist_ib:
...@@ -352,7 +352,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, ...@@ -352,7 +352,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
size = pm->pmf->query_status_size; size = pm->pmf->query_status_size;
mutex_lock(&pm->lock); mutex_lock(&pm->lock);
pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer); size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) { if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n"); pr_err("Failed to allocate buffer on kernel queue\n");
...@@ -362,9 +362,9 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, ...@@ -362,9 +362,9 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value); retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
if (!retval) if (!retval)
pm->priv_queue->ops.submit_packet(pm->priv_queue); kq_submit_packet(pm->priv_queue);
else else
pm->priv_queue->ops.rollback_packet(pm->priv_queue); kq_rollback_packet(pm->priv_queue);
out: out:
mutex_unlock(&pm->lock); mutex_unlock(&pm->lock);
...@@ -381,7 +381,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, ...@@ -381,7 +381,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
size = pm->pmf->unmap_queues_size; size = pm->pmf->unmap_queues_size;
mutex_lock(&pm->lock); mutex_lock(&pm->lock);
pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer); size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) { if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n"); pr_err("Failed to allocate buffer on kernel queue\n");
...@@ -392,9 +392,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, ...@@ -392,9 +392,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param, retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
reset, sdma_engine); reset, sdma_engine);
if (!retval) if (!retval)
pm->priv_queue->ops.submit_packet(pm->priv_queue); kq_submit_packet(pm->priv_queue);
else else
pm->priv_queue->ops.rollback_packet(pm->priv_queue); kq_rollback_packet(pm->priv_queue);
out: out:
mutex_unlock(&pm->lock); mutex_unlock(&pm->lock);
...@@ -439,7 +439,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm) ...@@ -439,7 +439,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
size = pm->pmf->query_status_size; size = pm->pmf->query_status_size;
mutex_lock(&pm->lock); mutex_lock(&pm->lock);
pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer); size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) { if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n"); pr_err("Failed to allocate buffer on kernel queue\n");
...@@ -447,7 +447,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm) ...@@ -447,7 +447,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
goto out; goto out;
} }
memset(buffer, 0x55, size); memset(buffer, 0x55, size);
pm->priv_queue->ops.submit_packet(pm->priv_queue); kq_submit_packet(pm->priv_queue);
pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.", pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
buffer[0], buffer[1], buffer[2], buffer[3], buffer[0], buffer[1], buffer[2], buffer[3],
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment