Commit 32fa8219 authored by Felix Kuehling, committed by Oded Gabbay

drm/amdkfd: Handle remaining BUG_ONs more gracefully v2

In most cases, BUG_ONs can be replaced with WARN_ON with an error
return. In some void functions just turn them into a WARN_ON and
possibly an early exit.

v2:
* Cleaned up error handling in pm_send_unmap_queue
* Removed redundant WARN_ON in kfd_process_destroy_delayed
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent 8625ff9c
...@@ -60,7 +60,8 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, ...@@ -60,7 +60,8 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
unsigned int *ib_packet_buff; unsigned int *ib_packet_buff;
int status; int status;
BUG_ON(!size_in_bytes); if (WARN_ON(!size_in_bytes))
return -EINVAL;
kq = dbgdev->kq; kq = dbgdev->kq;
......
...@@ -64,7 +64,8 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev) ...@@ -64,7 +64,8 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ; enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ;
struct kfd_dbgmgr *new_buff; struct kfd_dbgmgr *new_buff;
BUG_ON(!pdev->init_complete); if (WARN_ON(!pdev->init_complete))
return false;
new_buff = kfd_alloc_struct(new_buff); new_buff = kfd_alloc_struct(new_buff);
if (!new_buff) { if (!new_buff) {
......
...@@ -98,7 +98,7 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did) ...@@ -98,7 +98,7 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did)
for (i = 0; i < ARRAY_SIZE(supported_devices); i++) { for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
if (supported_devices[i].did == did) { if (supported_devices[i].did == did) {
BUG_ON(!supported_devices[i].device_info); WARN_ON(!supported_devices[i].device_info);
return supported_devices[i].device_info; return supported_devices[i].device_info;
} }
} }
...@@ -212,8 +212,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid, ...@@ -212,8 +212,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
flags); flags);
dev = kfd_device_by_pci_dev(pdev); dev = kfd_device_by_pci_dev(pdev);
BUG_ON(!dev); if (!WARN_ON(!dev))
kfd_signal_iommu_event(dev, pasid, address, kfd_signal_iommu_event(dev, pasid, address,
flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC); flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
...@@ -397,9 +396,12 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, ...@@ -397,9 +396,12 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
{ {
unsigned int num_of_longs; unsigned int num_of_longs;
BUG_ON(buf_size < chunk_size); if (WARN_ON(buf_size < chunk_size))
BUG_ON(buf_size == 0); return -EINVAL;
BUG_ON(chunk_size == 0); if (WARN_ON(buf_size == 0))
return -EINVAL;
if (WARN_ON(chunk_size == 0))
return -EINVAL;
kfd->gtt_sa_chunk_size = chunk_size; kfd->gtt_sa_chunk_size = chunk_size;
kfd->gtt_sa_num_of_chunks = buf_size / chunk_size; kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
......
...@@ -388,7 +388,8 @@ static struct mqd_manager *get_mqd_manager_nocpsch( ...@@ -388,7 +388,8 @@ static struct mqd_manager *get_mqd_manager_nocpsch(
{ {
struct mqd_manager *mqd; struct mqd_manager *mqd;
BUG_ON(type >= KFD_MQD_TYPE_MAX); if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
pr_debug("mqd type %d\n", type); pr_debug("mqd type %d\n", type);
...@@ -513,7 +514,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm) ...@@ -513,7 +514,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{ {
int i; int i;
BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0); WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
kfree(dqm->allocated_queues); kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++) for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
...@@ -1129,8 +1130,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) ...@@ -1129,8 +1130,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.set_cache_memory_policy = set_cache_memory_policy; dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
break; break;
default: default:
BUG(); pr_err("Invalid scheduling policy %d\n", sched_policy);
break; goto out_free;
} }
switch (dev->device_info->asic_family) { switch (dev->device_info->asic_family) {
...@@ -1143,12 +1144,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) ...@@ -1143,12 +1144,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
break; break;
} }
if (dqm->ops.initialize(dqm)) { if (!dqm->ops.initialize(dqm))
return dqm;
out_free:
kfree(dqm); kfree(dqm);
return NULL; return NULL;
}
return dqm;
} }
void device_queue_manager_uninit(struct device_queue_manager *dqm) void device_queue_manager_uninit(struct device_queue_manager *dqm)
......
...@@ -65,7 +65,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) ...@@ -65,7 +65,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
* for LDS/Scratch and GPUVM. * for LDS/Scratch and GPUVM.
*/ */
BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE || WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0); top_address_nybble == 0);
return PRIVATE_BASE(top_address_nybble << 12) | return PRIVATE_BASE(top_address_nybble << 12) |
......
...@@ -67,7 +67,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble) ...@@ -67,7 +67,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
* for LDS/Scratch and GPUVM. * for LDS/Scratch and GPUVM.
*/ */
BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE || WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0); top_address_nybble == 0);
return top_address_nybble << 12 | return top_address_nybble << 12 |
......
...@@ -41,7 +41,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -41,7 +41,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
int retval; int retval;
union PM4_MES_TYPE_3_HEADER nop; union PM4_MES_TYPE_3_HEADER nop;
BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ))
return false;
pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ, pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
queue_size); queue_size);
...@@ -62,8 +63,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, ...@@ -62,8 +63,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
KFD_MQD_TYPE_HIQ); KFD_MQD_TYPE_HIQ);
break; break;
default: default:
BUG(); pr_err("Invalid queue type %d\n", type);
break; return false;
} }
if (!kq->mqd) if (!kq->mqd)
...@@ -305,6 +306,7 @@ void kernel_queue_uninit(struct kernel_queue *kq) ...@@ -305,6 +306,7 @@ void kernel_queue_uninit(struct kernel_queue *kq)
kfree(kq); kfree(kq);
} }
/* FIXME: Can this test be removed? */
static __attribute__((unused)) void test_kq(struct kfd_dev *dev) static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
{ {
struct kernel_queue *kq; struct kernel_queue *kq;
...@@ -314,10 +316,18 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) ...@@ -314,10 +316,18 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
pr_err("Starting kernel queue test\n"); pr_err("Starting kernel queue test\n");
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
BUG_ON(!kq); if (unlikely(!kq)) {
pr_err(" Failed to initialize HIQ\n");
pr_err("Kernel queue test failed\n");
return;
}
retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer); retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
BUG_ON(retval != 0); if (unlikely(retval != 0)) {
pr_err(" Failed to acquire packet buffer\n");
pr_err("Kernel queue test failed\n");
return;
}
for (i = 0; i < 5; i++) for (i = 0; i < 5; i++)
buffer[i] = kq->nop_packet; buffer[i] = kq->nop_packet;
kq->ops.submit_packet(kq); kq->ops.submit_packet(kq);
......
...@@ -387,7 +387,8 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, ...@@ -387,7 +387,8 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
{ {
struct mqd_manager *mqd; struct mqd_manager *mqd;
BUG_ON(type >= KFD_MQD_TYPE_MAX); if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
mqd = kzalloc(sizeof(*mqd), GFP_KERNEL); mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd) if (!mqd)
......
...@@ -233,7 +233,8 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, ...@@ -233,7 +233,8 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
{ {
struct mqd_manager *mqd; struct mqd_manager *mqd;
BUG_ON(type >= KFD_MQD_TYPE_MAX); if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
mqd = kzalloc(sizeof(*mqd), GFP_KERNEL); mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd) if (!mqd)
......
...@@ -35,7 +35,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes, ...@@ -35,7 +35,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
{ {
unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t); unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);
BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes); WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
"Runlist IB overflow");
*wptr = temp; *wptr = temp;
} }
...@@ -94,7 +95,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm, ...@@ -94,7 +95,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
{ {
int retval; int retval;
BUG_ON(pm->allocated); if (WARN_ON(pm->allocated))
return -EINVAL;
pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription); pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
...@@ -119,7 +121,8 @@ static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer, ...@@ -119,7 +121,8 @@ static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
{ {
struct pm4_runlist *packet; struct pm4_runlist *packet;
BUG_ON(!ib); if (WARN_ON(!ib))
return -EFAULT;
packet = (struct pm4_runlist *)buffer; packet = (struct pm4_runlist *)buffer;
...@@ -211,9 +214,8 @@ static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer, ...@@ -211,9 +214,8 @@ static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
use_static = false; /* no static queues under SDMA */ use_static = false; /* no static queues under SDMA */
break; break;
default: default:
pr_err("queue type %d\n", q->properties.type); WARN(1, "queue type %d", q->properties.type);
BUG(); return -EINVAL;
break;
} }
packet->bitfields3.doorbell_offset = packet->bitfields3.doorbell_offset =
q->properties.doorbell_off; q->properties.doorbell_off;
...@@ -266,8 +268,8 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer, ...@@ -266,8 +268,8 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
use_static = false; /* no static queues under SDMA */ use_static = false; /* no static queues under SDMA */
break; break;
default: default:
BUG(); WARN(1, "queue type %d", q->properties.type);
break; return -EINVAL;
} }
packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset = packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
...@@ -392,14 +394,16 @@ static int pm_create_runlist_ib(struct packet_manager *pm, ...@@ -392,14 +394,16 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
pr_debug("Finished map process and queues to runlist\n"); pr_debug("Finished map process and queues to runlist\n");
if (is_over_subscription) if (is_over_subscription)
pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr, retval = pm_create_runlist(pm, &rl_buffer[rl_wptr],
alloc_size_bytes / sizeof(uint32_t), true); *rl_gpu_addr,
alloc_size_bytes / sizeof(uint32_t),
true);
for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++) for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
pr_debug("0x%2X ", rl_buffer[i]); pr_debug("0x%2X ", rl_buffer[i]);
pr_debug("\n"); pr_debug("\n");
return 0; return retval;
} }
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
...@@ -512,7 +516,8 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, ...@@ -512,7 +516,8 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
int retval; int retval;
struct pm4_query_status *packet; struct pm4_query_status *packet;
BUG_ON(!fence_address); if (WARN_ON(!fence_address))
return -EFAULT;
mutex_lock(&pm->lock); mutex_lock(&pm->lock);
retval = pm->priv_queue->ops.acquire_packet_buffer( retval = pm->priv_queue->ops.acquire_packet_buffer(
...@@ -577,8 +582,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, ...@@ -577,8 +582,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
engine_sel__mes_unmap_queues__sdma0 + sdma_engine; engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
break; break;
default: default:
BUG(); WARN(1, "queue type %d", type);
break; retval = -EINVAL;
goto err_invalid;
} }
if (reset) if (reset)
...@@ -610,12 +616,18 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, ...@@ -610,12 +616,18 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only; queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only;
break; break;
default: default:
BUG(); WARN(1, "filter %d", mode);
break; retval = -EINVAL;
goto err_invalid;
} }
pm->priv_queue->ops.submit_packet(pm->priv_queue); pm->priv_queue->ops.submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
return 0;
err_invalid:
pm->priv_queue->ops.rollback_packet(pm->priv_queue);
err_acquire_packet_buffer: err_acquire_packet_buffer:
mutex_unlock(&pm->lock); mutex_unlock(&pm->lock);
return retval; return retval;
......
...@@ -92,6 +92,6 @@ unsigned int kfd_pasid_alloc(void) ...@@ -92,6 +92,6 @@ unsigned int kfd_pasid_alloc(void)
void kfd_pasid_free(unsigned int pasid) void kfd_pasid_free(unsigned int pasid)
{ {
BUG_ON(pasid == 0 || pasid >= pasid_limit); if (!WARN_ON(pasid == 0 || pasid >= pasid_limit))
clear_bit(pasid, pasid_bitmap); clear_bit(pasid, pasid_bitmap);
} }
...@@ -79,8 +79,6 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread) ...@@ -79,8 +79,6 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
{ {
struct kfd_process *process; struct kfd_process *process;
BUG_ON(!kfd_process_wq);
if (!thread->mm) if (!thread->mm)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
...@@ -202,10 +200,8 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu) ...@@ -202,10 +200,8 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu)
struct kfd_process_release_work *work; struct kfd_process_release_work *work;
struct kfd_process *p; struct kfd_process *p;
BUG_ON(!kfd_process_wq);
p = container_of(rcu, struct kfd_process, rcu); p = container_of(rcu, struct kfd_process, rcu);
BUG_ON(atomic_read(&p->mm->mm_count) <= 0); WARN_ON(atomic_read(&p->mm->mm_count) <= 0);
mmdrop(p->mm); mmdrop(p->mm);
...@@ -229,7 +225,8 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, ...@@ -229,7 +225,8 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
* mmu_notifier srcu is read locked * mmu_notifier srcu is read locked
*/ */
p = container_of(mn, struct kfd_process, mmu_notifier); p = container_of(mn, struct kfd_process, mmu_notifier);
BUG_ON(p->mm != mm); if (WARN_ON(p->mm != mm))
return;
mutex_lock(&kfd_processes_mutex); mutex_lock(&kfd_processes_mutex);
hash_del_rcu(&p->kfd_processes); hash_del_rcu(&p->kfd_processes);
......
...@@ -218,8 +218,8 @@ int pqm_create_queue(struct process_queue_manager *pqm, ...@@ -218,8 +218,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
kq, &pdd->qpd); kq, &pdd->qpd);
break; break;
default: default:
BUG(); WARN(1, "Invalid queue type %d", type);
break; retval = -EINVAL;
} }
if (retval != 0) { if (retval != 0) {
...@@ -272,7 +272,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) ...@@ -272,7 +272,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dev = pqn->kq->dev; dev = pqn->kq->dev;
if (pqn->q) if (pqn->q)
dev = pqn->q->device; dev = pqn->q->device;
BUG_ON(!dev); if (WARN_ON(!dev))
return -ENODEV;
pdd = kfd_get_process_device_data(dev, pqm->process); pdd = kfd_get_process_device_data(dev, pqm->process);
if (!pdd) { if (!pdd) {
......
...@@ -799,10 +799,12 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, ...@@ -799,10 +799,12 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
int ret; int ret;
uint32_t i; uint32_t i;
if (WARN_ON(dev->kobj_node))
return -EEXIST;
/* /*
* Creating the sysfs folders * Creating the sysfs folders
*/ */
BUG_ON(dev->kobj_node);
dev->kobj_node = kfd_alloc_struct(dev->kobj_node); dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
if (!dev->kobj_node) if (!dev->kobj_node)
return -ENOMEM; return -ENOMEM;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment