Commit aba25a3e authored by Parav Pandit, committed by Doug Ledford

IB/core: trivial printk cleanup.

1. Replaced printk with the appropriate pr_warn, pr_err or pr_info calls.
2. Removed unnecessary prints around memory allocation failures,
   as reported by the checkpatch script.
Signed-off-by: Parav Pandit <pandit.parav@gmail.com>
Reviewed-by: Haggai Eran <haggaie@mellanox.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent db9314cd
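
The conversion below is mechanical: pr_warn(), pr_err() and pr_info() expand to printk() with the matching KERN_* level (plus the module's pr_fmt() prefix), and the allocation-failure messages are dropped because the page/slab allocators already log a warning when a normal GFP_KERNEL allocation fails, which is why checkpatch flags such prints. A minimal sketch of the before/after shape, using hypothetical helper names (example_setup, query) that are not part of this commit:

/* Hypothetical illustration of the cleanup pattern; not a hunk from this diff. */
#include <linux/kernel.h>
#include <linux/slab.h>

static int example_setup(const char *name, int (*query)(void))
{
	void *buf;
	int ret;

	ret = query();
	if (ret) {
		/* was: printk(KERN_WARNING "query failed (%d) for %s\n", ret, name); */
		pr_warn("query failed (%d) for %s\n", ret, name);
		return ret;
	}

	buf = kmalloc(64, GFP_KERNEL);
	if (!buf)	/* no extra message: the allocator already warns on failure */
		return -ENOMEM;

	kfree(buf);
	return 0;
}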
@@ -1043,8 +1043,8 @@ static void ib_cache_update(struct ib_device *device,
 	ret = ib_query_port(device, port, tprops);
 	if (ret) {
-		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
+		pr_warn("ib_query_port failed (%d) for %s\n",
 			ret, device->name);
 		goto err;
 	}
@@ -1067,8 +1067,8 @@ static void ib_cache_update(struct ib_device *device,
 	for (i = 0; i < pkey_cache->table_len; ++i) {
 		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
 		if (ret) {
-			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
+			pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
 				ret, device->name, i);
 			goto err;
 		}
 	}
@@ -1078,8 +1078,8 @@ static void ib_cache_update(struct ib_device *device,
 		ret = ib_query_gid(device, port, i,
 				   gid_cache->table + i, NULL);
 		if (ret) {
-			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
+			pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
 				ret, device->name, i);
 			goto err;
 		}
 	}
@@ -1161,8 +1161,7 @@ int ib_cache_setup_one(struct ib_device *device)
 					  GFP_KERNEL);
 	if (!device->cache.pkey_cache ||
 	    !device->cache.lmc_cache) {
-		printk(KERN_WARNING "Couldn't allocate cache "
-		       "for %s\n", device->name);
+		pr_warn("Couldn't allocate cache for %s\n", device->name);
 		return -ENOMEM;
 	}
...
@@ -1713,7 +1713,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
 		break;
 	default:
-		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
 			ib_event->event);
 		goto out;
 	}
@@ -2186,8 +2186,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 	ret = rdma_listen(id, id_priv->backlog);
 	if (ret)
-		printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
-		       "listening on device %s\n", ret, cma_dev->device->name);
+		pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
+			ret, cma_dev->device->name);
 }
 
 static void cma_listen_on_all(struct rdma_id_private *id_priv)
@@ -3239,7 +3239,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 		event.status = 0;
 		break;
 	default:
-		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
 			ib_event->event);
 		goto out;
 	}
@@ -4003,8 +4003,8 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
 	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
 	    (net_eq(dev_net(ndev), dev_addr->net)) &&
 	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
-		printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
+		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
 			ndev->name, &id_priv->id);
 		work = kzalloc(sizeof *work, GFP_KERNEL);
 		if (!work)
 			return -ENOMEM;
@@ -4287,7 +4287,7 @@ static int __init cma_init(void)
 		goto err;
 	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
-		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+		pr_warn("RDMA CMA: failed to add netlink callback\n");
 	cma_configfs_init();
 	return 0;
...
@@ -115,8 +115,8 @@ static int ib_device_check_mandatory(struct ib_device *device)
 	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
 		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
-			printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
+			pr_warn("Device %s is missing mandatory function %s\n",
 				device->name, mandatory_table[i].name);
 			return -EINVAL;
 		}
 	}
@@ -255,8 +255,8 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
 	context = kmalloc(sizeof *context, GFP_KERNEL);
 	if (!context) {
-		printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
+		pr_warn("Couldn't allocate client context for %s/%s\n",
 			device->name, client->name);
 		return -ENOMEM;
 	}
@@ -343,29 +343,29 @@ int ib_register_device(struct ib_device *device,
 	ret = read_port_immutable(device);
 	if (ret) {
-		printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
+		pr_warn("Couldn't create per port immutable data %s\n",
 			device->name);
 		goto out;
 	}
 
 	ret = ib_cache_setup_one(device);
 	if (ret) {
-		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
+		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
 		goto out;
 	}
 
 	memset(&device->attrs, 0, sizeof(device->attrs));
 	ret = device->query_device(device, &device->attrs, &uhw);
 	if (ret) {
-		printk(KERN_WARNING "Couldn't query the device attributes\n");
+		pr_warn("Couldn't query the device attributes\n");
 		ib_cache_cleanup_one(device);
 		goto out;
 	}
 
 	ret = ib_device_register_sysfs(device, port_callback);
 	if (ret) {
-		printk(KERN_WARNING "Couldn't register device %s with driver model\n",
+		pr_warn("Couldn't register device %s with driver model\n",
 			device->name);
 		ib_cache_cleanup_one(device);
 		goto out;
 	}
@@ -566,8 +566,8 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
 			goto out;
 		}
-	printk(KERN_WARNING "No client context found for %s/%s\n",
+	pr_warn("No client context found for %s/%s\n",
 		device->name, client->name);
 
 out:
 	spin_unlock_irqrestore(&device->client_data_lock, flags);
@@ -960,13 +960,13 @@ static int __init ib_core_init(void)
 	ret = class_register(&ib_class);
 	if (ret) {
-		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
+		pr_warn("Couldn't create InfiniBand device class\n");
 		goto err_comp;
 	}
 
 	ret = ibnl_init();
 	if (ret) {
-		printk(KERN_WARNING "Couldn't init IB netlink interface\n");
+		pr_warn("Couldn't init IB netlink interface\n");
 		goto err_sysfs;
 	}
...
@@ -150,8 +150,8 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 #ifdef DEBUG
 		if (fmr->ref_count !=0) {
-			printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d\n",
+			pr_warn(PFX "Unmapping FMR 0x%08x with ref count %d\n",
 				fmr, fmr->ref_count);
 		}
 #endif
 	}
@@ -167,7 +167,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 	ret = ib_unmap_fmr(&fmr_list);
 	if (ret)
-		printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);
+		pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);
 
 	spin_lock_irq(&pool->pool_lock);
 	list_splice(&unmap_list, &pool->free_list);
@@ -222,8 +222,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 	device = pd->device;
 	if (!device->alloc_fmr || !device->dealloc_fmr ||
 	    !device->map_phys_fmr || !device->unmap_fmr) {
-		printk(KERN_INFO PFX "Device %s does not support FMRs\n",
-		       device->name);
+		pr_info(PFX "Device %s does not support FMRs\n", device->name);
 		return ERR_PTR(-ENOSYS);
 	}
@@ -233,13 +232,10 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 	max_remaps = device->attrs.max_map_per_fmr;
 
 	pool = kmalloc(sizeof *pool, GFP_KERNEL);
-	if (!pool) {
-		printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
+	if (!pool)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	pool->cache_bucket = NULL;
 	pool->flush_function = params->flush_function;
 	pool->flush_arg = params->flush_arg;
@@ -251,7 +247,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 			kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
 				GFP_KERNEL);
 		if (!pool->cache_bucket) {
-			printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
+			pr_warn(PFX "Failed to allocate cache in pool\n");
 			ret = -ENOMEM;
 			goto out_free_pool;
 		}
@@ -275,7 +271,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 				   "ib_fmr(%s)",
 				   device->name);
 	if (IS_ERR(pool->thread)) {
-		printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
+		pr_warn(PFX "couldn't start cleanup thread\n");
 		ret = PTR_ERR(pool->thread);
 		goto out_free_pool;
 	}
@@ -294,11 +290,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 	for (i = 0; i < params->pool_size; ++i) {
 		fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
-		if (!fmr) {
-			printk(KERN_WARNING PFX "failed to allocate fmr "
-			       "struct for FMR %d\n", i);
+		if (!fmr)
 			goto out_fail;
-		}
 
 		fmr->pool = pool;
 		fmr->remap_count = 0;
@@ -307,8 +300,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
 		fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
 		if (IS_ERR(fmr->fmr)) {
-			printk(KERN_WARNING PFX "fmr_create failed "
-			       "for FMR %d\n", i);
+			pr_warn(PFX "fmr_create failed for FMR %d\n",
 				i);
 			kfree(fmr);
 			goto out_fail;
 		}
@@ -363,8 +356,8 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
 	}
 
 	if (i < pool->pool_size)
-		printk(KERN_WARNING PFX "pool still has %d regions registered\n",
+		pr_warn(PFX "pool still has %d regions registered\n",
 			pool->pool_size - i);
 
 	kfree(pool->cache_bucket);
 	kfree(pool);
@@ -463,7 +456,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
 		list_add(&fmr->list, &pool->free_list);
 		spin_unlock_irqrestore(&pool->pool_lock, flags);
 
-		printk(KERN_WARNING PFX "fmr_map returns %d\n", result);
+		pr_warn(PFX "fmr_map returns %d\n", result);
 
 		return ERR_PTR(result);
 	}
@@ -517,8 +510,8 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
 #ifdef DEBUG
 	if (fmr->ref_count < 0)
-		printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n",
+		pr_warn(PFX "FMR %p has ref count %d < 0\n",
 			fmr, fmr->ref_count);
 #endif
 
 	spin_unlock_irqrestore(&pool->pool_lock, flags);
...
@@ -44,7 +44,7 @@ static u64 value_read(int offset, int size, void *structure)
 	case 4: return be32_to_cpup((__be32 *) (structure + offset));
 	case 8: return be64_to_cpup((__be64 *) (structure + offset));
 	default:
-		printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+		pr_warn("Field size %d bits not handled\n", size * 8);
 		return 0;
 	}
 }
@@ -104,9 +104,8 @@ void ib_pack(const struct ib_field *desc,
 		} else {
 			if (desc[i].offset_bits % 8 ||
 			    desc[i].size_bits % 8) {
-				printk(KERN_WARNING "Structure field %s of size %d "
-				       "bits is not byte-aligned\n",
-				       desc[i].field_name, desc[i].size_bits);
+				pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+					desc[i].field_name, desc[i].size_bits);
 			}
 
 			if (desc[i].struct_size_bytes)
@@ -132,7 +131,7 @@ static void value_write(int offset, int size, u64 val, void *structure)
 	case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break;
 	case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break;
 	default:
-		printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+		pr_warn("Field size %d bits not handled\n", size * 8);
 	}
 }
@@ -188,9 +187,8 @@ void ib_unpack(const struct ib_field *desc,
 		} else {
 			if (desc[i].offset_bits % 8 ||
 			    desc[i].size_bits % 8) {
-				printk(KERN_WARNING "Structure field %s of size %d "
-				       "bits is not byte-aligned\n",
-				       desc[i].field_name, desc[i].size_bits);
+				pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+					desc[i].field_name, desc[i].size_bits);
 			}
 
 			memcpy(structure + desc[i].struct_offset_bytes,
...
@@ -864,13 +864,12 @@ static void update_sm_ah(struct work_struct *work)
 	struct ib_ah_attr ah_attr;
 
 	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
-		printk(KERN_WARNING "Couldn't query port\n");
+		pr_warn("Couldn't query port\n");
 		return;
 	}
 
 	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
 	if (!new_ah) {
-		printk(KERN_WARNING "Couldn't allocate new SM AH\n");
 		return;
 	}
@@ -880,7 +879,7 @@ static void update_sm_ah(struct work_struct *work)
 	new_ah->pkey_index = 0;
 	if (ib_find_pkey(port->agent->device, port->port_num,
 			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
-		printk(KERN_ERR "Couldn't find index for default PKey\n");
+		pr_err("Couldn't find index for default PKey\n");
 
 	memset(&ah_attr, 0, sizeof ah_attr);
 	ah_attr.dlid = port_attr.sm_lid;
@@ -889,7 +888,7 @@ static void update_sm_ah(struct work_struct *work)
 	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
 	if (IS_ERR(new_ah->ah)) {
-		printk(KERN_WARNING "Couldn't create new SM AH\n");
+		pr_warn("Couldn't create new SM AH\n");
 		kfree(new_ah);
 		return;
 	}
@@ -1800,13 +1799,13 @@ static int __init ib_sa_init(void)
 	ret = ib_register_client(&sa_client);
 	if (ret) {
-		printk(KERN_ERR "Couldn't register ib_sa client\n");
+		pr_err("Couldn't register ib_sa client\n");
 		goto err1;
 	}
 
 	ret = mcast_init();
 	if (ret) {
-		printk(KERN_ERR "Couldn't initialize multicast handling\n");
+		pr_err("Couldn't initialize multicast handling\n");
 		goto err2;
 	}
...
@@ -1234,7 +1234,7 @@ static int find_overflow_devnum(void)
 		ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
 					  "infiniband_cm");
 		if (ret) {
-			printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
+			pr_err("ucm: couldn't register dynamic device number\n");
 			return ret;
 		}
 	}
@@ -1329,19 +1329,19 @@ static int __init ib_ucm_init(void)
 	ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
 				     "infiniband_cm");
 	if (ret) {
-		printk(KERN_ERR "ucm: couldn't register device number\n");
+		pr_err("ucm: couldn't register device number\n");
 		goto error1;
 	}
 
 	ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
 	if (ret) {
-		printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
+		pr_err("ucm: couldn't create abi_version attribute\n");
 		goto error2;
 	}
 
 	ret = ib_register_client(&ucm_client);
 	if (ret) {
-		printk(KERN_ERR "ucm: couldn't register client\n");
+		pr_err("ucm: couldn't register client\n");
 		goto error3;
 	}
 
 	return 0;
...
@@ -314,7 +314,7 @@ static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
 		}
 	}
 	if (!event_found)
-		printk(KERN_ERR "ucma_removal_event_handler: warning: connect request event wasn't found\n");
+		pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
 }
 
 static int ucma_event_handler(struct rdma_cm_id *cm_id,
@@ -1716,13 +1716,13 @@ static int __init ucma_init(void)
 	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
 	if (ret) {
-		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
+		pr_err("rdma_ucm: couldn't create abi_version attr\n");
 		goto err1;
 	}
 
 	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
 	if (!ucma_ctl_table_hdr) {
-		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
+		pr_err("rdma_ucm: couldn't register sysctl paths\n");
 		ret = -ENOMEM;
 		goto err2;
 	}
...
@@ -479,8 +479,8 @@ int ib_ud_header_unpack(void *buf,
 	buf += IB_LRH_BYTES;
 
 	if (header->lrh.link_version != 0) {
-		printk(KERN_WARNING "Invalid LRH.link_version %d\n",
+		pr_warn("Invalid LRH.link_version %d\n",
 			header->lrh.link_version);
 		return -EINVAL;
 	}
@@ -496,20 +496,20 @@ int ib_ud_header_unpack(void *buf,
 		buf += IB_GRH_BYTES;
 
 		if (header->grh.ip_version != 6) {
-			printk(KERN_WARNING "Invalid GRH.ip_version %d\n",
+			pr_warn("Invalid GRH.ip_version %d\n",
 				header->grh.ip_version);
 			return -EINVAL;
 		}
 		if (header->grh.next_header != 0x1b) {
-			printk(KERN_WARNING "Invalid GRH.next_header 0x%02x\n",
+			pr_warn("Invalid GRH.next_header 0x%02x\n",
 				header->grh.next_header);
 			return -EINVAL;
 		}
 		break;
 	default:
-		printk(KERN_WARNING "Invalid LRH.link_next_header %d\n",
+		pr_warn("Invalid LRH.link_next_header %d\n",
 			header->lrh.link_next_header);
 		return -EINVAL;
 	}
@@ -525,14 +525,13 @@ int ib_ud_header_unpack(void *buf,
 		header->immediate_present = 1;
 		break;
 	default:
-		printk(KERN_WARNING "Invalid BTH.opcode 0x%02x\n",
-		       header->bth.opcode);
+		pr_warn("Invalid BTH.opcode 0x%02x\n", header->bth.opcode);
 		return -EINVAL;
 	}
 
 	if (header->bth.transport_header_version != 0) {
-		printk(KERN_WARNING "Invalid BTH.transport_header_version %d\n",
+		pr_warn("Invalid BTH.transport_header_version %d\n",
 			header->bth.transport_header_version);
 		return -EINVAL;
 	}
...
@@ -1056,7 +1056,7 @@ static int find_overflow_devnum(void)
 		ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
 					  "infiniband_verbs");
 		if (ret) {
-			printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
+			pr_err("user_verbs: couldn't register dynamic device number\n");
 			return ret;
 		}
 	}
@@ -1277,14 +1277,14 @@ static int __init ib_uverbs_init(void)
 	ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
 				     "infiniband_verbs");
 	if (ret) {
-		printk(KERN_ERR "user_verbs: couldn't register device number\n");
+		pr_err("user_verbs: couldn't register device number\n");
 		goto out;
 	}
 
 	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
 	if (IS_ERR(uverbs_class)) {
 		ret = PTR_ERR(uverbs_class);
-		printk(KERN_ERR "user_verbs: couldn't create class infiniband_verbs\n");
+		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
 		goto out_chrdev;
 	}
@@ -1292,13 +1292,13 @@ static int __init ib_uverbs_init(void)
 	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
 	if (ret) {
-		printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
+		pr_err("user_verbs: couldn't create abi_version attribute\n");
 		goto out_class;
 	}
 
 	ret = ib_register_client(&uverbs_client);
 	if (ret) {
-		printk(KERN_ERR "user_verbs: couldn't register client\n");
+		pr_err("user_verbs: couldn't register client\n");
 		goto out_class;
 	}
...