Commit 3cd96564 authored by Roland Dreier

IB: Whitespace fixes

Remove some trailing whitespace that has snuck in despite the best
efforts of whitespace=error-all.  Also fix a few other whitespace
bogosities.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent f06d2653
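
For reference, the "whitespace=error-all" mentioned in the message is git's whitespace checking for applied patches. A minimal sketch of that kind of setup, assuming a stock git install rather than the exact kernel-tree policy of the time:

    # Make "git apply"/"git am" reject patches that introduce whitespace
    # errors, reporting every offending line instead of stopping at the first.
    git config apply.whitespace error-all

    # List lines in the working tree or staged changes that would add
    # trailing whitespace before they get committed.
    git diff --check
    git diff --cached --check
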
@@ -14,7 +14,7 @@ config INFINIBAND_USER_MAD
---help---
Userspace InfiniBand Management Datagram (MAD) support. This
is the kernel side of the userspace MAD support, which allows
-userspace processes to send and receive MADs. You will also
+userspace processes to send and receive MADs. You will also
need libibumad from <http://www.openib.org>.
config INFINIBAND_USER_ACCESS
@@ -327,10 +327,10 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
}
EXPORT_SYMBOL(rdma_addr_cancel);
-static int netevent_callback(struct notifier_block *self, unsigned long event,
+static int netevent_callback(struct notifier_block *self, unsigned long event,
void *ctx)
{
-if (event == NETEVENT_NEIGH_UPDATE) {
+if (event == NETEVENT_NEIGH_UPDATE) {
struct neighbour *neigh = ctx;
if (neigh->dev->type == ARPHRD_INFINIBAND &&
@@ -179,7 +179,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
if (IS_ERR(ah))
return PTR_ERR(ah);
-m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
+m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
cm_id_priv->av.pkey_index,
0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC);
@@ -613,7 +613,7 @@ static void cma_destroy_listen(struct rdma_id_private *id_priv)
if (id_priv->cma_dev) {
switch (id_priv->id.device->node_type) {
case IB_NODE_CA:
-if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
+if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
ib_destroy_cm_id(id_priv->cm_id.ib);
break;
default:
@@ -692,13 +692,13 @@ void rdma_destroy_id(struct rdma_cm_id *id)
if (id_priv->cma_dev) {
switch (id->device->node_type) {
case IB_NODE_CA:
-if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
+if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
ib_destroy_cm_id(id_priv->cm_id.ib);
break;
default:
break;
}
-mutex_lock(&lock);
+mutex_lock(&lock);
cma_detach_from_dev(id_priv);
mutex_unlock(&lock);
}
@@ -1492,7 +1492,7 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
if (cma_any_addr(&cur_id->id.route.addr.src_addr))
return -EADDRNOTAVAIL;
cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
return -EADDRINUSE;
@@ -1246,8 +1246,8 @@ static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
int i;
for (i = 0; i < MAX_MGMT_OUI; i++)
-/* Is there matching OUI for this vendor class ? */
-if (!memcmp(vendor_class->oui[i], oui, 3))
+/* Is there matching OUI for this vendor class ? */
+if (!memcmp(vendor_class->oui[i], oui, 3))
return i;
return -1;
@@ -2237,7 +2237,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
&mad_agent_priv->send_list, agent_list) {
if (mad_send_wr->status == IB_WC_SUCCESS) {
-mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
+mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
}
}
@@ -2528,10 +2528,10 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
}
}
sg_list.addr = dma_map_single(qp_info->port_priv->
-device->dma_device,
+device->dma_device,
&mad_priv->grh,
sizeof *mad_priv -
-sizeof mad_priv->header,
+sizeof mad_priv->header,
DMA_FROM_DEVICE);
pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
@@ -2606,7 +2606,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
struct ib_qp *qp;
attr = kmalloc(sizeof *attr, GFP_KERNEL);
-if (!attr) {
+if (!attr) {
printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n");
return -ENOMEM;
}
@@ -391,7 +391,7 @@ static inline int window_size(struct ib_mad_agent_private *agent)
static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
int seg_num)
{
-struct ib_mad_recv_buf *seg_buf;
+struct ib_mad_recv_buf *seg_buf;
int cur_seg_num;
list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
@@ -887,7 +887,7 @@ static void send_handler(struct ib_mad_agent *agent,
idr_remove(&query_idr, query->id);
spin_unlock_irqrestore(&idr_lock, flags);
-ib_free_send_mad(mad_send_wc->send_buf);
+ib_free_send_mad(mad_send_wc->send_buf);
kref_put(&query->sm_ah->ref, free_sm_ah);
query->release(query);
}
@@ -68,7 +68,7 @@ struct port_table_attribute {
int index;
};
-static inline int ibdev_is_alive(const struct ib_device *dev)
+static inline int ibdev_is_alive(const struct ib_device *dev)
{
return dev->reg_state == IB_DEV_REGISTERED;
}
@@ -309,9 +309,9 @@ static int ib_ucm_event_process(struct ib_cm_event *evt,
info = evt->param.apr_rcvd.apr_info;
break;
case IB_CM_SIDR_REQ_RECEIVED:
-uvt->resp.u.sidr_req_resp.pkey =
+uvt->resp.u.sidr_req_resp.pkey =
evt->param.sidr_req_rcvd.pkey;
-uvt->resp.u.sidr_req_resp.port =
+uvt->resp.u.sidr_req_resp.port =
evt->param.sidr_req_rcvd.port;
uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE;
break;
@@ -1237,7 +1237,7 @@ static struct class ucm_class = {
static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
struct ib_ucm_device *dev;
dev = container_of(class_dev, struct ib_ucm_device, class_dev);
return sprintf(buf, "%s\n", dev->ib_dev->name);
}
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
-* Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+* Copyright (c) 2005 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -1676,7 +1676,6 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
break;
}
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
ret = -EFAULT;
@@ -1726,7 +1725,6 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
break;
}
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
ret = -EFAULT;
@@ -2126,9 +2126,9 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
dd->ipath_rx_pol_inv = new_pol_inv;
val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
-INFINIPATH_XGXS_RX_POL_SHIFT);
-val |= ((u64)dd->ipath_rx_pol_inv) <<
-INFINIPATH_XGXS_RX_POL_SHIFT;
+INFINIPATH_XGXS_RX_POL_SHIFT);
+val |= ((u64)dd->ipath_rx_pol_inv) <<
+INFINIPATH_XGXS_RX_POL_SHIFT;
ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
}
return 0;
@@ -544,11 +544,11 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
wq = &(*cur_qp)->rq;
wqe = be32_to_cpu(cqe->wqe);
wqe_index = wqe >> wq->wqe_shift;
-/*
- * WQE addr == base - 1 might be reported in receive completion
- * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
- * Arbel FW 5.1.400. This bug should be fixed in later FW revs.
- */
+/*
+ * WQE addr == base - 1 might be reported in receive completion
+ * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
+ * Arbel FW 5.1.400. This bug should be fixed in later FW revs.
+ */
if (unlikely(wqe_index < 0))
wqe_index = wq->max - 1;
entry->wr_id = (*cur_qp)->wrid[wqe_index];
@@ -795,7 +795,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
}
if (priv->broadcast) {
-rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
+rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
list_add_tail(&priv->broadcast->list, &remove_list);
priv->broadcast = NULL;
}
@@ -330,7 +330,7 @@ static int srp_send_req(struct srp_target_port *target)
req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
/*
-* In the published SRP specification (draft rev. 16a), the
+* In the published SRP specification (draft rev. 16a), the
* port identifier format is 8 bytes of ID extension followed
* by 8 bytes of GUID. Older drafts put the two halves in the
* opposite order, so that the GUID comes first.