Commit 34d56893 authored by Leon Romanovsky, committed by Jason Gunthorpe

RDMA/cxgb4: Use sizeof() notation

Convert various sizeof call sites to the standard sizeof() notation.
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent a80287c8
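
The change is purely stylistic: sizeof is a C operator, so the bare form `sizeof *p` is legal, but kernel style prefers the parenthesized, function-call-like form, and scripts/checkpatch.pl warns about the bare form (SIZEOF_PARENTHESIS). A minimal standalone sketch of the before/after, with a hypothetical struct name:

    #include <stdio.h>

    struct mpa_hdr { unsigned char plen; };     /* hypothetical stand-in struct */

    int main(void)
    {
            struct mpa_hdr *mpa = NULL;

            /* Old notation: bare operator form. Legal C (sizeof does not
             * evaluate its operand), but flagged by checkpatch.pl. */
            size_t old_len = sizeof *mpa;

            /* New notation used throughout this commit. */
            size_t new_len = sizeof(*mpa);

            printf("%zu %zu\n", old_len, new_len);
            return 0;
    }
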
@@ -953,7 +953,7 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	mpalen = sizeof(*mpa) + ep->plen;
 	if (mpa_rev_to_use == 2)
 		mpalen += sizeof(struct mpa_v2_conn_params);
-	wrlen = roundup(mpalen + sizeof *req, 16);
+	wrlen = roundup(mpalen + sizeof(*req), 16);
 	skb = get_skb(skb, wrlen, GFP_KERNEL);
 	if (!skb) {
 		connect_reply_upcall(ep, -ENOMEM);
@@ -997,8 +997,9 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	}
 	if (mpa_rev_to_use == 2) {
-		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
-					       sizeof (struct mpa_v2_conn_params));
+		mpa->private_data_size =
+			htons(ntohs(mpa->private_data_size) +
+			      sizeof(struct mpa_v2_conn_params));
 		pr_debug("initiator ird %u ord %u\n", ep->ird,
 			 ep->ord);
 		mpa_v2_params.ird = htons((u16)ep->ird);
@@ -1057,7 +1058,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	mpalen = sizeof(*mpa) + plen;
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
 		mpalen += sizeof(struct mpa_v2_conn_params);
-	wrlen = roundup(mpalen + sizeof *req, 16);
+	wrlen = roundup(mpalen + sizeof(*req), 16);
 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 	if (!skb) {
@@ -1088,8 +1089,9 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
-		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
-					       sizeof (struct mpa_v2_conn_params));
+		mpa->private_data_size =
+			htons(ntohs(mpa->private_data_size) +
+			      sizeof(struct mpa_v2_conn_params));
 		mpa_v2_params.ird = htons(((u16)ep->ird) |
 					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
 					   0));
@@ -1136,7 +1138,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	mpalen = sizeof(*mpa) + plen;
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
 		mpalen += sizeof(struct mpa_v2_conn_params);
-	wrlen = roundup(mpalen + sizeof *req, 16);
+	wrlen = roundup(mpalen + sizeof(*req), 16);
 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 	if (!skb) {
@@ -1171,8 +1173,9 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
-		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
-					       sizeof (struct mpa_v2_conn_params));
+		mpa->private_data_size =
+			htons(ntohs(mpa->private_data_size) +
+			      sizeof(struct mpa_v2_conn_params));
 		mpa_v2_params.ird = htons((u16)ep->ird);
 		mpa_v2_params.ord = htons((u16)ep->ord);
 		if (peer2peer && (ep->mpa_attr.p2p_type !=
...
@@ -43,7 +43,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	int wr_len;
 	int ret;
-	wr_len = sizeof *res_wr + sizeof *res;
+	wr_len = sizeof(*res_wr) + sizeof(*res);
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 	res_wr = __skb_put_zero(skb, wr_len);
@@ -117,7 +117,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	}
 	/* build fw_ri_res_wr */
-	wr_len = sizeof *res_wr + sizeof *res;
+	wr_len = sizeof(*res_wr) + sizeof(*res);
 	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb) {
@@ -1095,10 +1095,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	if (ucontext) {
 		ret = -ENOMEM;
-		mm = kmalloc(sizeof *mm, GFP_KERNEL);
+		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
 		if (!mm)
 			goto err_remove_handle;
-		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
+		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
 		if (!mm2)
 			goto err_free_mm;
...
@@ -327,7 +327,7 @@ static int qp_open(struct inode *inode, struct file *file)
 	unsigned long index;
 	int count = 1;
-	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
+	qpd = kmalloc(sizeof(*qpd), GFP_KERNEL);
 	if (!qpd)
 		return -ENOMEM;
@@ -421,7 +421,7 @@ static int stag_open(struct inode *inode, struct file *file)
 	int ret = 0;
 	int count = 1;
-	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
+	stagd = kmalloc(sizeof(*stagd), GFP_KERNEL);
 	if (!stagd) {
 		ret = -ENOMEM;
 		goto out;
@@ -1075,7 +1075,7 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 	pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
 		DRV_VERSION);
-	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx) {
 		ctx = ERR_PTR(-ENOMEM);
 		goto out;
@@ -1243,10 +1243,9 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 	case CXGB4_STATE_START_RECOVERY:
 		pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
 		if (ctx->dev) {
-			struct ib_event event;
+			struct ib_event event = {};
 			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
-			memset(&event, 0, sizeof event);
 			event.event = IB_EVENT_DEVICE_FATAL;
 			event.device = &ctx->dev->ibdev;
 			ib_dispatch_event(&event);
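
The recovery-path hunk above also shows a second cleanup that recurs in this commit: a local that was zeroed with memset(&x, 0, sizeof x) is instead given an empty-brace initializer at its declaration. A minimal sketch of the two forms, using a stand-in struct since struct ib_event needs kernel headers:

    #include <string.h>

    struct event_like { int event; void *device; };  /* stand-in for struct ib_event */

    void old_way(void)
    {
            struct event_like event;

            memset(&event, 0, sizeof(event));  /* zero after declaration */
            event.event = 1;
    }

    void new_way(void)
    {
            /* Zero at declaration; the empty-brace form is GNU C (and C23),
             * which the kernel builds with. */
            struct event_like event = {};

            event.event = 1;
    }
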
...
@@ -130,8 +130,9 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
 			   len;
-		wr_len = roundup(sizeof *req + sizeof *sc +
-				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
+		wr_len = roundup(sizeof(*req) + sizeof(*sc) +
+				 roundup(copy_len, T4_ULPTX_MIN_IO),
+				 16);
 		if (!skb) {
 			skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
...
@@ -271,7 +271,6 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
 		return -EINVAL;
 	dev = to_c4iw_dev(ibdev);
-	memset(props, 0, sizeof *props);
 	memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
 	props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
 	props->fw_ver = dev->rdev.lldi.fw_vers;
...
@@ -303,7 +303,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	wq->rq.msn = 1;
 	/* build fw_ri_res_wr */
-	wr_len = sizeof *res_wr + 2 * sizeof *res;
+	wr_len = sizeof(*res_wr) + 2 * sizeof(*res);
 	if (need_rq)
 		wr_len += sizeof(*res);
 	skb = alloc_skb(wr_len, GFP_KERNEL);
@@ -439,7 +439,7 @@ static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
 			rem -= len;
 		}
 	}
-	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
+	len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp));
 	if (len)
 		memset(dstp, 0, len);
 	immdp->op = FW_RI_DATA_IMMD;
@@ -528,7 +528,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
 					 T4_MAX_SEND_INLINE, &plen);
 			if (ret)
 				return ret;
-			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
+			size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) +
 			       plen;
 		} else {
 			ret = build_isgl((__be64 *)sq->queue,
@@ -537,7 +537,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
 					 wr->sg_list, wr->num_sge, &plen);
 			if (ret)
 				return ret;
-			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
+			size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) +
 			       wr->num_sge * sizeof(struct fw_ri_sge);
 		}
 	} else {
@@ -545,7 +545,7 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
 		wqe->send.u.immd_src[0].r1 = 0;
 		wqe->send.u.immd_src[0].r2 = 0;
 		wqe->send.u.immd_src[0].immdlen = 0;
-		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
+		size = sizeof(wqe->send) + sizeof(struct fw_ri_immd);
 		plen = 0;
 	}
 	*len16 = DIV_ROUND_UP(size, 16);
@@ -579,7 +579,7 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
 					 T4_MAX_WRITE_INLINE, &plen);
 			if (ret)
 				return ret;
-			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
+			size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) +
 			       plen;
 		} else {
 			ret = build_isgl((__be64 *)sq->queue,
@@ -588,7 +588,7 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
 					 wr->sg_list, wr->num_sge, &plen);
 			if (ret)
 				return ret;
-			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
+			size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) +
 			       wr->num_sge * sizeof(struct fw_ri_sge);
 		}
 	} else {
@@ -596,7 +596,7 @@ static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
 		wqe->write.u.immd_src[0].r1 = 0;
 		wqe->write.u.immd_src[0].r2 = 0;
 		wqe->write.u.immd_src[0].immdlen = 0;
-		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
+		size = sizeof(wqe->write) + sizeof(struct fw_ri_immd);
 		plen = 0;
 	}
 	*len16 = DIV_ROUND_UP(size, 16);
@@ -683,7 +683,7 @@ static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
 	}
 	wqe->read.r2 = 0;
 	wqe->read.r5 = 0;
-	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
+	*len16 = DIV_ROUND_UP(sizeof(wqe->read), 16);
 	return 0;
 }
@@ -766,8 +766,8 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
 	if (ret)
 		return ret;
-	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
-			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
+	*len16 = DIV_ROUND_UP(
+		sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16);
 	return 0;
 }
@@ -886,7 +886,7 @@ static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
 {
 	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
 	wqe->inv.r2 = 0;
-	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
+	*len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16);
 	return 0;
 }
@@ -1606,7 +1606,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 				       FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
 	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
-	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
+	wqe->u.terminate.immdlen = cpu_to_be32(sizeof(*term));
 	term = (struct terminate_message *)wqe->u.terminate.termmsg;
 	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
 		term->layer_etype = qhp->attr.layer_etype;
@@ -1751,16 +1751,15 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
 {
 	pr_debug("p2p_type = %d\n", p2p_type);
-	memset(&init->u, 0, sizeof init->u);
+	memset(&init->u, 0, sizeof(init->u));
 	switch (p2p_type) {
 	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
 		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
 		init->u.write.stag_sink = cpu_to_be32(1);
 		init->u.write.to_sink = cpu_to_be64(1);
 		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
-		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
-						   sizeof(struct fw_ri_immd),
-						   16);
+		init->u.write.len16 = DIV_ROUND_UP(
+			sizeof(init->u.write) + sizeof(struct fw_ri_immd), 16);
 		break;
 	case FW_RI_INIT_P2PTYPE_READ_REQ:
 		init->u.write.opcode = FW_RI_RDMA_READ_WR;
@@ -1768,7 +1767,7 @@ static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
 		init->u.read.to_src_lo = cpu_to_be32(1);
 		init->u.read.stag_sink = cpu_to_be32(1);
 		init->u.read.to_sink_lo = cpu_to_be32(1);
-		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
+		init->u.read.len16 = DIV_ROUND_UP(sizeof(init->u.read), 16);
 		break;
 	}
 }
@@ -1782,7 +1781,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
 		 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
-	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
+	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
 	if (!skb) {
 		ret = -ENOMEM;
 		goto out;
@@ -2302,7 +2301,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 		ucontext->key += PAGE_SIZE;
 	}
 	spin_unlock(&ucontext->mmap_lock);
-	ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+	ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (ret)
 		goto err_free_ma_sync_key;
 	sq_key_mm->key = uresp.sq_key;
@@ -2386,7 +2385,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct c4iw_dev *rhp;
 	struct c4iw_qp *qhp;
 	enum c4iw_qp_attr_mask mask = 0;
-	struct c4iw_qp_attributes attrs;
+	struct c4iw_qp_attributes attrs = {};
 	pr_debug("ib_qp %p\n", ibqp);
@@ -2398,7 +2397,6 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (!attr_mask)
 		return 0;
-	memset(&attrs, 0, sizeof attrs);
 	qhp = to_c4iw_qp(ibqp);
 	rhp = qhp->rhp;
@@ -2482,8 +2480,8 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 {
 	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
-	memset(attr, 0, sizeof *attr);
-	memset(init_attr, 0, sizeof *init_attr);
+	memset(attr, 0, sizeof(*attr));
+	memset(init_attr, 0, sizeof(*init_attr));
 	attr->qp_state = to_ib_qp_state(qhp->attr.state);
 	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
 	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
...
@@ -126,7 +126,7 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 		rdev->stats.qid.cur += rdev->qpmask + 1;
 		mutex_unlock(&rdev->stats.lock);
 		for (i = qid+1; i & rdev->qpmask; i++) {
-			entry = kmalloc(sizeof *entry, GFP_KERNEL);
+			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 			if (!entry)
 				goto out;
 			entry->qid = i;
@@ -137,13 +137,13 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 		 * now put the same ids on the qp list since they all
 		 * map to the same db/gts page.
 		 */
-		entry = kmalloc(sizeof *entry, GFP_KERNEL);
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 		if (!entry)
 			goto out;
 		entry->qid = qid;
 		list_add_tail(&entry->entry, &uctx->qpids);
 		for (i = qid+1; i & rdev->qpmask; i++) {
-			entry = kmalloc(sizeof *entry, GFP_KERNEL);
+			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 			if (!entry)
 				goto out;
 			entry->qid = i;
@@ -165,7 +165,7 @@ void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
 {
 	struct c4iw_qid_list *entry;
-	entry = kmalloc(sizeof *entry, GFP_KERNEL);
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return;
 	pr_debug("qid 0x%x\n", qid);
@@ -200,7 +200,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 		rdev->stats.qid.cur += rdev->qpmask + 1;
 		mutex_unlock(&rdev->stats.lock);
 		for (i = qid+1; i & rdev->qpmask; i++) {
-			entry = kmalloc(sizeof *entry, GFP_KERNEL);
+			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 			if (!entry)
 				goto out;
 			entry->qid = i;
@@ -211,13 +211,13 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 		 * now put the same ids on the cq list since they all
 		 * map to the same db/gts page.
 		 */
-		entry = kmalloc(sizeof *entry, GFP_KERNEL);
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 		if (!entry)
 			goto out;
 		entry->qid = qid;
 		list_add_tail(&entry->entry, &uctx->cqids);
 		for (i = qid; i & rdev->qpmask; i++) {
-			entry = kmalloc(sizeof *entry, GFP_KERNEL);
+			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 			if (!entry)
 				goto out;
 			entry->qid = i;
@@ -239,7 +239,7 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
 {
 	struct c4iw_qid_list *entry;
-	entry = kmalloc(sizeof *entry, GFP_KERNEL);
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return;
 	pr_debug("qid 0x%x\n", qid);