Commit 700456bd authored by Joe Perches, committed by Doug Ledford

iw_cxgb4: Use more common logging style

Convert printks to pr_<level>

Miscellanea:

o Coalesce formats
o Realign arguments
Signed-off-by: Joe Perches <joe@perches.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent b7b37ee0
...@@ -467,7 +467,7 @@ static struct net_device *get_real_dev(struct net_device *egress_dev) ...@@ -467,7 +467,7 @@ static struct net_device *get_real_dev(struct net_device *egress_dev)
static void arp_failure_discard(void *handle, struct sk_buff *skb) static void arp_failure_discard(void *handle, struct sk_buff *skb)
{ {
pr_err(MOD "ARP failure\n"); pr_err("ARP failure\n");
kfree_skb(skb); kfree_skb(skb);
} }
...@@ -528,7 +528,7 @@ static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb) ...@@ -528,7 +528,7 @@ static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{ {
struct c4iw_ep *ep = handle; struct c4iw_ep *ep = handle;
pr_err(MOD "ARP failure during accept - tid %u -dropping connection\n", pr_err("ARP failure during accept - tid %u - dropping connection\n",
ep->hwtid); ep->hwtid);
__state_set(&ep->com, DEAD); __state_set(&ep->com, DEAD);
...@@ -542,7 +542,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) ...@@ -542,7 +542,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{ {
struct c4iw_ep *ep = handle; struct c4iw_ep *ep = handle;
printk(KERN_ERR MOD "ARP failure during connect\n"); pr_err("ARP failure during connect\n");
connect_reply_upcall(ep, -EHOSTUNREACH); connect_reply_upcall(ep, -EHOSTUNREACH);
__state_set(&ep->com, DEAD); __state_set(&ep->com, DEAD);
if (ep->com.remote_addr.ss_family == AF_INET6) { if (ep->com.remote_addr.ss_family == AF_INET6) {
...@@ -724,8 +724,7 @@ static int send_connect(struct c4iw_ep *ep) ...@@ -724,8 +724,7 @@ static int send_connect(struct c4iw_ep *ep)
skb = get_skb(NULL, wrlen, GFP_KERNEL); skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) { if (!skb) {
printk(KERN_ERR MOD "%s - failed to alloc skb.\n", pr_err("%s - failed to alloc skb\n", __func__);
__func__);
return -ENOMEM; return -ENOMEM;
} }
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
...@@ -1023,7 +1022,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) ...@@ -1023,7 +1022,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
skb = get_skb(NULL, wrlen, GFP_KERNEL); skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) { if (!skb) {
printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); pr_err("%s - cannot alloc skb!\n", __func__);
return -ENOMEM; return -ENOMEM;
} }
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
...@@ -1103,7 +1102,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) ...@@ -1103,7 +1102,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
skb = get_skb(NULL, wrlen, GFP_KERNEL); skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) { if (!skb) {
printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); pr_err("%s - cannot alloc skb!\n", __func__);
return -ENOMEM; return -ENOMEM;
} }
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
...@@ -1379,7 +1378,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits) ...@@ -1379,7 +1378,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
skb = get_skb(NULL, wrlen, GFP_KERNEL); skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) { if (!skb) {
printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); pr_err("update_rx_credits - cannot alloc skb!\n");
return 0; return 0;
} }
...@@ -1454,8 +1453,8 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) ...@@ -1454,8 +1453,8 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
/* Validate MPA header. */ /* Validate MPA header. */
if (mpa->revision > mpa_rev) { if (mpa->revision > mpa_rev) {
printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
" Received = %d\n", __func__, mpa_rev, mpa->revision); __func__, mpa_rev, mpa->revision);
err = -EPROTO; err = -EPROTO;
goto err_stop_timer; goto err_stop_timer;
} }
...@@ -1610,7 +1609,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) ...@@ -1610,7 +1609,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* supports, generate TERM message * supports, generate TERM message
*/ */
if (rtr_mismatch) { if (rtr_mismatch) {
printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); pr_err("%s: RTR mismatch, sending TERM\n", __func__);
attrs.layer_etype = LAYER_MPA | DDP_LLP; attrs.layer_etype = LAYER_MPA | DDP_LLP;
attrs.ecode = MPA_NOMATCH_RTR; attrs.ecode = MPA_NOMATCH_RTR;
attrs.next_state = C4IW_QP_STATE_TERMINATE; attrs.next_state = C4IW_QP_STATE_TERMINATE;
...@@ -1629,8 +1628,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) ...@@ -1629,8 +1628,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* initiator ORD. * initiator ORD.
*/ */
if (insuff_ird) { if (insuff_ird) {
printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", pr_err("%s: Insufficient IRD, sending TERM\n", __func__);
__func__);
attrs.layer_etype = LAYER_MPA | DDP_LLP; attrs.layer_etype = LAYER_MPA | DDP_LLP;
attrs.ecode = MPA_INSUFF_IRD; attrs.ecode = MPA_INSUFF_IRD;
attrs.next_state = C4IW_QP_STATE_TERMINATE; attrs.next_state = C4IW_QP_STATE_TERMINATE;
...@@ -1701,8 +1699,8 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) ...@@ -1701,8 +1699,8 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
* Validate MPA Header. * Validate MPA Header.
*/ */
if (mpa->revision > mpa_rev) { if (mpa->revision > mpa_rev) {
printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
" Received = %d\n", __func__, mpa_rev, mpa->revision); __func__, mpa_rev, mpa->revision);
goto err_stop_timer; goto err_stop_timer;
} }
...@@ -1866,7 +1864,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -1866,7 +1864,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
ep = get_ep_from_tid(dev, tid); ep = get_ep_from_tid(dev, tid);
if (!ep) { if (!ep) {
printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n"); pr_warn("Abort rpl to freed endpoint\n");
return 0; return 0;
} }
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
...@@ -1878,8 +1876,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -1878,8 +1876,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
release = 1; release = 1;
break; break;
default: default:
printk(KERN_ERR "%s ep %p state %d\n", pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
__func__, ep, ep->com.state);
break; break;
} }
mutex_unlock(&ep->com.mutex); mutex_unlock(&ep->com.mutex);
...@@ -2124,7 +2121,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep) ...@@ -2124,7 +2121,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
*/ */
ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
if (ep->atid == -1) { if (ep->atid == -1) {
pr_err("%s - cannot alloc atid.\n", __func__); pr_err("%s - cannot alloc atid\n", __func__);
err = -ENOMEM; err = -ENOMEM;
goto fail2; goto fail2;
} }
...@@ -2151,7 +2148,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep) ...@@ -2151,7 +2148,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
ra = (__u8 *)&raddr6->sin6_addr; ra = (__u8 *)&raddr6->sin6_addr;
} }
if (!ep->dst) { if (!ep->dst) {
pr_err("%s - cannot find route.\n", __func__); pr_err("%s - cannot find route\n", __func__);
err = -EHOSTUNREACH; err = -EHOSTUNREACH;
goto fail3; goto fail3;
} }
...@@ -2159,7 +2156,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep) ...@@ -2159,7 +2156,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
ep->com.dev->rdev.lldi.adapter_type, ep->com.dev->rdev.lldi.adapter_type,
ep->com.cm_id->tos); ep->com.cm_id->tos);
if (err) { if (err) {
pr_err("%s - cannot alloc l2e.\n", __func__); pr_err("%s - cannot alloc l2e\n", __func__);
goto fail4; goto fail4;
} }
...@@ -2493,15 +2490,13 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -2493,15 +2490,13 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
&parent_ep->com.local_addr)->sin6_scope_id); &parent_ep->com.local_addr)->sin6_scope_id);
} }
if (!dst) { if (!dst) {
printk(KERN_ERR MOD "%s - failed to find dst entry!\n", pr_err("%s - failed to find dst entry!\n", __func__);
__func__);
goto reject; goto reject;
} }
child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
if (!child_ep) { if (!child_ep) {
printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", pr_err("%s - failed to allocate ep entry!\n", __func__);
__func__);
dst_release(dst); dst_release(dst);
goto reject; goto reject;
} }
...@@ -2509,8 +2504,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -2509,8 +2504,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
err = import_ep(child_ep, iptype, peer_ip, dst, dev, false, err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
parent_ep->com.dev->rdev.lldi.adapter_type, tos); parent_ep->com.dev->rdev.lldi.adapter_type, tos);
if (err) { if (err) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", pr_err("%s - failed to allocate l2t entry!\n", __func__);
__func__);
dst_release(dst); dst_release(dst);
kfree(child_ep); kfree(child_ep);
goto reject; goto reject;
...@@ -2797,9 +2791,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -2797,9 +2791,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
&attrs, 1); &attrs, 1);
if (ret) if (ret)
printk(KERN_ERR MOD pr_err("%s - qp <- error failed!\n", __func__);
"%s - qp <- error failed!\n",
__func__);
} }
peer_abort_upcall(ep); peer_abort_upcall(ep);
break; break;
...@@ -2918,13 +2910,13 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -2918,13 +2910,13 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
BUG_ON(!ep); BUG_ON(!ep);
if (ep && ep->com.qp) { if (ep && ep->com.qp) {
printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, pr_warn("TERM received tid %u qpid %u\n",
ep->com.qp->wq.sq.qid); tid, ep->com.qp->wq.sq.qid);
attrs.next_state = C4IW_QP_STATE_TERMINATE; attrs.next_state = C4IW_QP_STATE_TERMINATE;
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
} else } else
printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); pr_warn("TERM received tid %u no ep/qp\n", tid);
c4iw_put_ep(&ep->com); c4iw_put_ep(&ep->com);
return 0; return 0;
...@@ -3188,7 +3180,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3188,7 +3180,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
} }
ep = alloc_ep(sizeof(*ep), GFP_KERNEL); ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
if (!ep) { if (!ep) {
printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); pr_err("%s - cannot alloc ep\n", __func__);
err = -ENOMEM; err = -ENOMEM;
goto out; goto out;
} }
...@@ -3228,7 +3220,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3228,7 +3220,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
*/ */
ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
if (ep->atid == -1) { if (ep->atid == -1) {
printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); pr_err("%s - cannot alloc atid\n", __func__);
err = -ENOMEM; err = -ENOMEM;
goto fail2; goto fail2;
} }
...@@ -3292,7 +3284,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3292,7 +3284,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
raddr6->sin6_scope_id); raddr6->sin6_scope_id);
} }
if (!ep->dst) { if (!ep->dst) {
printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); pr_err("%s - cannot find route\n", __func__);
err = -EHOSTUNREACH; err = -EHOSTUNREACH;
goto fail3; goto fail3;
} }
...@@ -3300,7 +3292,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3300,7 +3292,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true, err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
ep->com.dev->rdev.lldi.adapter_type, cm_id->tos); ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
if (err) { if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); pr_err("%s - cannot alloc l2e\n", __func__);
goto fail4; goto fail4;
} }
...@@ -3414,7 +3406,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) ...@@ -3414,7 +3406,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
ep = alloc_ep(sizeof(*ep), GFP_KERNEL); ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
if (!ep) { if (!ep) {
printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); pr_err("%s - cannot alloc ep\n", __func__);
err = -ENOMEM; err = -ENOMEM;
goto fail1; goto fail1;
} }
...@@ -3439,7 +3431,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) ...@@ -3439,7 +3431,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->m_local_addr.ss_family, ep); cm_id->m_local_addr.ss_family, ep);
if (ep->stid == -1) { if (ep->stid == -1) {
printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); pr_err("%s - cannot alloc stid\n", __func__);
err = -ENOMEM; err = -ENOMEM;
goto fail2; goto fail2;
} }
...@@ -3600,8 +3592,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) ...@@ -3600,8 +3592,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
C4IW_QP_ATTR_NEXT_STATE, C4IW_QP_ATTR_NEXT_STATE,
&attrs, 1); &attrs, 1);
if (ret) if (ret)
pr_err(MOD pr_err("%s - qp <- error failed!\n",
"%s - qp <- error failed!\n",
__func__); __func__);
} }
fatal = 1; fatal = 1;
...@@ -4157,8 +4148,8 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -4157,8 +4148,8 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_set_tcb_rpl *rpl = cplhdr(skb); struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE) { if (rpl->status != CPL_ERR_NONE) {
printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
"for tid %u\n", rpl->status, GET_TID(rpl)); rpl->status, GET_TID(rpl));
} }
kfree_skb(skb); kfree_skb(skb);
return 0; return 0;
...@@ -4186,8 +4177,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -4186,8 +4177,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
sched(dev, skb); sched(dev, skb);
break; break;
default: default:
printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, pr_err("%s unexpected fw6 msg type %u\n",
rpl->type); __func__, rpl->type);
kfree_skb(skb); kfree_skb(skb);
break; break;
} }
...@@ -4203,8 +4194,7 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -4203,8 +4194,7 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
ep = get_ep_from_tid(dev, tid); ep = get_ep_from_tid(dev, tid);
/* This EP will be dereferenced in peer_abort() */ /* This EP will be dereferenced in peer_abort() */
if (!ep) { if (!ep) {
printk(KERN_WARNING MOD pr_warn("Abort on non-existent endpoint, tid %d\n", tid);
"Abort on non-existent endpoint, tid %d\n", tid);
kfree_skb(skb); kfree_skb(skb);
return 0; return 0;
} }
......
...@@ -159,7 +159,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, ...@@ -159,7 +159,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
&cq->bar2_qid, &cq->bar2_qid,
user ? &cq->bar2_pa : NULL); user ? &cq->bar2_pa : NULL);
if (user && !cq->bar2_pa) { if (user && !cq->bar2_pa) {
pr_warn(MOD "%s: cqid %u not in BAR2 range.\n", pr_warn("%s: cqid %u not in BAR2 range\n",
pci_name(rdev->lldi.pdev), cq->cqid); pci_name(rdev->lldi.pdev), cq->cqid);
ret = -EINVAL; ret = -EINVAL;
goto err4; goto err4;
...@@ -766,8 +766,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) ...@@ -766,8 +766,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
wc->opcode = IB_WC_SEND; wc->opcode = IB_WC_SEND;
break; break;
default: default:
printk(KERN_ERR MOD "Unexpected opcode %d " pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
"in the CQE received for QPID=0x%0x\n",
CQE_OPCODE(&cqe), CQE_QPID(&cqe)); CQE_OPCODE(&cqe), CQE_QPID(&cqe));
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
...@@ -822,8 +821,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) ...@@ -822,8 +821,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
wc->status = IB_WC_WR_FLUSH_ERR; wc->status = IB_WC_WR_FLUSH_ERR;
break; break;
default: default:
printk(KERN_ERR MOD pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
"Unexpected cqe_status 0x%x for QPID=0x%0x\n",
CQE_STATUS(&cqe), CQE_QPID(&cqe)); CQE_STATUS(&cqe), CQE_QPID(&cqe));
wc->status = IB_WC_FATAL_ERR; wc->status = IB_WC_FATAL_ERR;
} }
......
...@@ -334,7 +334,7 @@ static int qp_release(struct inode *inode, struct file *file) ...@@ -334,7 +334,7 @@ static int qp_release(struct inode *inode, struct file *file)
{ {
struct c4iw_debugfs_data *qpd = file->private_data; struct c4iw_debugfs_data *qpd = file->private_data;
if (!qpd) { if (!qpd) {
printk(KERN_INFO "%s null qpd?\n", __func__); pr_info("%s null qpd?\n", __func__);
return 0; return 0;
} }
vfree(qpd->buf); vfree(qpd->buf);
...@@ -422,7 +422,7 @@ static int stag_release(struct inode *inode, struct file *file) ...@@ -422,7 +422,7 @@ static int stag_release(struct inode *inode, struct file *file)
{ {
struct c4iw_debugfs_data *stagd = file->private_data; struct c4iw_debugfs_data *stagd = file->private_data;
if (!stagd) { if (!stagd) {
printk(KERN_INFO "%s null stagd?\n", __func__); pr_info("%s null stagd?\n", __func__);
return 0; return 0;
} }
vfree(stagd->buf); vfree(stagd->buf);
...@@ -796,15 +796,14 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) ...@@ -796,15 +796,14 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
* cqid and qpid range must match for now. * cqid and qpid range must match for now.
*/ */
if (rdev->lldi.udb_density != rdev->lldi.ucq_density) { if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n", pr_err("%s: unsupported udb/ucq densities %u/%u\n",
pci_name(rdev->lldi.pdev), rdev->lldi.udb_density, pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
rdev->lldi.ucq_density); rdev->lldi.ucq_density);
return -EINVAL; return -EINVAL;
} }
if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start || if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) { rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
pr_err(MOD "%s: unsupported qp and cq id ranges " pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
"qp start %u size %u cq start %u size %u\n",
pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start, pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size, rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size,
rdev->lldi.vr->cq.size); rdev->lldi.vr->cq.size);
...@@ -843,22 +842,22 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) ...@@ -843,22 +842,22 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD); err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
if (err) { if (err) {
printk(KERN_ERR MOD "error %d initializing resources\n", err); pr_err("error %d initializing resources\n", err);
return err; return err;
} }
err = c4iw_pblpool_create(rdev); err = c4iw_pblpool_create(rdev);
if (err) { if (err) {
printk(KERN_ERR MOD "error %d initializing pbl pool\n", err); pr_err("error %d initializing pbl pool\n", err);
goto destroy_resource; goto destroy_resource;
} }
err = c4iw_rqtpool_create(rdev); err = c4iw_rqtpool_create(rdev);
if (err) { if (err) {
printk(KERN_ERR MOD "error %d initializing rqt pool\n", err); pr_err("error %d initializing rqt pool\n", err);
goto destroy_pblpool; goto destroy_pblpool;
} }
err = c4iw_ocqp_pool_create(rdev); err = c4iw_ocqp_pool_create(rdev);
if (err) { if (err) {
printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err); pr_err("error %d initializing ocqp pool\n", err);
goto destroy_rqtpool; goto destroy_rqtpool;
} }
rdev->status_page = (struct t4_dev_status_page *) rdev->status_page = (struct t4_dev_status_page *)
...@@ -954,17 +953,17 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) ...@@ -954,17 +953,17 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
int ret; int ret;
if (!rdma_supported(infop)) { if (!rdma_supported(infop)) {
printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n", pr_info("%s: RDMA not supported on this device\n",
pci_name(infop->pdev)); pci_name(infop->pdev));
return ERR_PTR(-ENOSYS); return ERR_PTR(-ENOSYS);
} }
if (!ocqp_supported(infop)) if (!ocqp_supported(infop))
pr_info("%s: On-Chip Queues not supported on this device.\n", pr_info("%s: On-Chip Queues not supported on this device\n",
pci_name(infop->pdev)); pci_name(infop->pdev));
devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
if (!devp) { if (!devp) {
printk(KERN_ERR MOD "Cannot allocate ib device\n"); pr_err("Cannot allocate ib device\n");
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
devp->rdev.lldi = *infop; devp->rdev.lldi = *infop;
...@@ -1000,7 +999,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) ...@@ -1000,7 +999,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa, devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
pci_resource_len(devp->rdev.lldi.pdev, 2)); pci_resource_len(devp->rdev.lldi.pdev, 2));
if (!devp->rdev.bar2_kva) { if (!devp->rdev.bar2_kva) {
pr_err(MOD "Unable to ioremap BAR2\n"); pr_err("Unable to ioremap BAR2\n");
ib_dealloc_device(&devp->ibdev); ib_dealloc_device(&devp->ibdev);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
...@@ -1012,7 +1011,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) ...@@ -1012,7 +1011,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
devp->rdev.lldi.vr->ocq.size); devp->rdev.lldi.vr->ocq.size);
if (!devp->rdev.oc_mw_kva) { if (!devp->rdev.oc_mw_kva) {
pr_err(MOD "Unable to ioremap onchip mem\n"); pr_err("Unable to ioremap onchip mem\n");
ib_dealloc_device(&devp->ibdev); ib_dealloc_device(&devp->ibdev);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
...@@ -1025,7 +1024,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) ...@@ -1025,7 +1024,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
ret = c4iw_rdev_open(&devp->rdev); ret = c4iw_rdev_open(&devp->rdev);
if (ret) { if (ret) {
printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); pr_err("Unable to open CXIO rdev err %d\n", ret);
ib_dealloc_device(&devp->ibdev); ib_dealloc_device(&devp->ibdev);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
...@@ -1138,8 +1137,7 @@ static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl, ...@@ -1138,8 +1137,7 @@ static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
goto out; goto out;
if (c4iw_handlers[opcode] == NULL) { if (c4iw_handlers[opcode] == NULL) {
pr_info("%s no handler opcode 0x%x...\n", __func__, pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
opcode);
kfree_skb(skb); kfree_skb(skb);
goto out; goto out;
} }
...@@ -1176,13 +1174,11 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, ...@@ -1176,13 +1174,11 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
if (recv_rx_pkt(dev, gl, rsp)) if (recv_rx_pkt(dev, gl, rsp))
return 0; return 0;
pr_info("%s: unexpected FL contents at %p, " \ pr_info("%s: unexpected FL contents at %p, RSS %#llx, FL %#llx, len %u\n",
"RSS %#llx, FL %#llx, len %u\n", pci_name(ctx->lldi.pdev), gl->va,
pci_name(ctx->lldi.pdev), gl->va, be64_to_cpu(*rsp),
(unsigned long long)be64_to_cpu(*rsp), be64_to_cpu(*(__force __be64 *)gl->va),
(unsigned long long)be64_to_cpu( gl->tot_len);
*(__force __be64 *)gl->va),
gl->tot_len);
return 0; return 0;
} else { } else {
...@@ -1195,8 +1191,7 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, ...@@ -1195,8 +1191,7 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
if (c4iw_handlers[opcode]) { if (c4iw_handlers[opcode]) {
c4iw_handlers[opcode](dev, skb); c4iw_handlers[opcode](dev, skb);
} else { } else {
pr_info("%s no handler opcode 0x%x...\n", __func__, pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
opcode);
kfree_skb(skb); kfree_skb(skb);
} }
...@@ -1212,14 +1207,13 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) ...@@ -1212,14 +1207,13 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
PDBG("%s new_state %u\n", __func__, new_state); PDBG("%s new_state %u\n", __func__, new_state);
switch (new_state) { switch (new_state) {
case CXGB4_STATE_UP: case CXGB4_STATE_UP:
printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev)); pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
if (!ctx->dev) { if (!ctx->dev) {
int ret; int ret;
ctx->dev = c4iw_alloc(&ctx->lldi); ctx->dev = c4iw_alloc(&ctx->lldi);
if (IS_ERR(ctx->dev)) { if (IS_ERR(ctx->dev)) {
printk(KERN_ERR MOD pr_err("%s: initialization failed: %ld\n",
"%s: initialization failed: %ld\n",
pci_name(ctx->lldi.pdev), pci_name(ctx->lldi.pdev),
PTR_ERR(ctx->dev)); PTR_ERR(ctx->dev));
ctx->dev = NULL; ctx->dev = NULL;
...@@ -1227,22 +1221,19 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) ...@@ -1227,22 +1221,19 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
} }
ret = c4iw_register_device(ctx->dev); ret = c4iw_register_device(ctx->dev);
if (ret) { if (ret) {
printk(KERN_ERR MOD pr_err("%s: RDMA registration failed: %d\n",
"%s: RDMA registration failed: %d\n",
pci_name(ctx->lldi.pdev), ret); pci_name(ctx->lldi.pdev), ret);
c4iw_dealloc(ctx); c4iw_dealloc(ctx);
} }
} }
break; break;
case CXGB4_STATE_DOWN: case CXGB4_STATE_DOWN:
printk(KERN_INFO MOD "%s: Down\n", pr_info("%s: Down\n", pci_name(ctx->lldi.pdev));
pci_name(ctx->lldi.pdev));
if (ctx->dev) if (ctx->dev)
c4iw_remove(ctx); c4iw_remove(ctx);
break; break;
case CXGB4_STATE_START_RECOVERY: case CXGB4_STATE_START_RECOVERY:
printk(KERN_INFO MOD "%s: Fatal Error\n", pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
pci_name(ctx->lldi.pdev));
if (ctx->dev) { if (ctx->dev) {
struct ib_event event; struct ib_event event;
...@@ -1255,8 +1246,7 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) ...@@ -1255,8 +1246,7 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
} }
break; break;
case CXGB4_STATE_DETACH: case CXGB4_STATE_DETACH:
printk(KERN_INFO MOD "%s: Detach\n", pr_info("%s: Detach\n", pci_name(ctx->lldi.pdev));
pci_name(ctx->lldi.pdev));
if (ctx->dev) if (ctx->dev)
c4iw_remove(ctx); c4iw_remove(ctx);
break; break;
...@@ -1406,9 +1396,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list) ...@@ -1406,9 +1396,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
t4_sq_host_wq_pidx(&qp->wq), t4_sq_host_wq_pidx(&qp->wq),
t4_sq_wq_size(&qp->wq)); t4_sq_wq_size(&qp->wq));
if (ret) { if (ret) {
pr_err(MOD "%s: Fatal error - " pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
"DB overflow recovery failed - "
"error syncing SQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.sq.qid); pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
spin_unlock(&qp->lock); spin_unlock(&qp->lock);
spin_unlock_irq(&qp->rhp->lock); spin_unlock_irq(&qp->rhp->lock);
...@@ -1422,9 +1410,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list) ...@@ -1422,9 +1410,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
t4_rq_wq_size(&qp->wq)); t4_rq_wq_size(&qp->wq));
if (ret) { if (ret) {
pr_err(MOD "%s: Fatal error - " pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
"DB overflow recovery failed - "
"error syncing RQ qid %u\n",
pci_name(ctx->lldi.pdev), qp->wq.rq.qid); pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
spin_unlock(&qp->lock); spin_unlock(&qp->lock);
spin_unlock_irq(&qp->rhp->lock); spin_unlock_irq(&qp->rhp->lock);
...@@ -1455,7 +1441,7 @@ static void recover_queues(struct uld_ctx *ctx) ...@@ -1455,7 +1441,7 @@ static void recover_queues(struct uld_ctx *ctx)
/* flush the SGE contexts */ /* flush the SGE contexts */
ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]); ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
if (ret) { if (ret) {
printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n", pr_err("%s: Fatal error - DB overflow recovery failed\n",
pci_name(ctx->lldi.pdev)); pci_name(ctx->lldi.pdev));
return; return;
} }
...@@ -1513,8 +1499,8 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...) ...@@ -1513,8 +1499,8 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
mutex_unlock(&ctx->dev->rdev.stats.lock); mutex_unlock(&ctx->dev->rdev.stats.lock);
break; break;
default: default:
printk(KERN_WARNING MOD "%s: unknown control cmd %u\n", pr_warn("%s: unknown control cmd %u\n",
pci_name(ctx->lldi.pdev), control); pci_name(ctx->lldi.pdev), control);
break; break;
} }
return 0; return 0;
...@@ -1543,8 +1529,7 @@ static int __init c4iw_init_module(void) ...@@ -1543,8 +1529,7 @@ static int __init c4iw_init_module(void)
c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL); c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
if (!c4iw_debugfs_root) if (!c4iw_debugfs_root)
printk(KERN_WARNING MOD pr_warn("could not create debugfs entry, continuing\n");
"could not create debugfs entry, continuing\n");
cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
......
...@@ -124,8 +124,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) ...@@ -124,8 +124,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
spin_lock_irq(&dev->lock); spin_lock_irq(&dev->lock);
qhp = get_qhp(dev, CQE_QPID(err_cqe)); qhp = get_qhp(dev, CQE_QPID(err_cqe));
if (!qhp) { if (!qhp) {
printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d " pr_err("BAD AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
"status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
CQE_QPID(err_cqe), CQE_QPID(err_cqe),
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe), CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe), CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
...@@ -140,8 +139,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) ...@@ -140,8 +139,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
cqid = qhp->attr.rcq; cqid = qhp->attr.rcq;
chp = get_chp(dev, cqid); chp = get_chp(dev, cqid);
if (!chp) { if (!chp) {
printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d " pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
"status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
cqid, CQE_QPID(err_cqe), cqid, CQE_QPID(err_cqe),
CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe), CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe), CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
...@@ -165,7 +163,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) ...@@ -165,7 +163,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
/* Completion Events */ /* Completion Events */
case T4_ERR_SUCCESS: case T4_ERR_SUCCESS:
printk(KERN_ERR MOD "AE with status 0!\n"); pr_err("AE with status 0!\n");
break; break;
case T4_ERR_STAG: case T4_ERR_STAG:
...@@ -207,7 +205,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) ...@@ -207,7 +205,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
break; break;
default: default:
printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n", pr_err("Unknown T4 status 0x%x QPID 0x%x\n",
CQE_STATUS(err_cqe), qhp->wq.sq.qid); CQE_STATUS(err_cqe), qhp->wq.sq.qid);
post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL); post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
break; break;
......
...@@ -64,6 +64,12 @@ ...@@ -64,6 +64,12 @@
#define DRV_NAME "iw_cxgb4" #define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":" #define MOD DRV_NAME ":"
#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
extern int c4iw_debug; extern int c4iw_debug;
#define PDBG(fmt, args...) \ #define PDBG(fmt, args...) \
do { \ do { \
......
...@@ -234,10 +234,8 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, ...@@ -234,10 +234,8 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
if (is_t5(rdev->lldi.adapter_type) && use_dsgl) { if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
if (len > inline_threshold) { if (len > inline_threshold) {
if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) { if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) {
printk_ratelimited(KERN_WARNING pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
"%s: dma map" pci_name(rdev->lldi.pdev));
" failure (non fatal)\n",
pci_name(rdev->lldi.pdev));
return _c4iw_write_mem_inline(rdev, addr, len, return _c4iw_write_mem_inline(rdev, addr, len,
data, skb); data, skb);
} else { } else {
......
...@@ -123,7 +123,6 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev, ...@@ -123,7 +123,6 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
{ {
struct c4iw_ucontext *context; struct c4iw_ucontext *context;
struct c4iw_dev *rhp = to_c4iw_dev(ibdev); struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
static int warned;
struct c4iw_alloc_ucontext_resp uresp; struct c4iw_alloc_ucontext_resp uresp;
int ret = 0; int ret = 0;
struct c4iw_mm_entry *mm = NULL; struct c4iw_mm_entry *mm = NULL;
...@@ -141,8 +140,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev, ...@@ -141,8 +140,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
kref_init(&context->kref); kref_init(&context->kref);
if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) { if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
if (!warned++) pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED; rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
} else { } else {
mm = kmalloc(sizeof(*mm), GFP_KERNEL); mm = kmalloc(sizeof(*mm), GFP_KERNEL);
......
...@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, ...@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
* User mode must have bar2 access. * User mode must have bar2 access.
*/ */
if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) { if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n", pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
goto free_dma; goto free_dma;
} }
...@@ -1671,8 +1671,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, ...@@ -1671,8 +1671,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
goto err; goto err;
break; break;
default: default:
printk(KERN_ERR "%s in a bad state %d\n", pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
__func__, qhp->attr.state);
ret = -EINVAL; ret = -EINVAL;
goto err; goto err;
break; break;
......
...@@ -293,10 +293,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev) ...@@ -293,10 +293,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
PDBG("%s failed to add PBL chunk (%x/%x)\n", PDBG("%s failed to add PBL chunk (%x/%x)\n",
__func__, pbl_start, pbl_chunk); __func__, pbl_start, pbl_chunk);
if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) { if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
printk(KERN_WARNING MOD pr_warn("Failed to add all PBL chunks (%x/%x)\n",
"Failed to add all PBL chunks (%x/%x)\n", pbl_start, pbl_top - pbl_start);
pbl_start,
pbl_top - pbl_start);
return 0; return 0;
} }
pbl_chunk >>= 1; pbl_chunk >>= 1;
...@@ -326,7 +324,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) ...@@ -326,7 +324,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6); PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
if (!addr) if (!addr)
pr_warn_ratelimited(MOD "%s: Out of RQT memory\n", pr_warn_ratelimited("%s: Out of RQT memory\n",
pci_name(rdev->lldi.pdev)); pci_name(rdev->lldi.pdev));
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
if (addr) { if (addr) {
...@@ -366,9 +364,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev) ...@@ -366,9 +364,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
PDBG("%s failed to add RQT chunk (%x/%x)\n", PDBG("%s failed to add RQT chunk (%x/%x)\n",
__func__, rqt_start, rqt_chunk); __func__, rqt_start, rqt_chunk);
if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) { if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
printk(KERN_WARNING MOD pr_warn("Failed to add all RQT chunks (%x/%x)\n",
"Failed to add all RQT chunks (%x/%x)\n", rqt_start, rqt_top - rqt_start);
rqt_start, rqt_top - rqt_start);
return 0; return 0;
} }
rqt_chunk >>= 1; rqt_chunk >>= 1;
...@@ -432,9 +429,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev) ...@@ -432,9 +429,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
PDBG("%s failed to add OCQP chunk (%x/%x)\n", PDBG("%s failed to add OCQP chunk (%x/%x)\n",
__func__, start, chunk); __func__, start, chunk);
if (chunk <= 1024 << MIN_OCQP_SHIFT) { if (chunk <= 1024 << MIN_OCQP_SHIFT) {
printk(KERN_WARNING MOD pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
"Failed to add all OCQP chunks (%x/%x)\n", start, top - start);
start, top - start);
return 0; return 0;
} }
chunk >>= 1; chunk >>= 1;
......
...@@ -656,7 +656,7 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) ...@@ -656,7 +656,7 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) { if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
ret = -EOVERFLOW; ret = -EOVERFLOW;
cq->error = 1; cq->error = 1;
printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid); pr_err("cq overflow cqid %u\n", cq->cqid);
BUG_ON(1); BUG_ON(1);
} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) { } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment