Commit ded85032 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:

 - Smattering of miscellaneous fixes

 - A five patch series for i40iw that had a patch (5/5) that was larger
   than I would like, but I took it because it's needed for large scale
   users

 - An 8 patch series for bnxt_re that landed right as I was leaving on
   PTO and so had to wait until now...they are all appropriate fixes for
   -rc IMO

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (22 commits)
  bnxt_re: Don't issue cmd to delete GID for QP1 GID entry before the QP is destroyed
  bnxt_re: Fix memory leak in FRMR path
  bnxt_re: Remove RTNL lock dependency in bnxt_re_query_port
  bnxt_re: Fix race between the netdev register and unregister events
  bnxt_re: Free up devices in module_exit path
  bnxt_re: Fix compare and swap atomic operands
  bnxt_re: Stop issuing further cmds to FW once a cmd times out
  bnxt_re: Fix update of qplib_qp.mtu when modified
  i40iw: Add support for port reuse on active side connections
  i40iw: Add missing VLAN priority
  i40iw: Call i40iw_cm_disconn on modify QP to disconnect
  i40iw: Prevent multiple netdev event notifier registrations
  i40iw: Fail open if there are no available MSI-X vectors
  RDMA/vmw_pvrdma: Fix reporting correct opcodes for completion
  IB/bnxt_re: Fix frame stack compilation warning
  IB/mlx5: fix debugfs cleanup
  IB/ocrdma: fix incorrect fall-through on switch statement
  IB/ipoib: Suppress the retry related completion errors
  iw_cxgb4: remove the stid on listen create failure
  iw_cxgb4: drop listen destroy replies if no ep found
  ...
parents 71aa60f6 89aaca54
...@@ -1646,7 +1646,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) ...@@ -1646,7 +1646,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
*/ */
if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
if (attr.qp_state >= IB_QPS_INIT) { if (attr.qp_state >= IB_QPS_INIT) {
if (qp->device->get_link_layer(qp->device, attr.port_num) != if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
IB_LINK_LAYER_INFINIBAND) IB_LINK_LAYER_INFINIBAND)
return true; return true;
goto lid_check; goto lid_check;
...@@ -1655,7 +1655,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) ...@@ -1655,7 +1655,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
/* Can't get a quick answer, iterate over all ports */ /* Can't get a quick answer, iterate over all ports */
for (port = 0; port < qp->device->phys_port_cnt; port++) for (port = 0; port < qp->device->phys_port_cnt; port++)
if (qp->device->get_link_layer(qp->device, port) != if (rdma_port_get_link_layer(qp->device, port) !=
IB_LINK_LAYER_INFINIBAND) IB_LINK_LAYER_INFINIBAND)
num_eth_ports++; num_eth_ports++;
......
...@@ -93,11 +93,13 @@ struct bnxt_re_dev { ...@@ -93,11 +93,13 @@ struct bnxt_re_dev {
struct ib_device ibdev; struct ib_device ibdev;
struct list_head list; struct list_head list;
unsigned long flags; unsigned long flags;
#define BNXT_RE_FLAG_NETDEV_REGISTERED 0 #define BNXT_RE_FLAG_NETDEV_REGISTERED 0
#define BNXT_RE_FLAG_IBDEV_REGISTERED 1 #define BNXT_RE_FLAG_IBDEV_REGISTERED 1
#define BNXT_RE_FLAG_GOT_MSIX 2 #define BNXT_RE_FLAG_GOT_MSIX 2
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 8 #define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_QOS_WORK_REG 16 #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5
#define BNXT_RE_FLAG_TASK_IN_PROG 6
struct net_device *netdev; struct net_device *netdev;
unsigned int version, major, minor; unsigned int version, major, minor;
struct bnxt_en_dev *en_dev; struct bnxt_en_dev *en_dev;
...@@ -108,6 +110,8 @@ struct bnxt_re_dev { ...@@ -108,6 +110,8 @@ struct bnxt_re_dev {
struct delayed_work worker; struct delayed_work worker;
u8 cur_prio_map; u8 cur_prio_map;
u8 active_speed;
u8 active_width;
/* FP Notification Queue (CQ & SRQ) */ /* FP Notification Queue (CQ & SRQ) */
struct tasklet_struct nq_task; struct tasklet_struct nq_task;
......
...@@ -259,14 +259,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num, ...@@ -259,14 +259,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
port_attr->sm_sl = 0; port_attr->sm_sl = 0;
port_attr->subnet_timeout = 0; port_attr->subnet_timeout = 0;
port_attr->init_type_reply = 0; port_attr->init_type_reply = 0;
/* call the underlying netdev's ethtool hooks to query speed settings port_attr->active_speed = rdev->active_speed;
* for which we acquire rtnl_lock _only_ if it's registered with port_attr->active_width = rdev->active_width;
* IB stack to avoid race in the NETDEV_UNREG path
*/
if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
&port_attr->active_width))
return -EINVAL;
return 0; return 0;
} }
...@@ -319,6 +314,7 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, ...@@ -319,6 +314,7 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
struct bnxt_re_gid_ctx *ctx, **ctx_tbl; struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
struct bnxt_qplib_gid *gid_to_del;
/* Delete the entry from the hardware */ /* Delete the entry from the hardware */
ctx = *context; ctx = *context;
...@@ -328,11 +324,25 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, ...@@ -328,11 +324,25 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
if (sgid_tbl && sgid_tbl->active) { if (sgid_tbl && sgid_tbl->active) {
if (ctx->idx >= sgid_tbl->max) if (ctx->idx >= sgid_tbl->max)
return -EINVAL; return -EINVAL;
gid_to_del = &sgid_tbl->tbl[ctx->idx];
/* DEL_GID is called in WQ context(netdevice_event_work_handler)
* or via the ib_unregister_device path. In the former case QP1
* may not be destroyed yet, in which case just return as FW
 * needs that entry to be present and will fail its deletion.
* We could get invoked again after QP1 is destroyed OR get an
* ADD_GID call with a different GID value for the same index
* where we issue MODIFY_GID cmd to update the GID entry -- TBD
*/
if (ctx->idx == 0 &&
rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
ctx->refcnt == 1 && rdev->qp1_sqp) {
dev_dbg(rdev_to_dev(rdev),
"Trying to delete GID0 while QP1 is alive\n");
return -EFAULT;
}
ctx->refcnt--; ctx->refcnt--;
if (!ctx->refcnt) { if (!ctx->refcnt) {
rc = bnxt_qplib_del_sgid(sgid_tbl, rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
&sgid_tbl->tbl[ctx->idx],
true);
if (rc) { if (rc) {
dev_err(rdev_to_dev(rdev), dev_err(rdev_to_dev(rdev),
"Failed to remove GID: %#x", rc); "Failed to remove GID: %#x", rc);
...@@ -816,6 +826,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp) ...@@ -816,6 +826,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
kfree(rdev->sqp_ah); kfree(rdev->sqp_ah);
kfree(rdev->qp1_sqp); kfree(rdev->qp1_sqp);
rdev->qp1_sqp = NULL;
rdev->sqp_ah = NULL;
} }
if (!IS_ERR_OR_NULL(qp->rumem)) if (!IS_ERR_OR_NULL(qp->rumem))
...@@ -1436,11 +1448,14 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, ...@@ -1436,11 +1448,14 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp->qplib_qp.modify_flags |= qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu); qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
} else if (qp_attr->qp_state == IB_QPS_RTR) { } else if (qp_attr->qp_state == IB_QPS_RTR) {
qp->qplib_qp.modify_flags |= qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu = qp->qplib_qp.path_mtu =
__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu)); __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
qp->qplib_qp.mtu =
ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
} }
if (qp_attr_mask & IB_QP_TIMEOUT) { if (qp_attr_mask & IB_QP_TIMEOUT) {
...@@ -1551,43 +1566,46 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, ...@@ -1551,43 +1566,46 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
{ {
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_re_dev *rdev = qp->rdev; struct bnxt_re_dev *rdev = qp->rdev;
struct bnxt_qplib_qp qplib_qp; struct bnxt_qplib_qp *qplib_qp;
int rc; int rc;
memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp)); qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
qplib_qp.id = qp->qplib_qp.id; if (!qplib_qp)
qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; return -ENOMEM;
qplib_qp->id = qp->qplib_qp.id;
qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp); rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
if (rc) { if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to query HW QP"); dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
return rc; goto out;
} }
qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state); qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0; qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access); qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
qp_attr->pkey_index = qplib_qp.pkey_index; qp_attr->pkey_index = qplib_qp->pkey_index;
qp_attr->qkey = qplib_qp.qkey; qp_attr->qkey = qplib_qp->qkey;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label, rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
qplib_qp.ah.host_sgid_index, qplib_qp->ah.host_sgid_index,
qplib_qp.ah.hop_limit, qplib_qp->ah.hop_limit,
qplib_qp.ah.traffic_class); qplib_qp->ah.traffic_class);
rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data); rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl); rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac); ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu); qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
qp_attr->timeout = qplib_qp.timeout; qp_attr->timeout = qplib_qp->timeout;
qp_attr->retry_cnt = qplib_qp.retry_cnt; qp_attr->retry_cnt = qplib_qp->retry_cnt;
qp_attr->rnr_retry = qplib_qp.rnr_retry; qp_attr->rnr_retry = qplib_qp->rnr_retry;
qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer; qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
qp_attr->rq_psn = qplib_qp.rq.psn; qp_attr->rq_psn = qplib_qp->rq.psn;
qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic; qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
qp_attr->sq_psn = qplib_qp.sq.psn; qp_attr->sq_psn = qplib_qp->sq.psn;
qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic; qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR : qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
IB_SIGNAL_REQ_WR; IB_SIGNAL_REQ_WR;
qp_attr->dest_qp_num = qplib_qp.dest_qpn; qp_attr->dest_qp_num = qplib_qp->dest_qpn;
qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
...@@ -1596,7 +1614,9 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, ...@@ -1596,7 +1614,9 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
qp_init_attr->cap = qp_attr->cap; qp_init_attr->cap = qp_attr->cap;
return 0; out:
kfree(qplib_qp);
return rc;
} }
/* Routine for sending QP1 packets for RoCE V1 an V2 /* Routine for sending QP1 packets for RoCE V1 an V2
...@@ -1908,6 +1928,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr, ...@@ -1908,6 +1928,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
switch (wr->opcode) { switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_CMP_AND_SWP:
wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
wqe->atomic.swap_data = atomic_wr(wr)->swap; wqe->atomic.swap_data = atomic_wr(wr)->swap;
break; break;
case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_ATOMIC_FETCH_AND_ADD:
...@@ -3062,7 +3083,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) ...@@ -3062,7 +3083,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
return rc; return rc;
} }
if (mr->npages && mr->pages) { if (mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
&mr->qplib_frpl); &mr->qplib_frpl);
kfree(mr->pages); kfree(mr->pages);
......
...@@ -1161,6 +1161,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) ...@@ -1161,6 +1161,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
} }
} }
set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
&rdev->active_width);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE); bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
...@@ -1255,10 +1257,14 @@ static void bnxt_re_task(struct work_struct *work) ...@@ -1255,10 +1257,14 @@ static void bnxt_re_task(struct work_struct *work)
else if (netif_carrier_ok(rdev->netdev)) else if (netif_carrier_ok(rdev->netdev))
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
IB_EVENT_PORT_ACTIVE); IB_EVENT_PORT_ACTIVE);
ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
&rdev->active_width);
break; break;
default: default:
break; break;
} }
smp_mb__before_atomic();
clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
kfree(re_work); kfree(re_work);
} }
...@@ -1317,6 +1323,11 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, ...@@ -1317,6 +1323,11 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
break; break;
case NETDEV_UNREGISTER: case NETDEV_UNREGISTER:
/* netdev notifier will call NETDEV_UNREGISTER again later since
* we are still holding the reference to the netdev
*/
if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags))
goto exit;
bnxt_re_ib_unreg(rdev, false); bnxt_re_ib_unreg(rdev, false);
bnxt_re_remove_one(rdev); bnxt_re_remove_one(rdev);
bnxt_re_dev_unreg(rdev); bnxt_re_dev_unreg(rdev);
...@@ -1335,6 +1346,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, ...@@ -1335,6 +1346,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
re_work->vlan_dev = (real_dev == netdev ? re_work->vlan_dev = (real_dev == netdev ?
NULL : netdev); NULL : netdev);
INIT_WORK(&re_work->work, bnxt_re_task); INIT_WORK(&re_work->work, bnxt_re_task);
set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
queue_work(bnxt_re_wq, &re_work->work); queue_work(bnxt_re_wq, &re_work->work);
} }
} }
...@@ -1375,6 +1387,22 @@ static int __init bnxt_re_mod_init(void) ...@@ -1375,6 +1387,22 @@ static int __init bnxt_re_mod_init(void)
static void __exit bnxt_re_mod_exit(void) static void __exit bnxt_re_mod_exit(void)
{ {
struct bnxt_re_dev *rdev;
LIST_HEAD(to_be_deleted);
mutex_lock(&bnxt_re_dev_lock);
/* Free all adapter allocated resources */
if (!list_empty(&bnxt_re_dev_list))
list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
mutex_unlock(&bnxt_re_dev_lock);
list_for_each_entry(rdev, &to_be_deleted, list) {
dev_info(rdev_to_dev(rdev), "Unregistering Device");
bnxt_re_dev_stop(rdev);
bnxt_re_ib_unreg(rdev, true);
bnxt_re_remove_one(rdev);
bnxt_re_dev_unreg(rdev);
}
unregister_netdevice_notifier(&bnxt_re_netdev_notifier); unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
if (bnxt_re_wq) if (bnxt_re_wq)
destroy_workqueue(bnxt_re_wq); destroy_workqueue(bnxt_re_wq);
......
...@@ -107,6 +107,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, ...@@ -107,6 +107,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
return -EINVAL; return -EINVAL;
} }
if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
return -ETIMEDOUT;
/* Cmdq are in 16-byte units, each request can consume 1 or more /* Cmdq are in 16-byte units, each request can consume 1 or more
* cmdqe * cmdqe
*/ */
...@@ -226,6 +229,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, ...@@ -226,6 +229,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
/* timed out */ /* timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
cookie, opcode, RCFW_CMD_WAIT_TIME_MS); cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
return rc; return rc;
} }
......
...@@ -162,8 +162,9 @@ struct bnxt_qplib_rcfw { ...@@ -162,8 +162,9 @@ struct bnxt_qplib_rcfw {
unsigned long *cmdq_bitmap; unsigned long *cmdq_bitmap;
u32 bmap_size; u32 bmap_size;
unsigned long flags; unsigned long flags;
#define FIRMWARE_INITIALIZED_FLAG 1 #define FIRMWARE_INITIALIZED_FLAG BIT(0)
#define FIRMWARE_FIRST_FLAG BIT(31) #define FIRMWARE_FIRST_FLAG BIT(31)
#define FIRMWARE_TIMED_OUT BIT(3)
wait_queue_head_t waitq; wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *, int (*aeq_handler)(struct bnxt_qplib_rcfw *,
struct creq_func_event *); struct creq_func_event *);
......
...@@ -2333,9 +2333,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -2333,9 +2333,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int stid = GET_TID(rpl); unsigned int stid = GET_TID(rpl);
struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
if (!ep) {
pr_debug("%s stid %d lookup failure!\n", __func__, stid);
goto out;
}
pr_debug("%s ep %p\n", __func__, ep); pr_debug("%s ep %p\n", __func__, ep);
c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
c4iw_put_ep(&ep->com); c4iw_put_ep(&ep->com);
out:
return 0; return 0;
} }
...@@ -2594,9 +2599,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) ...@@ -2594,9 +2599,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
c4iw_put_ep(&child_ep->com); c4iw_put_ep(&child_ep->com);
reject: reject:
reject_cr(dev, hwtid, skb); reject_cr(dev, hwtid, skb);
out:
if (parent_ep) if (parent_ep)
c4iw_put_ep(&parent_ep->com); c4iw_put_ep(&parent_ep->com);
out:
return 0; return 0;
} }
...@@ -3457,7 +3462,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) ...@@ -3457,7 +3462,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = ep; cm_id->provider_data = ep;
goto out; goto out;
} }
remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family); ep->com.local_addr.ss_family);
fail2: fail2:
......
...@@ -201,7 +201,6 @@ enum init_completion_state { ...@@ -201,7 +201,6 @@ enum init_completion_state {
CEQ_CREATED, CEQ_CREATED,
ILQ_CREATED, ILQ_CREATED,
IEQ_CREATED, IEQ_CREATED,
INET_NOTIFIER,
IP_ADDR_REGISTERED, IP_ADDR_REGISTERED,
RDMA_DEV_REGISTERED RDMA_DEV_REGISTERED
}; };
......
...@@ -1504,23 +1504,40 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core, ...@@ -1504,23 +1504,40 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
} }
/** /**
* listen_port_in_use - determine if port is in use * i40iw_port_in_use - determine if port is in use
* @port: Listen port number * @port: port number
* @active_side: flag for listener side vs active side
*/ */
static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port) static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side)
{ {
struct i40iw_cm_listener *listen_node; struct i40iw_cm_listener *listen_node;
struct i40iw_cm_node *cm_node;
unsigned long flags; unsigned long flags;
bool ret = false; bool ret = false;
spin_lock_irqsave(&cm_core->listen_list_lock, flags); if (active_side) {
list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { /* search connected node list */
if (listen_node->loc_port == port) { spin_lock_irqsave(&cm_core->ht_lock, flags);
ret = true; list_for_each_entry(cm_node, &cm_core->connected_nodes, list) {
break; if (cm_node->loc_port == port) {
ret = true;
break;
}
}
if (!ret)
clear_bit(port, cm_core->active_side_ports);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
} else {
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
if (listen_node->loc_port == port) {
ret = true;
break;
}
} }
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
} }
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
return ret; return ret;
} }
...@@ -1868,7 +1885,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core, ...@@ -1868,7 +1885,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
if (listener->iwdev) { if (listener->iwdev) {
if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port)) if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false))
i40iw_manage_apbvt(listener->iwdev, i40iw_manage_apbvt(listener->iwdev,
listener->loc_port, listener->loc_port,
I40IW_MANAGE_APBVT_DEL); I40IW_MANAGE_APBVT_DEL);
...@@ -2247,21 +2264,21 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node) ...@@ -2247,21 +2264,21 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
if (cm_node->listener) { if (cm_node->listener) {
i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true); i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
} else { } else {
if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) && if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) {
cm_node->apbvt_set) {
i40iw_manage_apbvt(cm_node->iwdev, i40iw_manage_apbvt(cm_node->iwdev,
cm_node->loc_port, cm_node->loc_port,
I40IW_MANAGE_APBVT_DEL); I40IW_MANAGE_APBVT_DEL);
i40iw_get_addr_info(cm_node, &nfo); cm_node->apbvt_set = 0;
if (cm_node->qhash_set) { }
i40iw_manage_qhash(cm_node->iwdev, i40iw_get_addr_info(cm_node, &nfo);
&nfo, if (cm_node->qhash_set) {
I40IW_QHASH_TYPE_TCP_ESTABLISHED, i40iw_manage_qhash(cm_node->iwdev,
I40IW_QHASH_MANAGE_TYPE_DELETE, &nfo,
NULL, I40IW_QHASH_TYPE_TCP_ESTABLISHED,
false); I40IW_QHASH_MANAGE_TYPE_DELETE,
cm_node->qhash_set = 0; NULL,
} false);
cm_node->qhash_set = 0;
} }
} }
...@@ -3255,7 +3272,8 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node, ...@@ -3255,7 +3272,8 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss)); tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
if (cm_node->vlan_id < VLAN_TAG_PRESENT) { if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
tcp_info->insert_vlan_tag = true; tcp_info->insert_vlan_tag = true;
tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id); tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) |
cm_node->vlan_id);
} }
if (cm_node->ipv4) { if (cm_node->ipv4) {
tcp_info->src_port = cpu_to_le16(cm_node->loc_port); tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
...@@ -3737,10 +3755,8 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3737,10 +3755,8 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct sockaddr_in *raddr; struct sockaddr_in *raddr;
struct sockaddr_in6 *laddr6; struct sockaddr_in6 *laddr6;
struct sockaddr_in6 *raddr6; struct sockaddr_in6 *raddr6;
bool qhash_set = false; int ret = 0;
int apbvt_set = 0; unsigned long flags;
int err = 0;
enum i40iw_status_code status;
ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn); ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp) if (!ibqp)
...@@ -3789,32 +3805,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3789,32 +3805,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_info.user_pri = rt_tos2priority(cm_id->tos); cm_info.user_pri = rt_tos2priority(cm_id->tos);
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n", i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
__func__, cm_id->tos, cm_info.user_pri); __func__, cm_id->tos, cm_info.user_pri);
if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
(!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
raddr6->sin6_addr.in6_u.u6_addr32,
sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
status = i40iw_manage_qhash(iwdev,
&cm_info,
I40IW_QHASH_TYPE_TCP_ESTABLISHED,
I40IW_QHASH_MANAGE_TYPE_ADD,
NULL,
true);
if (status)
return -EINVAL;
qhash_set = true;
}
status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
if (status) {
i40iw_manage_qhash(iwdev,
&cm_info,
I40IW_QHASH_TYPE_TCP_ESTABLISHED,
I40IW_QHASH_MANAGE_TYPE_DELETE,
NULL,
false);
return -EINVAL;
}
apbvt_set = 1;
cm_id->add_ref(cm_id); cm_id->add_ref(cm_id);
cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev, cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
conn_param->private_data_len, conn_param->private_data_len,
...@@ -3822,17 +3812,40 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3822,17 +3812,40 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
&cm_info); &cm_info);
if (IS_ERR(cm_node)) { if (IS_ERR(cm_node)) {
err = PTR_ERR(cm_node); ret = PTR_ERR(cm_node);
goto err_out; cm_id->rem_ref(cm_id);
return ret;
}
if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
(!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
raddr6->sin6_addr.in6_u.u6_addr32,
sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED,
I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
ret = -EINVAL;
goto err;
}
cm_node->qhash_set = true;
} }
spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) {
spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) {
ret = -EINVAL;
goto err;
}
} else {
spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
}
cm_node->apbvt_set = true;
i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO && if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
!cm_node->ord_size) !cm_node->ord_size)
cm_node->ord_size = 1; cm_node->ord_size = 1;
cm_node->apbvt_set = apbvt_set;
cm_node->qhash_set = qhash_set;
iwqp->cm_node = cm_node; iwqp->cm_node = cm_node;
cm_node->iwqp = iwqp; cm_node->iwqp = iwqp;
iwqp->cm_id = cm_id; iwqp->cm_id = cm_id;
...@@ -3840,11 +3853,9 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3840,11 +3853,9 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (cm_node->state != I40IW_CM_STATE_OFFLOADED) { if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
cm_node->state = I40IW_CM_STATE_SYN_SENT; cm_node->state = I40IW_CM_STATE_SYN_SENT;
err = i40iw_send_syn(cm_node, 0); ret = i40iw_send_syn(cm_node, 0);
if (err) { if (ret)
i40iw_rem_ref_cm_node(cm_node); goto err;
goto err_out;
}
} }
i40iw_debug(cm_node->dev, i40iw_debug(cm_node->dev,
...@@ -3853,9 +3864,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3853,9 +3864,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->rem_port, cm_node->rem_port,
cm_node, cm_node,
cm_node->cm_id); cm_node->cm_id);
return 0; return 0;
err_out: err:
if (cm_info.ipv4) if (cm_info.ipv4)
i40iw_debug(&iwdev->sc_dev, i40iw_debug(&iwdev->sc_dev,
I40IW_DEBUG_CM, I40IW_DEBUG_CM,
...@@ -3867,22 +3879,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ...@@ -3867,22 +3879,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
"Api - connect() FAILED: dest addr=%pI6", "Api - connect() FAILED: dest addr=%pI6",
cm_info.rem_addr); cm_info.rem_addr);
if (qhash_set) i40iw_rem_ref_cm_node(cm_node);
i40iw_manage_qhash(iwdev,
&cm_info,
I40IW_QHASH_TYPE_TCP_ESTABLISHED,
I40IW_QHASH_MANAGE_TYPE_DELETE,
NULL,
false);
if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
cm_info.loc_port))
i40iw_manage_apbvt(iwdev,
cm_info.loc_port,
I40IW_MANAGE_APBVT_DEL);
cm_id->rem_ref(cm_id); cm_id->rem_ref(cm_id);
iwdev->cm_core.stats_connect_errs++; iwdev->cm_core.stats_connect_errs++;
return err; return ret;
} }
/** /**
......
...@@ -71,6 +71,9 @@ ...@@ -71,6 +71,9 @@
#define I40IW_HW_IRD_SETTING_32 32 #define I40IW_HW_IRD_SETTING_32 32
#define I40IW_HW_IRD_SETTING_64 64 #define I40IW_HW_IRD_SETTING_64 64
#define MAX_PORTS 65536
#define I40IW_VLAN_PRIO_SHIFT 13
enum ietf_mpa_flags { enum ietf_mpa_flags {
IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */ IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */
...@@ -411,6 +414,8 @@ struct i40iw_cm_core { ...@@ -411,6 +414,8 @@ struct i40iw_cm_core {
spinlock_t ht_lock; /* manage hash table */ spinlock_t ht_lock; /* manage hash table */
spinlock_t listen_list_lock; /* listen list */ spinlock_t listen_list_lock; /* listen list */
unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)];
u64 stats_nodes_created; u64 stats_nodes_created;
u64 stats_nodes_destroyed; u64 stats_nodes_destroyed;
u64 stats_listen_created; u64 stats_listen_created;
......
...@@ -99,8 +99,6 @@ static struct notifier_block i40iw_net_notifier = { ...@@ -99,8 +99,6 @@ static struct notifier_block i40iw_net_notifier = {
.notifier_call = i40iw_net_event .notifier_call = i40iw_net_event
}; };
static atomic_t i40iw_notifiers_registered;
/** /**
* i40iw_find_i40e_handler - find a handler given a client info * i40iw_find_i40e_handler - find a handler given a client info
* @ldev: pointer to a client info * @ldev: pointer to a client info
...@@ -1376,11 +1374,20 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, ...@@ -1376,11 +1374,20 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
*/ */
static void i40iw_register_notifiers(void) static void i40iw_register_notifiers(void)
{ {
if (atomic_inc_return(&i40iw_notifiers_registered) == 1) { register_inetaddr_notifier(&i40iw_inetaddr_notifier);
register_inetaddr_notifier(&i40iw_inetaddr_notifier); register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
register_inet6addr_notifier(&i40iw_inetaddr6_notifier); register_netevent_notifier(&i40iw_net_notifier);
register_netevent_notifier(&i40iw_net_notifier); }
}
/**
* i40iw_unregister_notifiers - unregister tcp ip notifiers
*/
static void i40iw_unregister_notifiers(void)
{
unregister_netevent_notifier(&i40iw_net_notifier);
unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
} }
/** /**
...@@ -1400,6 +1407,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev, ...@@ -1400,6 +1407,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
u32 i; u32 i;
u32 size; u32 size;
if (!ldev->msix_count) {
i40iw_pr_err("No MSI-X vectors\n");
return I40IW_ERR_CONFIG;
}
iwdev->msix_count = ldev->msix_count; iwdev->msix_count = ldev->msix_count;
size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count; size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
...@@ -1462,12 +1474,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev) ...@@ -1462,12 +1474,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev)
if (!iwdev->reset) if (!iwdev->reset)
i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
/* fallthrough */ /* fallthrough */
case INET_NOTIFIER:
if (!atomic_dec_return(&i40iw_notifiers_registered)) {
unregister_netevent_notifier(&i40iw_net_notifier);
unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
}
/* fallthrough */ /* fallthrough */
case PBLE_CHUNK_MEM: case PBLE_CHUNK_MEM:
i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
...@@ -1550,7 +1556,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl, ...@@ -1550,7 +1556,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
status = i40iw_save_msix_info(iwdev, ldev); status = i40iw_save_msix_info(iwdev, ldev);
if (status) if (status)
goto exit; return status;
iwdev->hw.dev_context = (void *)ldev->pcidev; iwdev->hw.dev_context = (void *)ldev->pcidev;
iwdev->hw.hw_addr = ldev->hw_addr; iwdev->hw.hw_addr = ldev->hw_addr;
status = i40iw_allocate_dma_mem(&iwdev->hw, status = i40iw_allocate_dma_mem(&iwdev->hw,
...@@ -1667,8 +1673,6 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) ...@@ -1667,8 +1673,6 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
break; break;
iwdev->init_state = PBLE_CHUNK_MEM; iwdev->init_state = PBLE_CHUNK_MEM;
iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM); iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
i40iw_register_notifiers();
iwdev->init_state = INET_NOTIFIER;
status = i40iw_add_mac_ip(iwdev); status = i40iw_add_mac_ip(iwdev);
if (status) if (status)
break; break;
...@@ -2018,6 +2022,8 @@ static int __init i40iw_init_module(void) ...@@ -2018,6 +2022,8 @@ static int __init i40iw_init_module(void)
i40iw_client.type = I40E_CLIENT_IWARP; i40iw_client.type = I40E_CLIENT_IWARP;
spin_lock_init(&i40iw_handler_lock); spin_lock_init(&i40iw_handler_lock);
ret = i40e_register_client(&i40iw_client); ret = i40e_register_client(&i40iw_client);
i40iw_register_notifiers();
return ret; return ret;
} }
...@@ -2029,6 +2035,7 @@ static int __init i40iw_init_module(void) ...@@ -2029,6 +2035,7 @@ static int __init i40iw_init_module(void)
*/ */
static void __exit i40iw_exit_module(void) static void __exit i40iw_exit_module(void)
{ {
i40iw_unregister_notifiers();
i40e_unregister_client(&i40iw_client); i40e_unregister_client(&i40iw_client);
} }
......
...@@ -160,7 +160,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, ...@@ -160,7 +160,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
return NOTIFY_DONE; return NOTIFY_DONE;
iwdev = &hdl->device; iwdev = &hdl->device;
if (iwdev->init_state < INET_NOTIFIER) if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
return NOTIFY_DONE; return NOTIFY_DONE;
netdev = iwdev->ldev->netdev; netdev = iwdev->ldev->netdev;
...@@ -217,7 +217,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier, ...@@ -217,7 +217,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
return NOTIFY_DONE; return NOTIFY_DONE;
iwdev = &hdl->device; iwdev = &hdl->device;
if (iwdev->init_state < INET_NOTIFIER) if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
return NOTIFY_DONE; return NOTIFY_DONE;
netdev = iwdev->ldev->netdev; netdev = iwdev->ldev->netdev;
...@@ -266,7 +266,7 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void * ...@@ -266,7 +266,7 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
if (!iwhdl) if (!iwhdl)
return NOTIFY_DONE; return NOTIFY_DONE;
iwdev = &iwhdl->device; iwdev = &iwhdl->device;
if (iwdev->init_state < INET_NOTIFIER) if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
return NOTIFY_DONE; return NOTIFY_DONE;
p = (__be32 *)neigh->primary_key; p = (__be32 *)neigh->primary_key;
i40iw_copy_ip_ntohl(local_ipaddr, p); i40iw_copy_ip_ntohl(local_ipaddr, p);
......
...@@ -1027,7 +1027,19 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ...@@ -1027,7 +1027,19 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED; iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
iwqp->last_aeq = I40IW_AE_RESET_SENT; iwqp->last_aeq = I40IW_AE_RESET_SENT;
spin_unlock_irqrestore(&iwqp->lock, flags); spin_unlock_irqrestore(&iwqp->lock, flags);
i40iw_cm_disconn(iwqp);
} }
} else {
spin_lock_irqsave(&iwqp->lock, flags);
if (iwqp->cm_id) {
if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
iwqp->cm_id->add_ref(iwqp->cm_id);
i40iw_schedule_cm_timer(iwqp->cm_node,
(struct i40iw_puda_buf *)iwqp,
I40IW_TIMER_TYPE_CLOSE, 1, 0);
}
}
spin_unlock_irqrestore(&iwqp->lock, flags);
} }
} }
return 0; return 0;
......
...@@ -3837,11 +3837,13 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev) ...@@ -3837,11 +3837,13 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
if (!dbg) if (!dbg)
return -ENOMEM; return -ENOMEM;
dev->delay_drop.dbg = dbg;
dbg->dir_debugfs = dbg->dir_debugfs =
debugfs_create_dir("delay_drop", debugfs_create_dir("delay_drop",
dev->mdev->priv.dbg_root); dev->mdev->priv.dbg_root);
if (!dbg->dir_debugfs) if (!dbg->dir_debugfs)
return -ENOMEM; goto out_debugfs;
dbg->events_cnt_debugfs = dbg->events_cnt_debugfs =
debugfs_create_atomic_t("num_timeout_events", 0400, debugfs_create_atomic_t("num_timeout_events", 0400,
...@@ -3865,8 +3867,6 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev) ...@@ -3865,8 +3867,6 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
if (!dbg->timeout_debugfs) if (!dbg->timeout_debugfs)
goto out_debugfs; goto out_debugfs;
dev->delay_drop.dbg = dbg;
return 0; return 0;
out_debugfs: out_debugfs:
......
...@@ -252,7 +252,10 @@ static int ocrdma_get_mbx_errno(u32 status) ...@@ -252,7 +252,10 @@ static int ocrdma_get_mbx_errno(u32 status)
case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES: case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
err_num = -EAGAIN; err_num = -EAGAIN;
break; break;
default:
err_num = -EFAULT;
} }
break;
default: default:
err_num = -EFAULT; err_num = -EFAULT;
} }
......
...@@ -416,9 +416,34 @@ static inline enum ib_wc_status pvrdma_wc_status_to_ib( ...@@ -416,9 +416,34 @@ static inline enum ib_wc_status pvrdma_wc_status_to_ib(
return (enum ib_wc_status)status; return (enum ib_wc_status)status;
} }
static inline int pvrdma_wc_opcode_to_ib(int opcode) static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
{ {
return opcode; switch (opcode) {
case PVRDMA_WC_SEND:
return IB_WC_SEND;
case PVRDMA_WC_RDMA_WRITE:
return IB_WC_RDMA_WRITE;
case PVRDMA_WC_RDMA_READ:
return IB_WC_RDMA_READ;
case PVRDMA_WC_COMP_SWAP:
return IB_WC_COMP_SWAP;
case PVRDMA_WC_FETCH_ADD:
return IB_WC_FETCH_ADD;
case PVRDMA_WC_LOCAL_INV:
return IB_WC_LOCAL_INV;
case PVRDMA_WC_FAST_REG_MR:
return IB_WC_REG_MR;
case PVRDMA_WC_MASKED_COMP_SWAP:
return IB_WC_MASKED_COMP_SWAP;
case PVRDMA_WC_MASKED_FETCH_ADD:
return IB_WC_MASKED_FETCH_ADD;
case PVRDMA_WC_RECV:
return IB_WC_RECV;
case PVRDMA_WC_RECV_RDMA_WITH_IMM:
return IB_WC_RECV_RDMA_WITH_IMM;
default:
return IB_WC_SEND;
}
} }
static inline int pvrdma_wc_flags_to_ib(int flags) static inline int pvrdma_wc_flags_to_ib(int flags)
......
...@@ -823,12 +823,18 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -823,12 +823,18 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
wc->status != IB_WC_WR_FLUSH_ERR) { wc->status != IB_WC_WR_FLUSH_ERR) {
struct ipoib_neigh *neigh; struct ipoib_neigh *neigh;
if (wc->status != IB_WC_RNR_RETRY_EXC_ERR) /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle,
ipoib_warn(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", * so don't make waves.
wc->status, wr_id, wc->vendor_err); */
if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
wc->status == IB_WC_RETRY_EXC_ERR)
ipoib_dbg(priv,
"%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
__func__, wc->status, wr_id, wc->vendor_err);
else else
ipoib_dbg(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", ipoib_warn(priv,
wc->status, wr_id, wc->vendor_err); "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
__func__, wc->status, wr_id, wc->vendor_err);
spin_lock_irqsave(&priv->lock, flags); spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh; neigh = tx->neigh;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment