Commit b9c0f4bd authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "The good news is people are testing rc1 in the RDMA world - the bad
  news is testing of the for-next area is not as good as I had hoped, as
  we really should have caught at least the rdma_connect_locked() issue
  before now.

  Notable merge window regressions that didn't get caught/fixed in time
  for rc1:

   - Fix in kernel users of rxe, they were broken by the rapid fix to
     undo the uABI breakage in rxe from another patch

   - EFA userspace needs to read the GID table but was broken with the
     new GID table logic

   - Fix user triggerable deadlock in mlx5 using devlink reload

   - Fix deadlock in several ULPs using rdma_connect from the CM handler
     callbacks

   - Memory leak in qedr"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/qedr: Fix memory leak in iWARP CM
  RDMA: Add rdma_connect_locked()
  RDMA/uverbs: Fix false error in query gid IOCTL
  RDMA/mlx5: Fix devlink deadlock on net namespace deletion
  RDMA/rxe: Fix small problem in network_type patch
parents 598a5976 a2267f8a
...@@ -405,10 +405,10 @@ static int cma_comp_exch(struct rdma_id_private *id_priv, ...@@ -405,10 +405,10 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
/* /*
* The FSM uses a funny double locking where state is protected by both * The FSM uses a funny double locking where state is protected by both
* the handler_mutex and the spinlock. State is not allowed to change * the handler_mutex and the spinlock. State is not allowed to change
* away from a handler_mutex protected value without also holding * to/from a handler_mutex protected value without also holding
* handler_mutex. * handler_mutex.
*/ */
if (comp == RDMA_CM_CONNECT) if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
lockdep_assert_held(&id_priv->handler_mutex); lockdep_assert_held(&id_priv->handler_mutex);
spin_lock_irqsave(&id_priv->lock, flags); spin_lock_irqsave(&id_priv->lock, flags);
...@@ -4038,17 +4038,23 @@ static int cma_connect_iw(struct rdma_id_private *id_priv, ...@@ -4038,17 +4038,23 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
return ret; return ret;
} }
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) /**
* rdma_connect_locked - Initiate an active connection request.
* @id: Connection identifier to connect.
* @conn_param: Connection information used for connected QPs.
*
* Same as rdma_connect() but can only be called from the
* RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
*/
int rdma_connect_locked(struct rdma_cm_id *id,
struct rdma_conn_param *conn_param)
{ {
struct rdma_id_private *id_priv = struct rdma_id_private *id_priv =
container_of(id, struct rdma_id_private, id); container_of(id, struct rdma_id_private, id);
int ret; int ret;
mutex_lock(&id_priv->handler_mutex); if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) { return -EINVAL;
ret = -EINVAL;
goto err_unlock;
}
if (!id->qp) { if (!id->qp) {
id_priv->qp_num = conn_param->qp_num; id_priv->qp_num = conn_param->qp_num;
...@@ -4066,11 +4072,33 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) ...@@ -4066,11 +4072,33 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
ret = -ENOSYS; ret = -ENOSYS;
if (ret) if (ret)
goto err_state; goto err_state;
mutex_unlock(&id_priv->handler_mutex);
return 0; return 0;
err_state: err_state:
cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
err_unlock: return ret;
}
EXPORT_SYMBOL(rdma_connect_locked);
/**
 * rdma_connect - Initiate an active connection request.
 * @id: Connection identifier to connect.
 * @conn_param: Connection information used for connected QPs.
 *
 * Users must have resolved a route for the rdma_cm_id to connect with by having
 * called rdma_resolve_route before calling this routine.
 *
 * This call will either connect to a remote QP or obtain remote QP information
 * for unconnected rdma_cm_id's. The actual operation is based on the
 * rdma_cm_id's port space.
 */
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	int rc;

	/*
	 * Serialize against the CM event handlers, then defer to the
	 * _locked variant, which requires handler_mutex to be held.
	 */
	mutex_lock(&id_priv->handler_mutex);
	rc = rdma_connect_locked(id, conn_param);
	mutex_unlock(&id_priv->handler_mutex);
	return rc;
}
......
...@@ -401,9 +401,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_ENTRY)( ...@@ -401,9 +401,6 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_ENTRY)(
if (!rdma_is_port_valid(ib_dev, port_num)) if (!rdma_is_port_valid(ib_dev, port_num))
return -EINVAL; return -EINVAL;
if (!rdma_ib_or_roce(ib_dev, port_num))
return -EOPNOTSUPP;
gid_attr = rdma_get_gid_attr(ib_dev, port_num, gid_index); gid_attr = rdma_get_gid_attr(ib_dev, port_num, gid_index);
if (IS_ERR(gid_attr)) if (IS_ERR(gid_attr))
return PTR_ERR(gid_attr); return PTR_ERR(gid_attr);
......
...@@ -3305,7 +3305,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) ...@@ -3305,7 +3305,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
int err; int err;
dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event; dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
err = register_netdevice_notifier(&dev->port[port_num].roce.nb); err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
&dev->port[port_num].roce.nb);
if (err) { if (err) {
dev->port[port_num].roce.nb.notifier_call = NULL; dev->port[port_num].roce.nb.notifier_call = NULL;
return err; return err;
...@@ -3317,7 +3318,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) ...@@ -3317,7 +3318,8 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{ {
if (dev->port[port_num].roce.nb.notifier_call) { if (dev->port[port_num].roce.nb.notifier_call) {
unregister_netdevice_notifier(&dev->port[port_num].roce.nb); unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
&dev->port[port_num].roce.nb);
dev->port[port_num].roce.nb.notifier_call = NULL; dev->port[port_num].roce.nb.notifier_call = NULL;
} }
} }
......
...@@ -727,6 +727,7 @@ int qedr_iw_destroy_listen(struct iw_cm_id *cm_id) ...@@ -727,6 +727,7 @@ int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
listener->qed_handle); listener->qed_handle);
cm_id->rem_ref(cm_id); cm_id->rem_ref(cm_id);
kfree(listener);
return rc; return rc;
} }
......
...@@ -16,15 +16,24 @@ void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av) ...@@ -16,15 +16,24 @@ void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av)
int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr) int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr)
{ {
const struct ib_global_route *grh = rdma_ah_read_grh(attr);
struct rxe_port *port; struct rxe_port *port;
int type;
port = &rxe->port; port = &rxe->port;
if (rdma_ah_get_ah_flags(attr) & IB_AH_GRH) { if (rdma_ah_get_ah_flags(attr) & IB_AH_GRH) {
u8 sgid_index = rdma_ah_read_grh(attr)->sgid_index; if (grh->sgid_index > port->attr.gid_tbl_len) {
pr_warn("invalid sgid index = %d\n",
grh->sgid_index);
return -EINVAL;
}
if (sgid_index > port->attr.gid_tbl_len) { type = rdma_gid_attr_network_type(grh->sgid_attr);
pr_warn("invalid sgid index = %d\n", sgid_index); if (type < RDMA_NETWORK_IPV4 ||
type > RDMA_NETWORK_IPV6) {
pr_warn("invalid network type for rdma_rxe = %d\n",
type);
return -EINVAL; return -EINVAL;
} }
} }
...@@ -65,11 +74,29 @@ void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr) ...@@ -65,11 +74,29 @@ void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr)
void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr) void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
{ {
const struct ib_gid_attr *sgid_attr = attr->grh.sgid_attr; const struct ib_gid_attr *sgid_attr = attr->grh.sgid_attr;
int ibtype;
int type;
rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid); rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
rdma_gid2ip((struct sockaddr *)&av->dgid_addr, rdma_gid2ip((struct sockaddr *)&av->dgid_addr,
&rdma_ah_read_grh(attr)->dgid); &rdma_ah_read_grh(attr)->dgid);
av->network_type = rdma_gid_attr_network_type(sgid_attr);
ibtype = rdma_gid_attr_network_type(sgid_attr);
switch (ibtype) {
case RDMA_NETWORK_IPV4:
type = RXE_NETWORK_TYPE_IPV4;
break;
case RDMA_NETWORK_IPV6:
type = RXE_NETWORK_TYPE_IPV4;
break;
default:
/* not reached - checked in rxe_av_chk_attr */
type = 0;
break;
}
av->network_type = type;
} }
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt) struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
......
...@@ -442,7 +442,7 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, ...@@ -442,7 +442,7 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
if (IS_ERR(attr)) if (IS_ERR(attr))
return NULL; return NULL;
if (av->network_type == RXE_NETWORK_TYPE_IPV6) if (av->network_type == RXE_NETWORK_TYPE_IPV4)
hdr_len = ETH_HLEN + sizeof(struct udphdr) + hdr_len = ETH_HLEN + sizeof(struct udphdr) +
sizeof(struct iphdr); sizeof(struct iphdr);
else else
......
...@@ -620,7 +620,7 @@ static void iser_route_handler(struct rdma_cm_id *cma_id) ...@@ -620,7 +620,7 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
conn_param.private_data = (void *)&req_hdr; conn_param.private_data = (void *)&req_hdr;
conn_param.private_data_len = sizeof(struct iser_cm_hdr); conn_param.private_data_len = sizeof(struct iser_cm_hdr);
ret = rdma_connect(cma_id, &conn_param); ret = rdma_connect_locked(cma_id, &conn_param);
if (ret) { if (ret) {
iser_err("failure connecting: %d\n", ret); iser_err("failure connecting: %d\n", ret);
goto failure; goto failure;
......
...@@ -1674,9 +1674,9 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con) ...@@ -1674,9 +1674,9 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
uuid_copy(&msg.sess_uuid, &sess->s.uuid); uuid_copy(&msg.sess_uuid, &sess->s.uuid);
uuid_copy(&msg.paths_uuid, &clt->paths_uuid); uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
err = rdma_connect(con->c.cm_id, &param); err = rdma_connect_locked(con->c.cm_id, &param);
if (err) if (err)
rtrs_err(clt, "rdma_connect(): %d\n", err); rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
return err; return err;
} }
......
...@@ -90,9 +90,4 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev, ...@@ -90,9 +90,4 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
u32 key_type, u32 *p_key_id); u32 key_type, u32 *p_key_id);
void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id); void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
{
return devlink_net(priv_to_devlink(dev));
}
#endif #endif
...@@ -1890,10 +1890,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) ...@@ -1890,10 +1890,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
} }
ret = rdma_connect(queue->cm_id, &param); ret = rdma_connect_locked(queue->cm_id, &param);
if (ret) { if (ret) {
dev_err(ctrl->ctrl.device, dev_err(ctrl->ctrl.device,
"rdma_connect failed (%d).\n", ret); "rdma_connect_locked failed (%d).\n", ret);
goto out_destroy_queue_ib; goto out_destroy_queue_ib;
} }
......
...@@ -1213,4 +1213,22 @@ static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev) ...@@ -1213,4 +1213,22 @@ static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
return val.vbool; return val.vbool;
} }
/**
 * mlx5_core_net - Provide net namespace of the mlx5_core_dev
 * @dev: mlx5 core device
 *
 * mlx5_core_net() returns the net namespace the mlx5 core device belongs
 * to. It may only be called in the limited contexts described below,
 * where the devlink instance's net namespace cannot change under the
 * caller:
 * (a) when a devlink instance for mlx5_core is registered and the
 *     devlink reload operation is disabled;
 * or
 * (b) during the devlink reload_down() and reload_up() callbacks,
 *     where the devlink instance's net namespace is guaranteed to be
 *     stable.
 */
static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
{
	return devlink_net(priv_to_devlink(dev));
}
#endif /* MLX5_DRIVER_H */ #endif /* MLX5_DRIVER_H */
...@@ -227,19 +227,9 @@ void rdma_destroy_qp(struct rdma_cm_id *id); ...@@ -227,19 +227,9 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
int *qp_attr_mask); int *qp_attr_mask);
/**
* rdma_connect - Initiate an active connection request.
* @id: Connection identifier to connect.
* @conn_param: Connection information used for connected QPs.
*
* Users must have resolved a route for the rdma_cm_id to connect with
* by having called rdma_resolve_route before calling this routine.
*
* This call will either connect to a remote QP or obtain remote QP
* information for unconnected rdma_cm_id's. The actual operation is
* based on the rdma_cm_id's port space.
*/
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
int rdma_connect_locked(struct rdma_cm_id *id,
struct rdma_conn_param *conn_param);
int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
struct rdma_ucm_ece *ece); struct rdma_ucm_ece *ece);
......
...@@ -956,9 +956,10 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6) ...@@ -956,9 +956,10 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, rds_ib_cm_fill_conn_param(conn, &conn_param, &dp,
conn->c_proposed_version, conn->c_proposed_version,
UINT_MAX, UINT_MAX, isv6); UINT_MAX, UINT_MAX, isv6);
ret = rdma_connect(cm_id, &conn_param); ret = rdma_connect_locked(cm_id, &conn_param);
if (ret) if (ret)
rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret); rds_ib_conn_error(conn, "rdma_connect_locked failed (%d)\n",
ret);
out: out:
/* Beware - returning non-zero tells the rdma_cm to destroy /* Beware - returning non-zero tells the rdma_cm to destroy
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment