Commit f3b5ad89 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "More fixes from testing done on the rc kernel, including more SELinux
  testing. Looking forward, lockdep found a regression today in ipoib
  which is still being fixed.

  Summary:

   - Fix for SELinux on the umad SMI path. Some old hardware does not
     fill the PKey properly, exposing another bug in the newer SELinux
     code.

   - Check the input port as we can exceed array bounds from this
     user-supplied value

   - Users are unable to use the hash field support as they want due to
     incorrect checks on the field restrictions; correct that so the
     feature works as intended

   - User triggerable oops in the NETLINK_RDMA handler

   - cxgb4 driver fix for a bad interaction with CQ flushing in iser
     caused by patches in this merge window, and bad CQ flushing during
     normal close.

   - Unbalanced memalloc_noio in ipoib in an error path"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/ipoib: Restore MM behavior in case of tx_ring allocation failure
  iw_cxgb4: only insert drain cqes if wq is flushed
  iw_cxgb4: only clear the ARMED bit if a notification is needed
  RDMA/netlink: Fix general protection fault
  IB/mlx4: Fix RSS hash fields restrictions
  IB/core: Don't enforce PKey security on SMI MADs
  IB/core: Bound check alternate path port number
parents f25e2295 9d98e19b

@@ -4458,7 +4458,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
         return skb->len;
 }
 
-static const struct rdma_nl_cbs cma_cb_table[] = {
+static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
         [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
 };

@@ -1146,7 +1146,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
 }
 EXPORT_SYMBOL(ib_get_net_dev_by_params);
 
-static const struct rdma_nl_cbs ibnl_ls_cb_table[] = {
+static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
         [RDMA_NL_LS_OP_RESOLVE] = {
                 .doit = ib_nl_handle_resolve_resp,
                 .flags = RDMA_NL_ADMIN_PERM,

@@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason)
 }
 EXPORT_SYMBOL(iwcm_reject_msg);
 
-static struct rdma_nl_cbs iwcm_nl_cb_table[] = {
+static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
         [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
         [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
         [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},

@@ -303,7 +303,7 @@ out:    cb->args[0] = idx;
         return skb->len;
 }
 
-static const struct rdma_nl_cbs nldev_cb_table[] = {
+static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
         [RDMA_NLDEV_CMD_GET] = {
                 .doit = nldev_get_doit,
                 .dump = nldev_get_dumpit,
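
The four hunks above make the same change to four netlink callback tables. Below is a standalone C sketch (illustrative names only, not the kernel code) of why the explicit RDMA_NL_*_NUM_OPS bound matters: with designated initializers, an unsized array is only as long as its highest initialized index plus one, so range-checking a user-supplied op against the protocol's op count does not by itself keep the table lookup in bounds.

#include <stdio.h>

#define NUM_OPS 8                      /* hypothetical protocol-defined op count */

struct cb { void (*doit)(void); };

static void op2_handler(void) { puts("op 2"); }

/* Implicitly sized: the array holds 3 entries, not NUM_OPS. */
static const struct cb short_table[] = {
        [2] = { .doit = op2_handler },
};

/* Explicitly sized: every op below NUM_OPS has a (possibly empty) slot. */
static const struct cb full_table[NUM_OPS] = {
        [2] = { .doit = op2_handler },
};

int main(void)
{
        printf("short_table covers %zu ops, full_table covers %zu ops\n",
               sizeof(short_table) / sizeof(short_table[0]),
               sizeof(full_table) / sizeof(full_table[0]));
        /* Checking op < NUM_OPS and then reading short_table[5] would run past
         * the array; full_table[5] is a valid, zero-initialized entry. */
        return 0;
}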

@@ -739,8 +739,11 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
         if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
                 return 0;
 
-        if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
-                return -EACCES;
+        if (map->agent.qp->qp_type == IB_QPT_SMI) {
+                if (!map->agent.smp_allowed)
+                        return -EACCES;
+                return 0;
+        }
 
         return ib_security_pkey_access(map->agent.device,
                                        map->agent.port_num,

@@ -1971,6 +1971,12 @@ static int modify_qp(struct ib_uverbs_file *file,
                 goto release_qp;
         }
 
+        if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
+            !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
+                ret = -EINVAL;
+                goto release_qp;
+        }
+
         attr->qp_state = cmd->base.qp_state;
         attr->cur_qp_state = cmd->base.cur_qp_state;
         attr->path_mtu = cmd->base.path_mtu;
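
A minimal userspace-style sketch of the check added above; the device struct and helper are hypothetical stand-ins for the kernel's rdma_is_port_valid(): the user-supplied alternate-path port number has to be validated against the device's port range before it can be used to index per-port state.

#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
        unsigned int phys_port_cnt;    /* ports are numbered 1..phys_port_cnt */
};

static bool port_is_valid(const struct fake_dev *dev, unsigned int port)
{
        return port >= 1 && port <= dev->phys_port_cnt;
}

int main(void)
{
        struct fake_dev dev = { .phys_port_cnt = 2 };
        unsigned int alt_port_num = 200;    /* user-controlled value */

        if (!port_is_valid(&dev, alt_port_num)) {
                fprintf(stderr, "rejecting alt_port_num %u\n", alt_port_num);
                return 1;                   /* the kernel path returns -EINVAL */
        }
        return 0;
}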

@@ -395,6 +395,11 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 
 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
 {
+        if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) {
+                WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
+                return 0;
+        }
+
         if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                 return 0;

@@ -868,7 +868,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         qhp = to_c4iw_qp(ibqp);
         spin_lock_irqsave(&qhp->lock, flag);
-        if (t4_wq_in_error(&qhp->wq)) {
+
+        /*
+         * If the qp has been flushed, then just insert a special
+         * drain cqe.
+         */
+        if (qhp->wq.flushed) {
                 spin_unlock_irqrestore(&qhp->lock, flag);
                 complete_sq_drain_wr(qhp, wr);
                 return err;

@@ -1011,7 +1016,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
         qhp = to_c4iw_qp(ibqp);
         spin_lock_irqsave(&qhp->lock, flag);
-        if (t4_wq_in_error(&qhp->wq)) {
+
+        /*
+         * If the qp has been flushed, then just insert a special
+         * drain cqe.
+         */
+        if (qhp->wq.flushed) {
                 spin_unlock_irqrestore(&qhp->lock, flag);
                 complete_rq_drain_wr(qhp, wr);
                 return err;

@@ -1285,21 +1295,21 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
         spin_unlock_irqrestore(&rchp->lock, flag);
 
         if (schp == rchp) {
-                if (t4_clear_cq_armed(&rchp->cq) &&
-                    (rq_flushed || sq_flushed)) {
+                if ((rq_flushed || sq_flushed) &&
+                    t4_clear_cq_armed(&rchp->cq)) {
                         spin_lock_irqsave(&rchp->comp_handler_lock, flag);
                         (*rchp->ibcq.comp_handler)(&rchp->ibcq,
                                                    rchp->ibcq.cq_context);
                         spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
                 }
         } else {
-                if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+                if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
                         spin_lock_irqsave(&rchp->comp_handler_lock, flag);
                         (*rchp->ibcq.comp_handler)(&rchp->ibcq,
                                                    rchp->ibcq.cq_context);
                         spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
                 }
-                if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+                if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
                         spin_lock_irqsave(&schp->comp_handler_lock, flag);
                         (*schp->ibcq.comp_handler)(&schp->ibcq,
                                                    schp->ibcq.cq_context);
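
A standalone sketch of why the operand order above matters: t4_clear_cq_armed() is a test-and-clear with a side effect, and && short-circuits left to right, so the cheap "was anything flushed" check should run first and the ARMED state should only be consumed when a notification will actually be delivered. The helper below is a stand-in, not the driver function.

#include <stdbool.h>
#include <stdio.h>

static bool armed = true;

/* Stand-in for t4_clear_cq_armed(): returns the old value and clears it. */
static bool test_and_clear_armed(void)
{
        bool was_armed = armed;

        armed = false;
        return was_armed;
}

int main(void)
{
        bool rq_flushed = false, sq_flushed = false;

        /* Nothing was flushed, so short-circuiting keeps the test-and-clear
         * from running and the ARMED state stays set for the next real
         * completion. */
        if ((rq_flushed || sq_flushed) && test_and_clear_armed())
                puts("notify completion handler");

        printf("armed is still %s\n", armed ? "set" : "clear");
        return 0;
}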

@@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
                 return (-EOPNOTSUPP);
         }
 
+        if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
+                                          MLX4_IB_RX_HASH_DST_IPV4 |
+                                          MLX4_IB_RX_HASH_SRC_IPV6 |
+                                          MLX4_IB_RX_HASH_DST_IPV6 |
+                                          MLX4_IB_RX_HASH_SRC_PORT_TCP |
+                                          MLX4_IB_RX_HASH_DST_PORT_TCP |
+                                          MLX4_IB_RX_HASH_SRC_PORT_UDP |
+                                          MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+                pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
+                         ucmd->rx_hash_fields_mask);
+                return (-EOPNOTSUPP);
+        }
+
         if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
             (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
                 rss_ctx->flags = MLX4_RSS_IPV4;

@@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
                         return (-EOPNOTSUPP);
                 }
 
-                if (rss_ctx->flags & MLX4_RSS_IPV4) {
+                if (rss_ctx->flags & MLX4_RSS_IPV4)
                         rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
-                } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+                if (rss_ctx->flags & MLX4_RSS_IPV6)
                         rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
-                } else {
+                if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
                         pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
                         return (-EOPNOTSUPP);
                 }

@@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
 
         if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
             (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
-                if (rss_ctx->flags & MLX4_RSS_IPV4) {
+                if (rss_ctx->flags & MLX4_RSS_IPV4)
                         rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
-                } else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+                if (rss_ctx->flags & MLX4_RSS_IPV6)
                         rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
-                } else {
+                if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
                         pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
                         return (-EOPNOTSUPP);
                 }
-
         } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
                    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
                 pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
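
A standalone sketch (the flag values are made up, not the mlx4 ABI ones) of the behavioral difference behind the last two hunks: the old if/else-if chain could turn a hash request covering both IPv4 and IPv6 into only one MLX4_RSS_UDP_*/MLX4_RSS_TCP_* flag, while the independent ifs set one flag per requested IP version.

#include <stdio.h>

#define RSS_IPV4      0x1
#define RSS_IPV6      0x2
#define RSS_UDP_IPV4  0x4
#define RSS_UDP_IPV6  0x8

int main(void)
{
        unsigned int requested = RSS_IPV4 | RSS_IPV6;   /* user asked for both */
        unsigned int old_style = requested, new_style = requested;

        /* Old else-if chain: IPv6/UDP hashing is silently dropped. */
        if (old_style & RSS_IPV4)
                old_style |= RSS_UDP_IPV4;
        else if (old_style & RSS_IPV6)
                old_style |= RSS_UDP_IPV6;

        /* Fixed logic: each requested IP version gets its UDP flag. */
        if (new_style & RSS_IPV4)
                new_style |= RSS_UDP_IPV4;
        if (new_style & RSS_IPV6)
                new_style |= RSS_UDP_IPV6;

        printf("old flags 0x%x, new flags 0x%x\n", old_style, new_style);
        return 0;
}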

@@ -1145,6 +1145,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
         noio_flag = memalloc_noio_save();
         p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
         if (!p->tx_ring) {
+                memalloc_noio_restore(noio_flag);
                 ret = -ENOMEM;
                 goto err_tx;
         }
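
A standalone sketch of the pairing rule behind the one-line ipoib fix above: whatever scoped state memalloc_noio_save() establishes must be undone on every exit path, including the allocation-failure path. The save/restore helpers here are stand-ins, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

static unsigned int current_flags;

static unsigned int noio_save(void)
{
        unsigned int old = current_flags;

        current_flags |= 0x1;   /* pretend bit 0 means "no I/O from allocations" */
        return old;
}

static void noio_restore(unsigned int old)
{
        current_flags = old;
}

static int tx_init(size_t ring_size)
{
        unsigned int noio_flag = noio_save();
        void **tx_ring = calloc(ring_size, sizeof(*tx_ring));

        if (!tx_ring) {
                noio_restore(noio_flag);   /* the error path must unwind too */
                return -1;
        }

        /* ... use the ring ... */
        free(tx_ring);
        noio_restore(noio_flag);
        return 0;
}

int main(void)
{
        printf("tx_init -> %d, flags afterwards = 0x%x\n",
               tx_init(64), current_flags);
        return 0;
}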