Commit 4659fc84 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Things have been quite slow, only 6 RC patches have been sent to the
  list. Regression, user visible bugs, and crashing fixes:

   - cxgb4 could wrongly fail MR creation due to a typo

   - various crashes if the wrong QP type is mixed in with APIs that
     expect other types

   - syzkaller oops

   - mixing ERR_PTR and NULL return values causes HFI1 to crash in some
     cases (see the sketch after this message)

   - mlx5 memory leak in error unwind"
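
A note on the ERR_PTR/NULL item: IS_ERR() only recognizes pointers in the
top error-code range, so a callee that returns ERR_PTR() on one failure
path and NULL on another lets the NULL slip past the caller's check and
get dereferenced. A minimal sketch of the bug with hypothetical names, not
the HFI1 code:

	#include <linux/err.h>
	#include <linux/types.h>

	struct thing { int value; };

	/* Buggy: two failure conventions in one function. */
	static struct thing *get_thing(struct thing *pool, bool busy)
	{
		if (busy)
			return ERR_PTR(-EBUSY);	/* failure path A: ERR_PTR */
		return pool;			/* failure path B: may be NULL */
	}

	static int use_thing(struct thing *pool)
	{
		struct thing *t = get_thing(pool, false);

		if (IS_ERR(t))			/* IS_ERR(NULL) is false... */
			return PTR_ERR(t);
		return t->value;		/* ...so a NULL pool oopses here */
	}

The hfi1 hunks below fix this by standardizing on NULL.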

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/mlx5: Fix memory leak in mlx5_ib_create_srq() error path
  RDMA/uverbs: Don't fail in creation of multiple flows
  IB/hfi1: Fix incorrect mixing of ERR_PTR and NULL return values
  RDMA/uverbs: Fix slab-out-of-bounds in ib_uverbs_ex_create_flow
  RDMA/uverbs: Protect from attempts to create flows on unsupported QP
  iw_cxgb4: correctly enforce the max reg_mr depth
parents 2a7e1211 d63c4673
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3488,8 +3488,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	struct ib_flow_attr		  *flow_attr;
 	struct ib_qp			  *qp;
 	struct ib_uflow_resources	  *uflow_res;
+	struct ib_uverbs_flow_spec_hdr	  *kern_spec;
 	int err = 0;
-	void *kern_spec;
 	void *ib_spec;
 	int i;
@@ -3538,8 +3538,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 		if (!kern_flow_attr)
 			return -ENOMEM;
 
-		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
-		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
+		*kern_flow_attr = cmd.flow_attr;
+		err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore,
 					 cmd.flow_attr.size);
 		if (err)
 			goto err_free_attr;
@@ -3559,6 +3559,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 		goto err_uobj;
 	}
 
+	if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
+		err = -EINVAL;
+		goto err_put;
+	}
+
 	flow_attr = kzalloc(struct_size(flow_attr, flows,
 			    cmd.flow_attr.num_of_specs), GFP_KERNEL);
 	if (!flow_attr) {
@@ -3578,21 +3583,22 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	flow_attr->flags = kern_flow_attr->flags;
 	flow_attr->size = sizeof(*flow_attr);
 
-	kern_spec = kern_flow_attr + 1;
+	kern_spec = kern_flow_attr->flow_specs;
 	ib_spec = flow_attr + 1;
 	for (i = 0; i < flow_attr->num_of_specs &&
-	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
-	     cmd.flow_attr.size >=
-	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
-		err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec,
-					   uflow_res);
+			cmd.flow_attr.size >= sizeof(*kern_spec) &&
+			cmd.flow_attr.size >= kern_spec->size;
+			i++) {
+		err = kern_spec_to_ib_spec(
+				file->ucontext, (struct ib_uverbs_flow_spec *)kern_spec,
+				ib_spec, uflow_res);
 		if (err)
 			goto err_free;
 
 		flow_attr->size +=
 			((union ib_flow_spec *) ib_spec)->size;
-		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
-		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
+		cmd.flow_attr.size -= kern_spec->size;
+		kern_spec = ((void *)kern_spec) + kern_spec->size;
 		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
 	}
 	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
......
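
The two uverbs fixes above share a theme: validate untrusted,
variable-length input before using it. The QP-type hunk rejects flow
creation on anything but UD and raw packet QPs, and the retyped kern_spec
walk checks both that a full header remains and that the header's
self-declared size fits in the bytes left. A standalone sketch of that
walk, using a hypothetical record type rather than the uverbs structs:

	#include <linux/types.h>
	#include <linux/errno.h>

	struct rec_hdr {
		u16 type;
		u16 size;	/* total record size in bytes, self-declared */
	};

	/* Return 0 if the buffer is exactly a sequence of well-formed
	 * records, -EINVAL on truncated or oversized input.
	 */
	static int walk_records(const void *buf, size_t len)
	{
		while (len) {
			const struct rec_hdr *h = buf;

			if (len < sizeof(*h))	/* room for a header at all? */
				return -EINVAL;
			if (h->size < sizeof(*h) || h->size > len)
				return -EINVAL;	/* size field can't be trusted */

			/* ... translate one record here ... */

			buf += h->size;	/* void * arithmetic: GCC extension */
			len -= h->size;
		}
		return 0;
	}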
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
 {
 	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
 
-	if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
+	if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
 		return -ENOMEM;
 
 	mhp->mpl[mhp->mpl_len++] = addr;
......
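
The cxgb4 fix is a one-line bounds check against the wrong field: mpl_len
counts entries in the page list, so it has to be compared with the entry
capacity of the array it indexes. The generic shape of the set_page-style
accumulator, with a hypothetical struct rather than c4iw_mr:

	#include <linux/types.h>
	#include <linux/errno.h>

	struct page_list {
		u64 *pages;	/* page addresses */
		u32 nr_pages;	/* entries filled so far */
		u32 capacity;	/* entries allocated, in the same units */
	};

	/* Append one page address; both sides of the check count entries. */
	static int page_list_add(struct page_list *pl, u64 addr)
	{
		if (unlikely(pl->nr_pages == pl->capacity))
			return -ENOMEM;
		pl->pages[pl->nr_pages++] = addr;
		return 0;
	}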
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	lockdep_assert_held(&qp->s_lock);
 	ps->s_txreq = get_txreq(ps->dev, qp);
-	if (IS_ERR(ps->s_txreq))
+	if (!ps->s_txreq)
 		goto bail_no_tx;
 
 	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
......
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	int middle = 0;
 
 	ps->s_txreq = get_txreq(ps->dev, qp);
-	if (IS_ERR(ps->s_txreq))
+	if (!ps->s_txreq)
 		goto bail_no_tx;
 
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
......
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -503,7 +503,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	u32 lid;
 
 	ps->s_txreq = get_txreq(ps->dev, qp);
-	if (IS_ERR(ps->s_txreq))
+	if (!ps->s_txreq)
 		goto bail_no_tx;
 
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
......
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 - 2017 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 			      struct rvt_qp *qp)
 	__must_hold(&qp->s_lock)
 {
-	struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+	struct verbs_txreq *tx = NULL;
 
 	write_seqlock(&dev->txwait_lock);
 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
......
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
 	if (unlikely(!tx)) {
 		/* call slow path to get the lock */
 		tx = __get_txreq(dev, qp);
-		if (IS_ERR(tx))
+		if (!tx)
			return tx;
 	}
 	tx->qp = qp;
......
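
Taken together, the five hfi1 hunks are one logical change: __get_txreq()
now signals failure with NULL on every path, and every caller tests the
pointer with !tx instead of IS_ERR(). Continuing the sketch from above
under the same hypothetical names, the repaired pairing looks like:

	/* Fixed: one failure convention, NULL on every error path. */
	static struct thing *get_thing(struct thing *pool, bool busy)
	{
		if (busy)
			return NULL;
		return pool;	/* may also be NULL; same convention */
	}

	static int use_thing(struct thing *pool)
	{
		struct thing *t = get_thing(pool, false);

		if (!t)		/* one check matches the callee everywhere */
			return -EBUSY;
		return t->value;
	}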
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
 		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
-	if (desc_size == 0 || srq->msrq.max_gs > desc_size)
-		return ERR_PTR(-EINVAL);
+	if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
+		err = -EINVAL;
+		goto err_srq;
+	}
 	desc_size = roundup_pow_of_two(desc_size);
 	desc_size = max_t(size_t, 32, desc_size);
-	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
-		return ERR_PTR(-EINVAL);
+	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
+		err = -EINVAL;
+		goto err_srq;
+	}
 	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
 		sizeof(struct mlx5_wqe_data_seg);
 	srq->msrq.wqe_shift = ilog2(desc_size);
 	buf_size = srq->msrq.max * desc_size;
-	if (buf_size < desc_size)
-		return ERR_PTR(-EINVAL);
+	if (buf_size < desc_size) {
+		err = -EINVAL;
+		goto err_srq;
+	}
 	in.type = init_attr->srq_type;
 
 	if (pd->uobject)
......
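
The mlx5 hunks are the standard goto-unwind conversion: once srq has been
allocated, validation failures must branch to a label that releases it
instead of returning ERR_PTR() directly, or the allocation leaks. A
minimal sketch of the shape with hypothetical names (in the real function
the err_srq label frees the srq before returning ERR_PTR(err)):

	#include <linux/slab.h>
	#include <linux/err.h>

	struct widget { int cfg; };

	/* Every post-allocation failure funnels through one unwind label. */
	static struct widget *create_widget(int cfg)
	{
		struct widget *w;
		int err;

		w = kzalloc(sizeof(*w), GFP_KERNEL);
		if (!w)
			return ERR_PTR(-ENOMEM);

		if (cfg <= 0) {		/* validation after allocation... */
			err = -EINVAL;
			goto err_widget;	/* ...must unwind, not return */
		}

		w->cfg = cfg;
		return w;

	err_widget:
		kfree(w);
		return ERR_PTR(err);
	}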