Commit 1f11a761 authored by Jason Gunthorpe

RDMA: Check create_flags during create_qp

Each driver should check that the create_flags in the QP attrs are
supported. Unfortunately, when create_flags was added to the QP attrs the
drivers were not updated; uverbs_ex_cmd_mask was used to block it instead,
even though kernel drivers use these flags too.

Check that create_flags is zero in all drivers that don't use it, and
remove IB_USER_VERBS_EX_CMD_CREATE_QP from the per-driver
uverbs_ex_cmd_mask now that the core sets it unconditionally. Fix the error
code to be EOPNOTSUPP.
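
As a rough sketch of the pattern being applied (illustrative only, not code
from this patch; the helper name and its "supported" mask parameter are
made up), each driver's create_qp path now does the equivalent of:

#include <linux/errno.h>
#include <linux/types.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative helper: "supported" would be 0 for drivers that implement
 * no create_flags, and e.g. IB_QP_CREATE_NETDEV_USE for rdmavt.
 */
static int check_qp_create_flags(const struct ib_qp_init_attr *init_attr,
				 u32 supported)
{
	/* Any flag the driver does not implement is rejected up front. */
	if (init_attr->create_flags & ~supported)
		return -EOPNOTSUPP;
	return 0;
}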

Link: https://lore.kernel.org/r/8-v1-caa70ba3d1ab+1436e-ucmd_mask_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 1c407cb5
@@ -634,6 +634,7 @@ struct ib_device *_ib_alloc_device(size_t size)
 	device->uverbs_ex_cmd_mask =
 		BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_CQ) |
 		BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
+		BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_QP) |
 		BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
 		BIT_ULL(IB_USER_VERBS_EX_CMD_CREATE_WQ) |
 		BIT_ULL(IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
......
@@ -1271,10 +1271,12 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
 	}
 	qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
 	qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
-	if (init_attr->create_flags)
+	if (init_attr->create_flags) {
 		ibdev_dbg(&rdev->ibdev,
 			  "QP create flags 0x%x not supported",
 			  init_attr->create_flags);
+		return -EOPNOTSUPP;
+	}

 	/* Setup CQs */
 	if (init_attr->send_cq) {
......
@@ -2126,7 +2126,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	pr_debug("ib_pd %p\n", pd);

-	if (attrs->qp_type != IB_QPT_RC)
+	if (attrs->qp_type != IB_QPT_RC || attrs->create_flags)
 		return ERR_PTR(-EOPNOTSUPP);

 	php = to_c4iw_pd(pd);
......
@@ -869,17 +869,6 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 		if (ret)
 			ibdev_err(ibdev, "Failed to set user SQ size\n");
 	} else {
-		if (init_attr->create_flags &
-		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
-			ibdev_err(ibdev, "Failed to check multicast loopback\n");
-			return -EINVAL;
-		}
-
-		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
-			ibdev_err(ibdev, "Failed to check ipoib ud lso\n");
-			return -EINVAL;
-		}
-
 		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
 		if (ret)
 			ibdev_err(ibdev, "Failed to set kernel SQ size\n");
@@ -906,6 +895,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 	hr_qp->state = IB_QPS_RESET;
 	hr_qp->flush_flag = 0;

+	if (init_attr->create_flags)
+		return -EOPNOTSUPP;
+
 	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
 	if (ret) {
 		ibdev_err(ibdev, "Failed to set QP param\n");
......
@@ -556,7 +556,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 		return ERR_PTR(-ENODEV);

 	if (init_attr->create_flags)
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR(-EOPNOTSUPP);

 	if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
 		init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
......
......@@ -2658,8 +2658,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
ibdev->ib_dev.uverbs_ex_cmd_mask |=
(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
......
@@ -1493,7 +1493,7 @@ static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
 					MLX4_IB_SRIOV_SQP |
 					MLX4_IB_QP_NETIF |
 					MLX4_IB_QP_CREATE_ROCE_V2_GSI))
-		return -EINVAL;
+		return -EOPNOTSUPP;

 	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
 		if (init_attr->qp_type != IB_QPT_UD)
......
@@ -4142,8 +4142,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 	dev->ib_dev.uverbs_cmd_mask |=
 		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
 		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);
-	dev->ib_dev.uverbs_ex_cmd_mask |=
-		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);

 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
 	    IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
......
@@ -2712,11 +2712,12 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1,
 			    true, qp);

-	if (create_flags)
+	if (create_flags) {
 		mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n",
 			    create_flags);
-	return (create_flags) ? -EINVAL : 0;
+		return -EOPNOTSUPP;
+	}
+	return 0;
 }

 static int process_udata_size(struct mlx5_ib_dev *dev,
......
@@ -470,7 +470,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 	int err;

 	if (init_attr->create_flags)
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR(-EOPNOTSUPP);

 	switch (init_attr->qp_type) {
 	case IB_QPT_RC:
......
@@ -1299,6 +1299,9 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
 	struct ocrdma_create_qp_ureq ureq;
 	u16 dpp_credit_lmt, dpp_offset;

+	if (attrs->create_flags)
+		return ERR_PTR(-EOPNOTSUPP);
+
 	status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
 	if (status)
 		goto gen_err;
......
@@ -2239,6 +2239,9 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
 	struct ib_qp *ibqp;
 	int rc = 0;

+	if (attrs->create_flags)
+		return ERR_PTR(-EOPNOTSUPP);
+
 	if (attrs->qp_type == IB_QPT_XRC_TGT) {
 		xrcd = get_qedr_xrcd(attrs->xrcd);
 		dev = get_qedr_dev(xrcd->ibxrcd.device);
......
@@ -474,7 +474,7 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
 	us_ibdev = to_usdev(pd->device);

 	if (init_attr->create_flags)
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR(-EOPNOTSUPP);

 	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
 	if (err) {
......
@@ -209,7 +209,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 		dev_warn(&dev->pdev->dev,
 			 "invalid create queuepair flags %#x\n",
 			 init_attr->create_flags);
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR(-EOPNOTSUPP);
 	}

 	if (init_attr->qp_type != IB_QPT_RC &&
......
@@ -1083,10 +1083,11 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 	if (!rdi)
 		return ERR_PTR(-EINVAL);

+	if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE)
+		return ERR_PTR(-EOPNOTSUPP);
+
 	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
-	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
-	    (init_attr->create_flags &&
-	     init_attr->create_flags != IB_QP_CREATE_NETDEV_USE))
+	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr)
 		return ERR_PTR(-EINVAL);

 	/* Check receive queue parameters if no SRQ is specified. */
......
@@ -395,6 +395,9 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
 		uresp = udata->outbuf;
 	}

+	if (init->create_flags)
+		return ERR_PTR(-EOPNOTSUPP);
+
 	err = rxe_qp_chk_init(rxe, init);
 	if (err)
 		goto err1;
......
@@ -307,6 +307,9 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 	siw_dbg(base_dev, "create new QP\n");

+	if (attrs->create_flags)
+		return ERR_PTR(-EOPNOTSUPP);
+
 	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
 		siw_dbg(base_dev, "too many QP's\n");
 		rv = -ENOMEM;