Commit 20da44df authored by Leon Romanovsky's avatar Leon Romanovsky Committed by Jason Gunthorpe

RDMA/mlx5: Drop in-driver verbs object creations

There is no real value in bypassing IB/core APIs for creating standard
objects with standard types. The open-coded variant didn't have any
restrack task management calls and caused such objects to be missing
when running rdmatool.

Link: https://lore.kernel.org/r/f745590e5fb7d56f90fdb25f64ee3983ba17e1e4.1627040189.git.leonro@nvidia.com
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 514aee66
...@@ -1035,7 +1035,8 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd, ...@@ -1035,7 +1035,8 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
} }
if (srq->srq_type == IB_SRQT_XRC) { if (srq->srq_type == IB_SRQT_XRC) {
srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
atomic_inc(&srq->ext.xrc.xrcd->usecnt); if (srq->ext.xrc.xrcd)
atomic_inc(&srq->ext.xrc.xrcd->usecnt);
} }
atomic_inc(&pd->usecnt); atomic_inc(&pd->usecnt);
...@@ -1046,7 +1047,7 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd, ...@@ -1046,7 +1047,7 @@ struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
if (ret) { if (ret) {
rdma_restrack_put(&srq->res); rdma_restrack_put(&srq->res);
atomic_dec(&srq->pd->usecnt); atomic_dec(&srq->pd->usecnt);
if (srq->srq_type == IB_SRQT_XRC) if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
atomic_dec(&srq->ext.xrc.xrcd->usecnt); atomic_dec(&srq->ext.xrc.xrcd->usecnt);
if (ib_srq_has_cq(srq->srq_type)) if (ib_srq_has_cq(srq->srq_type))
atomic_dec(&srq->ext.cq->usecnt); atomic_dec(&srq->ext.cq->usecnt);
...@@ -1090,7 +1091,7 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata) ...@@ -1090,7 +1091,7 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
return ret; return ret;
atomic_dec(&srq->pd->usecnt); atomic_dec(&srq->pd->usecnt);
if (srq->srq_type == IB_SRQT_XRC) if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd)
atomic_dec(&srq->ext.xrc.xrcd->usecnt); atomic_dec(&srq->ext.xrc.xrcd->usecnt);
if (ib_srq_has_cq(srq->srq_type)) if (ib_srq_has_cq(srq->srq_type))
atomic_dec(&srq->ext.cq->usecnt); atomic_dec(&srq->ext.cq->usecnt);
......
...@@ -2802,31 +2802,16 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev) ...@@ -2802,31 +2802,16 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
if (!MLX5_CAP_GEN(dev->mdev, xrc)) if (!MLX5_CAP_GEN(dev->mdev, xrc))
return -EOPNOTSUPP; return -EOPNOTSUPP;
devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd); devr->p0 = ib_alloc_pd(ibdev, 0);
if (!devr->p0) if (IS_ERR(devr->p0))
return -ENOMEM; return PTR_ERR(devr->p0);
devr->p0->device = ibdev;
devr->p0->uobject = NULL;
atomic_set(&devr->p0->usecnt, 0);
ret = mlx5_ib_alloc_pd(devr->p0, NULL); devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
if (ret) if (IS_ERR(devr->c0)) {
goto error0; ret = PTR_ERR(devr->c0);
devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq);
if (!devr->c0) {
ret = -ENOMEM;
goto error1; goto error1;
} }
devr->c0->device = &dev->ib_dev;
atomic_set(&devr->c0->usecnt, 0);
ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL);
if (ret)
goto err_create_cq;
ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0); ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
if (ret) if (ret)
goto error2; goto error2;
...@@ -2841,45 +2826,22 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev) ...@@ -2841,45 +2826,22 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
attr.srq_type = IB_SRQT_XRC; attr.srq_type = IB_SRQT_XRC;
attr.ext.cq = devr->c0; attr.ext.cq = devr->c0;
devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq); devr->s0 = ib_create_srq(devr->p0, &attr);
if (!devr->s0) { if (IS_ERR(devr->s0)) {
ret = -ENOMEM; ret = PTR_ERR(devr->s0);
goto error4;
}
devr->s0->device = &dev->ib_dev;
devr->s0->pd = devr->p0;
devr->s0->srq_type = IB_SRQT_XRC;
devr->s0->ext.cq = devr->c0;
ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
if (ret)
goto err_create; goto err_create;
}
atomic_inc(&devr->s0->ext.cq->usecnt);
atomic_inc(&devr->p0->usecnt);
atomic_set(&devr->s0->usecnt, 0);
memset(&attr, 0, sizeof(attr)); memset(&attr, 0, sizeof(attr));
attr.attr.max_sge = 1; attr.attr.max_sge = 1;
attr.attr.max_wr = 1; attr.attr.max_wr = 1;
attr.srq_type = IB_SRQT_BASIC; attr.srq_type = IB_SRQT_BASIC;
devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
if (!devr->s1) {
ret = -ENOMEM;
goto error5;
}
devr->s1->device = &dev->ib_dev;
devr->s1->pd = devr->p0;
devr->s1->srq_type = IB_SRQT_BASIC;
devr->s1->ext.cq = devr->c0;
ret = mlx5_ib_create_srq(devr->s1, &attr, NULL); devr->s1 = ib_create_srq(devr->p0, &attr);
if (ret) if (IS_ERR(devr->s1)) {
ret = PTR_ERR(devr->s1);
goto error6; goto error6;
}
atomic_inc(&devr->p0->usecnt);
atomic_set(&devr->s1->usecnt, 0);
for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
INIT_WORK(&devr->ports[port].pkey_change_work, INIT_WORK(&devr->ports[port].pkey_change_work,
...@@ -2888,23 +2850,15 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev) ...@@ -2888,23 +2850,15 @@ static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
return 0; return 0;
error6: error6:
kfree(devr->s1); ib_destroy_srq(devr->s0);
error5:
mlx5_ib_destroy_srq(devr->s0, NULL);
err_create: err_create:
kfree(devr->s0);
error4:
mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0); mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
error3: error3:
mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0); mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
error2: error2:
mlx5_ib_destroy_cq(devr->c0, NULL); ib_destroy_cq(devr->c0);
err_create_cq:
kfree(devr->c0);
error1: error1:
mlx5_ib_dealloc_pd(devr->p0, NULL); ib_dealloc_pd(devr->p0);
error0:
kfree(devr->p0);
return ret; return ret;
} }
...@@ -2922,16 +2876,12 @@ static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev) ...@@ -2922,16 +2876,12 @@ static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
cancel_work_sync(&devr->ports[port].pkey_change_work); cancel_work_sync(&devr->ports[port].pkey_change_work);
mlx5_ib_destroy_srq(devr->s1, NULL); ib_destroy_srq(devr->s1);
kfree(devr->s1); ib_destroy_srq(devr->s0);
mlx5_ib_destroy_srq(devr->s0, NULL);
kfree(devr->s0);
mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0); mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0); mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
mlx5_ib_destroy_cq(devr->c0, NULL); ib_destroy_cq(devr->c0);
kfree(devr->c0); ib_dealloc_pd(devr->p0);
mlx5_ib_dealloc_pd(devr->p0, NULL);
kfree(devr->p0);
} }
static u32 get_core_cap_flags(struct ib_device *ibdev, static u32 get_core_cap_flags(struct ib_device *ibdev,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment