Commit add53535 authored by Leon Romanovsky, committed by Jason Gunthorpe

RDMA: Restore ability to return error for destroy WQ

Make the destroy WQ interface symmetrical to the other destroy paths: let the driver's destroy_wq callback return an error and propagate it to the caller instead of discarding it.

Fixes: a49b1dc7 ("RDMA: Convert destroy_wq to be void")
Link: https://lore.kernel.org/r/20200907120921.476363-9-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent d0c45c85
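For context, the change below means a driver's destroy_wq callback can now report a teardown failure, and ib_destroy_wq_user() propagates that error to the caller instead of ignoring it. A minimal illustrative sketch of a callback under the new contract (the foo_* names and helpers are hypothetical, not part of this patch):

/* Illustrative only: a hypothetical driver callback under the new
 * int-returning ops->destroy_wq contract. All foo_* names are made up.
 */
static int foo_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
{
	struct foo_ib_rwq *rwq = to_foo_rwq(wq);	/* hypothetical container helper */
	int err;

	/* A failed hardware/firmware teardown is now returned to
	 * ib_destroy_wq_user() instead of being swallowed.
	 */
	err = foo_destroy_rq_hw(rwq, udata);
	if (err)
		return err;

	kfree(rwq);
	return 0;
}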
@@ -16,7 +16,7 @@ static int uverbs_free_wq(struct ib_uobject *uobject,
 		container_of(uobject, struct ib_uwq_object, uevent.uobject);
 	int ret;
 
-	ret = ib_destroy_wq(wq, &attrs->driver_udata);
+	ret = ib_destroy_wq_user(wq, &attrs->driver_udata);
 	if (ib_is_destroy_retryable(ret, why, uobject))
 		return ret;
@@ -2399,25 +2399,28 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
 EXPORT_SYMBOL(ib_create_wq);
 
 /**
- * ib_destroy_wq - Destroys the specified user WQ.
+ * ib_destroy_wq_user - Destroys the specified user WQ.
  * @wq: The WQ to destroy.
  * @udata: Valid user data
  */
-int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
+int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata)
 {
 	struct ib_cq *cq = wq->cq;
 	struct ib_pd *pd = wq->pd;
+	int ret;
 
 	if (atomic_read(&wq->usecnt))
 		return -EBUSY;
 
-	wq->device->ops.destroy_wq(wq, udata);
+	ret = wq->device->ops.destroy_wq(wq, udata);
+	if (ret)
+		return ret;
+
 	atomic_dec(&pd->usecnt);
 	atomic_dec(&cq->usecnt);
-	return 0;
+	return ret;
 }
-EXPORT_SYMBOL(ib_destroy_wq);
+EXPORT_SYMBOL(ib_destroy_wq_user);
 
 /**
  * ib_modify_wq - Modifies the specified WQ.
@@ -899,7 +899,7 @@ void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
 struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
 				struct ib_wq_init_attr *init_attr,
 				struct ib_udata *udata);
-void mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 		      u32 wq_attr_mask, struct ib_udata *udata);
@@ -4327,7 +4327,7 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
 	return err;
 }
 
-void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
+int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
 	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
@@ -4338,6 +4338,7 @@ void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 	destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
 
 	kfree(qp);
+	return 0;
 }
 
 struct ib_rwq_ind_table
@@ -1241,7 +1241,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
 				struct ib_wq_init_attr *init_attr,
 				struct ib_udata *udata);
-void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 		      u32 wq_attr_mask, struct ib_udata *udata);
 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
@@ -5085,14 +5085,18 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
 	return ERR_PTR(err);
 }
 
-void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
+	int ret;
 
-	mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
+	ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
+	if (ret)
+		return ret;
 	destroy_user_rq(dev, wq->pd, rwq, udata);
 	kfree(rwq);
+	return 0;
 }
 
 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
@@ -26,8 +26,8 @@ int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
 int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec);
 
-void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
-				  struct mlx5_core_qp *rq);
+int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+				 struct mlx5_core_qp *rq);
 int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
 				struct mlx5_core_qp *sq);
 void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
@@ -576,11 +576,12 @@ int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
 	return err;
 }
 
-void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
-				  struct mlx5_core_qp *rq)
+int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+				 struct mlx5_core_qp *rq)
 {
 	destroy_resource_common(dev, rq);
 	destroy_rq_tracked(dev, rq->qpn, rq->uid);
+	return 0;
 }
 
 static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
@@ -2480,7 +2480,7 @@ struct ib_device_ops {
 	struct ib_wq *(*create_wq)(struct ib_pd *pd,
 				   struct ib_wq_init_attr *init_attr,
 				   struct ib_udata *udata);
-	void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
+	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
 	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
 			 u32 wq_attr_mask, struct ib_udata *udata);
 	struct ib_rwq_ind_table *(*create_rwq_ind_table)(
@@ -4316,7 +4316,7 @@ struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
 struct ib_wq *ib_create_wq(struct ib_pd *pd,
 			   struct ib_wq_init_attr *init_attr);
-int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
 int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
 		 u32 wq_attr_mask);
 int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);