Commit b73efcb2 authored by Maor Gottlieb, committed by Jason Gunthorpe

RDMA/core: Clean ib_alloc_xrcd() and reuse it to allocate XRC domain

ib_alloc_xrcd() already performs the required initialization, so convert
uverbs to call it instead of duplicating that code, and clean up the
function's argument list while at it.

Link: https://lore.kernel.org/r/20200706122716.647338-3-leon@kernel.org
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent f4375443
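
For orientation, here is a minimal sketch (not part of the patch) of what the user-facing call site looks like once the duplicated initialization lives in ib_alloc_xrcd_user(); the variable names mirror ib_uverbs_open_xrcd(), but the surrounding unwind logic is elided:

        /* Simplified sketch of the uverbs call site after this change. */
        struct ib_xrcd *xrcd;

        xrcd = ib_alloc_xrcd_user(ib_dev, inode, &attrs->driver_udata);
        if (IS_ERR(xrcd))
                return PTR_ERR(xrcd);   /* nothing to unwind; core init never ran */
        /* device, inode, usecnt and the tgt_qp bookkeeping are already set up */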
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -614,17 +614,11 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
         }
 
         if (!xrcd) {
-                xrcd = ib_dev->ops.alloc_xrcd(ib_dev, &attrs->driver_udata);
+                xrcd = ib_alloc_xrcd_user(ib_dev, inode, &attrs->driver_udata);
                 if (IS_ERR(xrcd)) {
                         ret = PTR_ERR(xrcd);
                         goto err;
                 }
 
-                xrcd->inode = inode;
-                xrcd->device = ib_dev;
-                atomic_set(&xrcd->usecnt, 0);
-                mutex_init(&xrcd->tgt_qp_mutex);
-                INIT_LIST_HEAD(&xrcd->tgt_qp_list);
-
                 new_xrcd = 1;
         }
@@ -663,7 +657,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
         }
 
 err_dealloc_xrcd:
-        ib_dealloc_xrcd(xrcd, uverbs_get_cleared_udata(attrs));
+        ib_dealloc_xrcd_user(xrcd, uverbs_get_cleared_udata(attrs));
 
 err:
         uobj_alloc_abort(&obj->uobject, attrs);
@@ -701,7 +695,7 @@ int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, struct ib_xrcd *xrcd,
         if (inode && !atomic_dec_and_test(&xrcd->usecnt))
                 return 0;
 
-        ret = ib_dealloc_xrcd(xrcd, &attrs->driver_udata);
+        ret = ib_dealloc_xrcd_user(xrcd, &attrs->driver_udata);
 
         if (ib_is_destroy_retryable(ret, why, uobject)) {
                 atomic_inc(&xrcd->usecnt);
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2288,17 +2288,24 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 }
 EXPORT_SYMBOL(ib_detach_mcast);
 
-struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
+/**
+ * ib_alloc_xrcd_user - Allocates an XRC domain.
+ * @device: The device on which to allocate the XRC domain.
+ * @inode: inode to connect XRCD
+ * @udata: Valid user data or NULL for kernel object
+ */
+struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
+                                   struct inode *inode, struct ib_udata *udata)
 {
         struct ib_xrcd *xrcd;
 
         if (!device->ops.alloc_xrcd)
                 return ERR_PTR(-EOPNOTSUPP);
 
-        xrcd = device->ops.alloc_xrcd(device, NULL);
+        xrcd = device->ops.alloc_xrcd(device, udata);
         if (!IS_ERR(xrcd)) {
                 xrcd->device = device;
-                xrcd->inode = NULL;
+                xrcd->inode = inode;
                 atomic_set(&xrcd->usecnt, 0);
                 mutex_init(&xrcd->tgt_qp_mutex);
                 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
@@ -2306,9 +2313,14 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
 
         return xrcd;
 }
-EXPORT_SYMBOL(__ib_alloc_xrcd);
+EXPORT_SYMBOL(ib_alloc_xrcd_user);
 
-int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
+/**
+ * ib_dealloc_xrcd_user - Deallocates an XRC domain.
+ * @xrcd: The XRC domain to deallocate.
+ * @udata: Valid user data or NULL for kernel object
+ */
+int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
 {
         struct ib_qp *qp;
         int ret;
@@ -2326,7 +2338,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
 
         return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
 }
-EXPORT_SYMBOL(ib_dealloc_xrcd);
+EXPORT_SYMBOL(ib_dealloc_xrcd_user);
 
 /**
  * ib_create_wq - Creates a WQ associated with the specified protection
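
To illustrate the split of responsibilities this consolidation assumes, here is a hypothetical driver-side sketch (the demo_* names and fields are invented for illustration; they come from neither this patch nor any real driver): the ->alloc_xrcd() op only creates and returns the object, while ib_alloc_xrcd_user() above fills in device, inode, usecnt and the tgt_qp bookkeeping.

        #include <linux/err.h>
        #include <linux/slab.h>
        #include <rdma/ib_verbs.h>

        struct demo_xrcd {                      /* hypothetical driver object */
                struct ib_xrcd ibxrcd;          /* embedded core object */
                u32 hw_xrcdn;                   /* hypothetical HW domain id */
        };

        static struct ib_xrcd *demo_alloc_xrcd(struct ib_device *ibdev,
                                               struct ib_udata *udata)
        {
                struct demo_xrcd *xrcd = kzalloc(sizeof(*xrcd), GFP_KERNEL);

                if (!xrcd)
                        return ERR_PTR(-ENOMEM);
                /* udata may be NULL when the core allocates a kernel-owned XRCD */
                return &xrcd->ibxrcd;           /* common init happens in the caller */
        }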
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4321,21 +4321,9 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
  */
 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
 
-/**
- * ib_alloc_xrcd - Allocates an XRC domain.
- * @device: The device on which to allocate the XRC domain.
- * @caller: Module name for kernel consumers
- */
-struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
-#define ib_alloc_xrcd(device) \
-        __ib_alloc_xrcd((device), KBUILD_MODNAME)
-
-/**
- * ib_dealloc_xrcd - Deallocates an XRC domain.
- * @xrcd: The XRC domain to deallocate.
- * @udata: Valid user data or NULL for kernel object
- */
-int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
+                                   struct inode *inode, struct ib_udata *udata);
+int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
 
 static inline int ib_check_mr_access(int flags)
 {
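
Since the header also drops the kernel-facing ib_alloc_xrcd() macro, an in-kernel consumer would now be expected to call the _user() helpers directly; a hedged conversion sketch (the caller and the QP usage are assumptions, not shown in this patch):

        /* Before (macro removed by this patch): xrcd = ib_alloc_xrcd(ibdev); */
        /* After: NULL inode and NULL udata mark a kernel-owned XRC domain.   */
        struct ib_xrcd *xrcd = ib_alloc_xrcd_user(ibdev, NULL, NULL);

        if (IS_ERR(xrcd))
                return PTR_ERR(xrcd);
        /* ... create XRC INI/TGT QPs that reference xrcd ... */
        ib_dealloc_xrcd_user(xrcd, NULL);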