Commit 6f3ca6f4 authored by Maor Gottlieb, committed by Jason Gunthorpe

RDMA/core: Optimize XRC target lookup

Replace the mutex with a read/write semaphore and use an xarray instead of
a linked list for XRC target QPs. This gives faster XRC target lookup. In
addition, when a QP is closed, don't insert it back into the xarray if the
destroy command failed.

Link: https://lore.kernel.org/r/20200706122716.647338-4-leon@kernel.org
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent b73efcb2
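
For readers skimming the change, the core pattern is: key XRC target QPs by qp_num in an xarray and guard lookups with the read side of an rwsem, so ib_open_qp() no longer walks a list under a mutex. The sketch below is an illustrative, kernel-style restatement of that pattern, not the in-tree code: the xrc_tgt_table struct and the tgt_* helpers are hypothetical names, while xa_init()/xa_load()/xa_store()/xa_erase() and the rw_semaphore calls are the real primitives the patch uses.

/*
 * Illustrative sketch only (hypothetical names, not the in-tree code):
 * XRC target QPs keyed by QP number in an xarray, with an rwsem so
 * concurrent lookups only take the shared (read) side.
 */
#include <linux/gfp.h>
#include <linux/rwsem.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct ib_qp;			/* opaque here; real definition lives in rdma/ib_verbs.h */

struct xrc_tgt_table {		/* hypothetical container for the sketch */
	struct rw_semaphore rwsem;
	struct xarray qps;	/* qp_num -> struct ib_qp * */
};

static void tgt_table_init(struct xrc_tgt_table *t)
{
	init_rwsem(&t->rwsem);
	xa_init(&t->qps);
}

/* Lookup by QP number: a radix-tree walk instead of an O(n) list scan. */
static struct ib_qp *tgt_lookup(struct xrc_tgt_table *t, u32 qp_num)
{
	struct ib_qp *qp;

	down_read(&t->rwsem);
	qp = xa_load(&t->qps, qp_num);
	up_read(&t->rwsem);
	return qp;
}

/* Insert: xa_store() takes the xarray's internal lock, so no rwsem here. */
static int tgt_insert(struct xrc_tgt_table *t, u32 qp_num, struct ib_qp *qp)
{
	return xa_err(xa_store(&t->qps, qp_num, qp, GFP_KERNEL));
}

/* Erase under the write side so a close+erase pair stays atomic vs. lookups. */
static void tgt_erase(struct xrc_tgt_table *t, u32 qp_num)
{
	down_write(&t->rwsem);
	xa_erase(&t->qps, qp_num);
	up_write(&t->rwsem);
}

In the patch itself this maps onto ib_open_qp() (down_read + xa_load), create_xrc_qp_user() (xa_store without the semaphore), and __ib_destroy_shared_qp() (down_write around ib_close_qp() and xa_erase()), as shown in the hunks below.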
drivers/infiniband/core/verbs.c
@@ -1090,13 +1090,6 @@ static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
 	spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
 }
 
-static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
-{
-	mutex_lock(&xrcd->tgt_qp_mutex);
-	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
-	mutex_unlock(&xrcd->tgt_qp_mutex);
-}
-
 static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
 				  void (*event_handler)(struct ib_event *, void *),
 				  void *qp_context)
@@ -1139,16 +1132,15 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
 	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
 		return ERR_PTR(-EINVAL);
 
-	qp = ERR_PTR(-EINVAL);
-	mutex_lock(&xrcd->tgt_qp_mutex);
-	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
-		if (real_qp->qp_num == qp_open_attr->qp_num) {
-			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
-					  qp_open_attr->qp_context);
-			break;
-		}
+	down_read(&xrcd->tgt_qps_rwsem);
+	real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num);
+	if (!real_qp) {
+		up_read(&xrcd->tgt_qps_rwsem);
+		return ERR_PTR(-EINVAL);
 	}
-	mutex_unlock(&xrcd->tgt_qp_mutex);
+	qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
+			  qp_open_attr->qp_context);
+	up_read(&xrcd->tgt_qps_rwsem);
 	return qp;
 }
 EXPORT_SYMBOL(ib_open_qp);
@@ -1157,6 +1149,7 @@ static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
 					 struct ib_qp_init_attr *qp_init_attr)
 {
 	struct ib_qp *real_qp = qp;
+	int err;
 
 	qp->event_handler = __ib_shared_qp_event_handler;
 	qp->qp_context = qp;
@@ -1172,7 +1165,12 @@ static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
 	if (IS_ERR(qp))
 		return qp;
 
-	__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
+	err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num,
+			      real_qp, GFP_KERNEL));
+	if (err) {
+		ib_close_qp(qp);
+		return ERR_PTR(err);
+	}
 	return qp;
 }
 
@@ -1887,21 +1885,18 @@ static int __ib_destroy_shared_qp(struct ib_qp *qp)
 	real_qp = qp->real_qp;
 	xrcd = real_qp->xrcd;
 
-	mutex_lock(&xrcd->tgt_qp_mutex);
+	down_write(&xrcd->tgt_qps_rwsem);
 	ib_close_qp(qp);
 	if (atomic_read(&real_qp->usecnt) == 0)
-		list_del(&real_qp->xrcd_list);
+		xa_erase(&xrcd->tgt_qps, real_qp->qp_num);
 	else
 		real_qp = NULL;
-	mutex_unlock(&xrcd->tgt_qp_mutex);
+	up_write(&xrcd->tgt_qps_rwsem);
 
 	if (real_qp) {
 		ret = ib_destroy_qp(real_qp);
 		if (!ret)
 			atomic_dec(&xrcd->usecnt);
-		else
-			__ib_insert_xrcd_qp(xrcd, real_qp);
 	}
 
 	return 0;
 }
@@ -2307,8 +2302,8 @@ struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
 		xrcd->device = device;
 		xrcd->inode = inode;
 		atomic_set(&xrcd->usecnt, 0);
-		mutex_init(&xrcd->tgt_qp_mutex);
-		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
+		init_rwsem(&xrcd->tgt_qps_rwsem);
+		xa_init(&xrcd->tgt_qps);
 	}
 
 	return xrcd;
@@ -2322,20 +2317,10 @@ EXPORT_SYMBOL(ib_alloc_xrcd_user);
  */
 int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
 {
-	struct ib_qp *qp;
-	int ret;
-
 	if (atomic_read(&xrcd->usecnt))
 		return -EBUSY;
 
-	while (!list_empty(&xrcd->tgt_qp_list)) {
-		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
-		ret = ib_destroy_qp(qp);
-		if (ret)
-			return ret;
-	}
-	mutex_destroy(&xrcd->tgt_qp_mutex);
-
+	WARN_ON(!xa_empty(&xrcd->tgt_qps));
 	return xrcd->device->ops.dealloc_xrcd(xrcd, udata);
 }
 EXPORT_SYMBOL(ib_dealloc_xrcd_user);

include/rdma/ib_verbs.h
@@ -1567,9 +1567,8 @@ struct ib_xrcd {
 	struct ib_device       *device;
 	atomic_t		usecnt; /* count all exposed resources */
 	struct inode	       *inode;
-
-	struct mutex		tgt_qp_mutex;
-	struct list_head	tgt_qp_list;
+	struct rw_semaphore	tgt_qps_rwsem;
+	struct xarray		tgt_qps;
 };
 
 struct ib_ah {