Commit da43b7be authored by Yangyang Li's avatar Yangyang Li Committed by Jason Gunthorpe

RDMA/hns: Use IDA interface to manage xrcd index

Switch xrcd index allocation and release from hns own bitmap interface
to IDA interface.

Link: https://lore.kernel.org/r/1623325814-55737-7-git-send-email-liweihang@huawei.com
Signed-off-by: default avatarYangyang Li <liyangyang20@huawei.com>
Signed-off-by: default avatarWeihang Li <liweihang@huawei.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@nvidia.com>
parent 645f0593
...@@ -245,7 +245,7 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, ...@@ -245,7 +245,7 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev) void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{ {
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
hns_roce_cleanup_xrcd_table(hr_dev); ida_destroy(&hr_dev->xrcd_ida.ida);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
hns_roce_cleanup_srq_table(hr_dev); hns_roce_cleanup_srq_table(hr_dev);
......
...@@ -962,7 +962,7 @@ struct hns_roce_dev { ...@@ -962,7 +962,7 @@ struct hns_roce_dev {
struct hns_roce_cmdq cmd; struct hns_roce_cmdq cmd;
struct hns_roce_ida pd_ida; struct hns_roce_ida pd_ida;
struct hns_roce_bitmap xrcd_bitmap; struct hns_roce_ida xrcd_ida;
struct hns_roce_uar_table uar_table; struct hns_roce_uar_table uar_table;
struct hns_roce_mr_table mr_table; struct hns_roce_mr_table mr_table;
struct hns_roce_cq_table cq_table; struct hns_roce_cq_table cq_table;
...@@ -1148,13 +1148,12 @@ void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev); ...@@ -1148,13 +1148,12 @@ void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev); void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev); int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev); int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev); void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev); void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev); void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev); void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev); void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj); int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj); void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj);
......
...@@ -750,14 +750,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) ...@@ -750,14 +750,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
hns_roce_init_pd_table(hr_dev); hns_roce_init_pd_table(hr_dev);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
ret = hns_roce_init_xrcd_table(hr_dev); hns_roce_init_xrcd_table(hr_dev);
if (ret) {
dev_err(dev, "failed to init xrcd table, ret = %d.\n",
ret);
goto err_pd_table_free;
}
}
hns_roce_init_mr_table(hr_dev); hns_roce_init_mr_table(hr_dev);
...@@ -788,9 +782,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) ...@@ -788,9 +782,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
ida_destroy(&hr_dev->mr_table.mtpt_ida.ida); ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
hns_roce_cleanup_xrcd_table(hr_dev); ida_destroy(&hr_dev->xrcd_ida.ida);
err_pd_table_free:
ida_destroy(&hr_dev->pd_ida.ida); ida_destroy(&hr_dev->pd_ida.ida);
hns_roce_uar_free(hr_dev, &hr_dev->priv_uar); hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);
......
...@@ -134,35 +134,27 @@ void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev) ...@@ -134,35 +134,27 @@ void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev)
/*
 * Allocate an XRC domain number (xrcdn) for @hr_dev using the device's
 * IDA pool.
 *
 * Returns 0 on success with the allocated index stored in *@xrcdn, or
 * -ENOMEM when the IDA range [xrcd_ida->min, xrcd_ida->max] is exhausted
 * (the allocation failure is also logged via ibdev_err).
 */
static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, u32 *xrcdn)
{
	struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;
	int id;

	/* min is caps.reserved_xrcds, max is caps.num_xrcds - 1 —
	 * set up by hns_roce_init_xrcd_table().
	 */
	id = ida_alloc_range(&xrcd_ida->ida, xrcd_ida->min, xrcd_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(&hr_dev->ib_dev, "failed to alloc xrcdn(%d).\n", id);
		return -ENOMEM;
	}
	*xrcdn = (u32)id;

	return 0;
}
static void hns_roce_xrcd_free(struct hns_roce_dev *hr_dev, void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev)
u32 xrcdn)
{ {
hns_roce_bitmap_free(&hr_dev->xrcd_bitmap, xrcdn); struct hns_roce_ida *xrcd_ida = &hr_dev->xrcd_ida;
}
int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev) ida_init(&xrcd_ida->ida);
{ xrcd_ida->max = hr_dev->caps.num_xrcds - 1;
return hns_roce_bitmap_init(&hr_dev->xrcd_bitmap, xrcd_ida->min = hr_dev->caps.reserved_xrcds;
hr_dev->caps.num_xrcds,
hr_dev->caps.num_xrcds - 1,
hr_dev->caps.reserved_xrcds, 0);
}
void hns_roce_cleanup_xrcd_table(struct hns_roce_dev *hr_dev)
{
hns_roce_bitmap_cleanup(&hr_dev->xrcd_bitmap);
} }
int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata) int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
...@@ -175,18 +167,18 @@ int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata) ...@@ -175,18 +167,18 @@ int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
return -EINVAL; return -EINVAL;
ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn); ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn);
if (ret) { if (ret)
dev_err(hr_dev->dev, "failed to alloc xrcdn, ret = %d.\n", ret);
return ret; return ret;
}
return 0; return 0;
} }
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata) int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata)
{ {
hns_roce_xrcd_free(to_hr_dev(ib_xrcd->device), struct hns_roce_dev *hr_dev = to_hr_dev(ib_xrcd->device);
to_hr_xrcd(ib_xrcd)->xrcdn); u32 xrcdn = to_hr_xrcd(ib_xrcd)->xrcdn;
ida_free(&hr_dev->xrcd_ida.ida, (int)xrcdn);
return 0; return 0;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment