Commit 645f0593 authored by Yangyang Li, committed by Jason Gunthorpe

RDMA/hns: Use IDA interface to manage pd index

Switch pd index allocation and release from hns own bitmap interface
to IDA interface.

Link: https://lore.kernel.org/r/1623325814-55737-6-git-send-email-liweihang@huawei.com
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent d38936f0
...@@ -252,6 +252,6 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev) ...@@ -252,6 +252,6 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
hns_roce_cleanup_qp_table(hr_dev); hns_roce_cleanup_qp_table(hr_dev);
hns_roce_cleanup_cq_table(hr_dev); hns_roce_cleanup_cq_table(hr_dev);
ida_destroy(&hr_dev->mr_table.mtpt_ida.ida); ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
hns_roce_cleanup_pd_table(hr_dev); ida_destroy(&hr_dev->pd_ida.ida);
hns_roce_cleanup_uar_table(hr_dev); hns_roce_cleanup_uar_table(hr_dev);
} }
...@@ -961,7 +961,7 @@ struct hns_roce_dev { ...@@ -961,7 +961,7 @@ struct hns_roce_dev {
void __iomem *priv_addr; void __iomem *priv_addr;
struct hns_roce_cmdq cmd; struct hns_roce_cmdq cmd;
struct hns_roce_bitmap pd_bitmap; struct hns_roce_ida pd_ida;
struct hns_roce_bitmap xrcd_bitmap; struct hns_roce_bitmap xrcd_bitmap;
struct hns_roce_uar_table uar_table; struct hns_roce_uar_table uar_table;
struct hns_roce_mr_table mr_table; struct hns_roce_mr_table mr_table;
...@@ -1143,14 +1143,13 @@ void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, ...@@ -1143,14 +1143,13 @@ void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
dma_addr_t *pages, unsigned int page_cnt); dma_addr_t *pages, unsigned int page_cnt);
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev); void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev); void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev); void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev); int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev); int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev); int hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev); void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev); void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev); void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
......
...@@ -748,11 +748,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) ...@@ -748,11 +748,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
goto err_uar_table_free; goto err_uar_table_free;
} }
ret = hns_roce_init_pd_table(hr_dev); hns_roce_init_pd_table(hr_dev);
if (ret) {
dev_err(dev, "Failed to init protected domain table.\n");
goto err_uar_alloc_free;
}
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) {
ret = hns_roce_init_xrcd_table(hr_dev); ret = hns_roce_init_xrcd_table(hr_dev);
...@@ -795,9 +791,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev) ...@@ -795,9 +791,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
hns_roce_cleanup_xrcd_table(hr_dev); hns_roce_cleanup_xrcd_table(hr_dev);
err_pd_table_free: err_pd_table_free:
hns_roce_cleanup_pd_table(hr_dev); ida_destroy(&hr_dev->pd_ida.ida);
err_uar_alloc_free:
hns_roce_uar_free(hr_dev, &hr_dev->priv_uar); hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);
err_uar_table_free: err_uar_table_free:
......
...@@ -34,39 +34,31 @@ ...@@ -34,39 +34,31 @@
#include <linux/pci.h> #include <linux/pci.h>
#include "hns_roce_device.h" #include "hns_roce_device.h"
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn) void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
{ {
return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0; struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;
}
static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
{
hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
}
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
{
return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,
hr_dev->caps.num_pds - 1,
hr_dev->caps.reserved_pds, 0);
}
void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev) ida_init(&pd_ida->ida);
{ pd_ida->max = hr_dev->caps.num_pds - 1;
hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap); pd_ida->min = hr_dev->caps.reserved_pds;
} }
int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{ {
struct ib_device *ib_dev = ibpd->device; struct ib_device *ib_dev = ibpd->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;
struct hns_roce_pd *pd = to_hr_pd(ibpd); struct hns_roce_pd *pd = to_hr_pd(ibpd);
int ret; int ret = 0;
int id;
ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn); id = ida_alloc_range(&pd_ida->ida, pd_ida->min, pd_ida->max,
if (ret) { GFP_KERNEL);
ibdev_err(ib_dev, "failed to alloc pd, ret = %d.\n", ret); if (id < 0) {
return ret; ibdev_err(ib_dev, "failed to alloc pd, id = %d.\n", id);
return -ENOMEM;
} }
pd->pdn = (unsigned long)id;
if (udata) { if (udata) {
struct hns_roce_ib_alloc_pd_resp resp = {.pdn = pd->pdn}; struct hns_roce_ib_alloc_pd_resp resp = {.pdn = pd->pdn};
...@@ -74,7 +66,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) ...@@ -74,7 +66,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
ret = ib_copy_to_udata(udata, &resp, ret = ib_copy_to_udata(udata, &resp,
min(udata->outlen, sizeof(resp))); min(udata->outlen, sizeof(resp)));
if (ret) { if (ret) {
hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn); ida_free(&pd_ida->ida, id);
ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret); ibdev_err(ib_dev, "failed to copy to udata, ret = %d\n", ret);
} }
} }
...@@ -84,7 +76,10 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) ...@@ -84,7 +76,10 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{ {
hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn); struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
ida_free(&hr_dev->pd_ida.ida, (int)to_hr_pd(pd)->pdn);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment