Commit f786eebb authored by Kalesh AP's avatar Kalesh AP Committed by Leon Romanovsky

RDMA/bnxt_re: Avoid an extra hwrm per MR creation

Firmware now has a new MR registration command where
both MR allocation and registration can be done in a
single hwrm command. The driver has to issue this new hwrm
command whenever the support flag is set. This reduces
the number of hwrm commands issued per MR creation and
speeds up MR creation.
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Link: https://patch.msgid.link/1725256351-12751-4-git-send-email-selvin.xavier@broadcom.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent b98d9697
...@@ -518,14 +518,18 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) ...@@ -518,14 +518,18 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->qplib_mr.pd = &pd->qplib_pd; mr->qplib_mr.pd = &pd->qplib_pd;
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags); mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
if (rc) { rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n"); if (rc) {
goto fail; ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
} goto fail;
}
/* Register MR */ /* Register MR */
mr->ib_mr.lkey = mr->qplib_mr.lkey; mr->ib_mr.lkey = mr->qplib_mr.lkey;
} else {
mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
}
mr->qplib_mr.va = (u64)(unsigned long)fence->va; mr->qplib_mr.va = (u64)(unsigned long)fence->va;
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES; mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
...@@ -4101,14 +4105,18 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 ...@@ -4101,14 +4105,18 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64
mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags); mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR; mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); if (!_is_alloc_mr_unified(rdev->dev_attr.dev_cap_flags)) {
if (rc) { rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc); if (rc) {
rc = -EIO; ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
goto free_mr; rc = -EIO;
goto free_mr;
}
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
} else {
mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
} }
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
mr->ib_umem = umem; mr->ib_umem = umem;
mr->qplib_mr.va = virt_addr; mr->qplib_mr.va = virt_addr;
mr->qplib_mr.total_size = length; mr->qplib_mr.total_size = length;
......
...@@ -565,4 +565,9 @@ static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx) ...@@ -565,4 +565,9 @@ static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
return cctx->modes.dbr_pacing; return cctx->modes.dbr_pacing;
} }
/* True if FW supports the unified (combined alloc + register) MR command. */
static inline bool _is_alloc_mr_unified(u16 dev_cap_flags)
{
	return !!(dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC);
}
#endif /* __BNXT_QPLIB_RES_H__ */ #endif /* __BNXT_QPLIB_RES_H__ */
...@@ -659,6 +659,9 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, ...@@ -659,6 +659,9 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
req.access = (mr->access_flags & 0xFFFF); req.access = (mr->access_flags & 0xFFFF);
req.va = cpu_to_le64(mr->va); req.va = cpu_to_le64(mr->va);
req.key = cpu_to_le32(mr->lkey); req.key = cpu_to_le32(mr->lkey);
if (_is_alloc_mr_unified(res->dattr->dev_cap_flags))
req.key = cpu_to_le32(mr->pd->id);
req.flags = cpu_to_le16(mr->flags);
req.mr_size = cpu_to_le64(mr->total_size); req.mr_size = cpu_to_le64(mr->total_size);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
...@@ -667,6 +670,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, ...@@ -667,6 +670,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
if (rc) if (rc)
goto fail; goto fail;
if (_is_alloc_mr_unified(res->dattr->dev_cap_flags)) {
mr->lkey = le32_to_cpu(resp.xid);
mr->rkey = mr->lkey;
}
return 0; return 0;
fail: fail:
......
...@@ -117,6 +117,7 @@ struct bnxt_qplib_mrw { ...@@ -117,6 +117,7 @@ struct bnxt_qplib_mrw {
u64 va; u64 va;
u64 total_size; u64 total_size;
u32 npages; u32 npages;
u16 flags;
u64 mr_handle; u64 mr_handle;
struct bnxt_qplib_hwq hwq; struct bnxt_qplib_hwq hwq;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment