Commit 9ae2a37e authored by Xi Wang's avatar Xi Wang Committed by Jason Gunthorpe

RDMA/hns: Refactor post recv flow

Refactor post recv flow by removing unnecessary checking and removing
duplicated code.

Link: https://lore.kernel.org/r/1611997090-48820-10-git-send-email-liweihang@huawei.com
Signed-off-by: default avatarXi Wang <wangxi11@huawei.com>
Signed-off-by: default avatarWeihang Li <liweihang@huawei.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@nvidia.com>
parent 3f31c412
...@@ -48,7 +48,7 @@ ...@@ -48,7 +48,7 @@
#include "hns_roce_hem.h" #include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h" #include "hns_roce_hw_v2.h"
static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg, static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
struct ib_sge *sg) struct ib_sge *sg)
{ {
dseg->lkey = cpu_to_le32(sg->lkey); dseg->lkey = cpu_to_le32(sg->lkey);
...@@ -729,6 +729,40 @@ static int check_recv_valid(struct hns_roce_dev *hr_dev, ...@@ -729,6 +729,40 @@ static int check_recv_valid(struct hns_roce_dev *hr_dev,
return 0; return 0;
} }
/*
 * Write the scatter/gather list of a receive work request into the RQ
 * WQE at @wqe_idx, skipping zero-length SGEs.  When the RQ reserves an
 * extra SGE slot, an invalid sentinel entry is appended after the last
 * valid one.  If the QP keeps an inline-receive buffer, the SGE
 * addresses and lengths are also recorded in its per-WQE bookkeeping.
 */
static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
			u32 wqe_idx)
{
	struct hns_roce_v2_wqe_data_seg *seg;
	int idx;

	seg = (struct hns_roce_v2_wqe_data_seg *)
			hns_roce_get_recv_wqe(hr_qp, wqe_idx);

	/* Copy only non-empty SGEs; seg ends one past the last written. */
	for (idx = 0; idx < wr->num_sge; idx++) {
		if (wr->sg_list[idx].length) {
			set_data_seg_v2(seg, wr->sg_list + idx);
			seg++;
		}
	}

	/* Terminate with an invalid SGE when a slot is reserved for it. */
	if (hr_qp->rq.rsv_sge) {
		seg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
		seg->addr = 0;
		seg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
	}

	/* rq support inline data */
	if (hr_qp->rq_inl_buf.wqe_cnt) {
		struct hns_roce_rinl_sge *rinl =
				hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;

		hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
		for (idx = 0; idx < wr->num_sge; idx++) {
			rinl[idx].addr = (void *)(u64)wr->sg_list[idx].addr;
			rinl[idx].len = wr->sg_list[idx].length;
		}
	}
}
static int hns_roce_v2_post_recv(struct ib_qp *ibqp, static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
const struct ib_recv_wr *wr, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr) const struct ib_recv_wr **bad_wr)
...@@ -736,15 +770,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, ...@@ -736,15 +770,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev; struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v2_wqe_data_seg *dseg; u32 wqe_idx, nreq, max_sge;
struct hns_roce_rinl_sge *sge_list;
unsigned long flags; unsigned long flags;
void *wqe = NULL;
u32 wqe_idx;
u32 max_sge;
int nreq;
int ret; int ret;
int i;
spin_lock_irqsave(&hr_qp->rq.lock, flags); spin_lock_irqsave(&hr_qp->rq.lock, flags);
...@@ -764,8 +792,6 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, ...@@ -764,8 +792,6 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
goto out; goto out;
} }
wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
if (unlikely(wr->num_sge > max_sge)) { if (unlikely(wr->num_sge > max_sge)) {
ibdev_err(ibdev, "num_sge = %d >= max_sge = %u.\n", ibdev_err(ibdev, "num_sge = %d >= max_sge = %u.\n",
wr->num_sge, max_sge); wr->num_sge, max_sge);
...@@ -774,32 +800,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, ...@@ -774,32 +800,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
goto out; goto out;
} }
wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx); wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
dseg = (struct hns_roce_v2_wqe_data_seg *)wqe; fill_rq_wqe(hr_qp, wr, wqe_idx);
for (i = 0; i < wr->num_sge; i++) {
if (!wr->sg_list[i].length)
continue;
set_data_seg_v2(dseg, wr->sg_list + i);
dseg++;
}
if (hr_qp->rq.rsv_sge) {
dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
dseg->addr = 0;
dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
}
/* rq support inline data */
if (hr_qp->rq_inl_buf.wqe_cnt) {
sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
(u32)wr->num_sge;
for (i = 0; i < wr->num_sge; i++) {
sge_list[i].addr =
(void *)(u64)wr->sg_list[i].addr;
sge_list[i].len = wr->sg_list[i].length;
}
}
hr_qp->rq.wrid[wqe_idx] = wr->wr_id; hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
} }
...@@ -928,9 +930,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, ...@@ -928,9 +930,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
dseg = (struct hns_roce_v2_wqe_data_seg *)wqe; dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
for (i = 0; i < wr->num_sge; ++i) { for (i = 0; i < wr->num_sge; ++i) {
dseg[i].len = cpu_to_le32(wr->sg_list[i].length); set_data_seg_v2(dseg, wr->sg_list + i);
dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey); dseg++;
dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
} }
if (srq->rsv_sge) { if (srq->rsv_sge) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment