Commit 711195e5 authored by Lijun Ou, committed by Jason Gunthorpe

RDMA/hns: Reserve one sge in order to avoid local length error

When the rq/srq sge length is smaller than the sq sge length, a local length
error is produced and the bus may hang. Therefore, for rq wqe and srq wqe, one
reserved sge pointing to a reserved mr is used to avoid this error.

Link: https://lore.kernel.org/r/1588931159-56875-10-git-send-email-liweihang@huawei.com
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 9581a356
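As a rough illustration of the scheme in the diff below: the driver now sizes the rq/srq for one extra (reserved) sge, and the first unused slot of each posted recv wqe is filled with a sentinel lkey and length so the hardware stops there instead of walking past the valid entries. The stand-alone C sketch that follows is hypothetical, not driver code; only the three #define values are taken from the patch, while struct sge, fill_recv_wqe() and the example values are invented for illustration.

```c
/*
 * Hypothetical stand-alone sketch (not driver code) of the padding scheme:
 * the queue is sized for one reserved sge, and the first unused slot of a
 * posted recv WQE is terminated with a sentinel lkey/length.  Only the
 * three #define values come from the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define HNS_ROCE_RESERVED_SGE		1
#define HNS_ROCE_INVALID_LKEY		0x0
#define HNS_ROCE_INVALID_SGE_LENGTH	0x80000000

struct sge {
	uint64_t addr;
	uint32_t lkey;
	uint32_t len;
};

/* Copy the caller's sges into the WQE and terminate with a sentinel sge. */
static void fill_recv_wqe(struct sge *wqe, unsigned int max_gs,
			  const struct sge *sg_list, unsigned int num_sge)
{
	unsigned int i;

	for (i = 0; i < num_sge; i++)
		wqe[i] = sg_list[i];

	if (i < max_gs) {
		wqe[i].addr = 0;
		wqe[i].lkey = HNS_ROCE_INVALID_LKEY;
		wqe[i].len  = HNS_ROCE_INVALID_SGE_LENGTH;
	}
}

int main(void)
{
	/* The user asked for 2 recv sges; the queue is sized for 2 + 1 reserved. */
	unsigned int max_gs = 2 + HNS_ROCE_RESERVED_SGE;
	struct sge sg_list[2] = {
		{ .addr = 0x1000, .lkey = 0x42, .len = 256 },
		{ .addr = 0x2000, .lkey = 0x42, .len = 512 },
	};
	struct sge wqe[3];
	unsigned int i;

	fill_recv_wqe(wqe, max_gs, sg_list, 2);

	for (i = 0; i < max_gs; i++)
		printf("sge[%u]: addr=0x%llx lkey=0x%x len=0x%x\n", i,
		       (unsigned long long)wqe[i].addr, wqe[i].lkey, wqe[i].len);
	return 0;
}
```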
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -66,6 +66,8 @@
 #define HNS_ROCE_CQE_WCMD_EMPTY_BIT		0x2
 #define HNS_ROCE_MIN_CQE_CNT			16
 
+#define HNS_ROCE_RESERVED_SGE			1
+
 #define HNS_ROCE_MAX_IRQ_NUM			128
 
 #define HNS_ROCE_SGE_IN_WQE			2
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -629,7 +629,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
 
 		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
 
-		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
+		if (unlikely(wr->num_sge >= hr_qp->rq.max_gs)) {
 			ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
 				  wr->num_sge, hr_qp->rq.max_gs);
 			ret = -EINVAL;
@@ -649,6 +649,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
 		if (i < hr_qp->rq.max_gs) {
 			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
 			dseg->addr = 0;
+			dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
 		}
 
 		/* rq support inline data */
@@ -782,8 +783,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 		}
 
 		if (i < srq->max_gs) {
-			dseg[i].len = 0;
-			dseg[i].lkey = cpu_to_le32(0x100);
+			dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
+			dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
 			dseg[i].addr = 0;
 		}
 
@@ -5045,7 +5046,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
 
 	attr->srq_limit = limit_wl;
 	attr->max_wr = srq->wqe_cnt - 1;
-	attr->max_sge = srq->max_gs;
+	attr->max_sge = srq->max_gs - HNS_ROCE_RESERVED_SGE;
 
 out:
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -92,7 +92,9 @@
 #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ		PAGE_SIZE
 #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED		0xFFFFF000
 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM		2
-#define HNS_ROCE_INVALID_LKEY			0x100
+#define HNS_ROCE_INVALID_LKEY			0x0
+#define HNS_ROCE_INVALID_SGE_LENGTH		0x80000000
+
 #define HNS_ROCE_CMQ_TX_TIMEOUT			30000
 #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE	2
 #define HNS_ROCE_V2_RSV_QPS			8
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -386,7 +386,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
 		return -EINVAL;
 	}
 
-	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
+	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
+					      HNS_ROCE_RESERVED_SGE);
 
 	if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
 		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
@@ -401,7 +402,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
 		hr_qp->rq_inl_buf.wqe_cnt = 0;
 
 	cap->max_recv_wr = cnt;
-	cap->max_recv_sge = hr_qp->rq.max_gs;
+	cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE;
 
 	return 0;
 }
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -297,7 +297,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
 	spin_lock_init(&srq->lock);
 
 	srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
-	srq->max_gs = init_attr->attr.max_sge;
+	srq->max_gs = init_attr->attr.max_sge + HNS_ROCE_RESERVED_SGE;
 
 	if (udata) {
 		ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));