Commit b14c95be authored by Lang Cheng, committed by Jason Gunthorpe

RDMA/hns: Cleanups of magic numbers

Some magic numbers are hard to understand, so replace them with macros or
add comments explaining them.

Link: https://lore.kernel.org/r/20200126145504.9700-1-liweihang@huawei.com
Signed-off-by: Lang Cheng <chenglang@huawei.com>
Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Yixing Liu <liuyixing1@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 43fb5892
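
Judging by the functions named in the hunk headers, the changes below span hns_roce_device.h, hns_roce_hw_v2.c, hns_roce_qp.c and hns_roce_srq.c. The values behind the new names can be read off the hunks themselves, since each macro replaces the literal on the matching '-' line. As a quick reference, here is a minimal sketch of the definitions this diff assumes; the names are the driver's, but the values are inferred from the replaced literals, and HNS_ROCE_HW_VER1 (whose value is not recoverable from this diff) is only described in a comment:

/* Sketch: values inferred from the literals each macro replaces below. */
#define PCI_REVISION_ID_HIP08_A		0x20	/* replaces revision check 0x20 */
#define PCI_REVISION_ID_HIP08_B		0x21	/* replaces revision check 0x21 */
#define HNS_ROCE_SGE_IN_WQE		2	/* SGEs carried inline in a WQE */
#define HNS_ROCE_SGE_SIZE		16	/* bytes per scatter/gather entry */
#define V2_CQ_DB_PARAMETER_CONS_IDX_M	0xffffff /* 24-bit CQ consumer index */
/* HNS_ROCE_HW_VER1 identifies the HIP06 hardware generation; the qp.c
 * hunks use it in place of the indirect "max_sq_sg <= 2" version test.
 */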
@@ -881,7 +881,7 @@ struct hns_roce_caps {
 	u32		cqc_timer_ba_pg_sz;
 	u32		cqc_timer_buf_pg_sz;
 	u32		cqc_timer_hop_num;
-	u32		cqe_ba_pg_sz;
+	u32		cqe_ba_pg_sz;	/* page_size = 4K*(2^cqe_ba_pg_sz) */
 	u32		cqe_buf_pg_sz;
 	u32		cqe_hop_num;
 	u32		srqwqe_ba_pg_sz;
...
@@ -1999,7 +1999,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
 			return ret;
 	}
 
-	if (hr_dev->pci_dev->revision == 0x21) {
+	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
 		ret = hns_roce_query_pf_timer_resource(hr_dev);
 		if (ret) {
 			dev_err(hr_dev->dev,
@@ -2016,7 +2016,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
 			return ret;
 	}
 
-	if (hr_dev->pci_dev->revision == 0x21) {
+	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
 		ret = hns_roce_set_vf_switch_param(hr_dev, 0);
 		if (ret) {
 			dev_err(hr_dev->dev,
@@ -2298,7 +2298,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_v2_priv *priv = hr_dev->priv;
 
-	if (hr_dev->pci_dev->revision == 0x21)
+	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B)
 		hns_roce_function_clear(hr_dev);
 
 	hns_roce_free_link_table(hr_dev, &priv->tpq);
@@ -2757,7 +2757,7 @@ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
 
 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
 {
-	*hr_cq->set_ci_db = cons_index & 0xffffff;
+	*hr_cq->set_ci_db = cons_index & V2_CQ_DB_PARAMETER_CONS_IDX_M;
 }
 
 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
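
The cq_set_ci hunk above is behavior-preserving: the consumer index written to the CQ doorbell record is still truncated to a 24-bit field, only now the width is named. A standalone sketch of that equivalence, assuming V2_CQ_DB_PARAMETER_CONS_IDX_M expands to the same 0xffffff the old literal used:

#include <stdint.h>
#include <stdio.h>

/* Assumption: the macro keeps the old literal's value (low 24 bits). */
#define V2_CQ_DB_PARAMETER_CONS_IDX_M 0xffffff

int main(void)
{
	uint32_t cons_index = 0x01234567;	/* example index past 24 bits */

	/* Old and new forms store the same truncated value. */
	printf("old: 0x%06x\n", cons_index & 0xffffff);
	printf("new: 0x%06x\n", cons_index & V2_CQ_DB_PARAMETER_CONS_IDX_M);
	return 0;
}

Both lines print 0x234567, which is the point of the change: same stored value, self-documenting width.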
@@ -4475,7 +4475,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
 	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
 		       V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
 
-	if (hr_dev->pci_dev->revision == 0x21 && is_udp)
+	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B && is_udp)
 		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
 			       V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
 	else
...
@@ -309,7 +309,7 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
 		max_cnt = max(1U, cap->max_recv_sge);
 		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
 
-		if (hr_dev->caps.max_rq_sg <= 2)
+		if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
 			hr_qp->rq.wqe_shift =
 				ilog2(hr_dev->caps.max_rq_desc_sz);
 		else
@@ -370,16 +370,17 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
 	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
 
 	max_cnt = max(1U, cap->max_send_sge);
-	if (hr_dev->caps.max_sq_sg <= 2)
+	if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
 		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
 	else
 		hr_qp->sq.max_gs = max_cnt;
 
-	if (hr_qp->sq.max_gs > 2)
+	if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
 		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
 							(hr_qp->sq.max_gs - 2));
 
-	if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
+	if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE &&
+	    hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
 		if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
 			dev_err(hr_dev->dev,
 				"The extended sge cnt error! sge_cnt=%d\n",
@@ -392,7 +393,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
 	ex_sge_num = hr_qp->sge.sge_cnt;
 
 	/* Get buf size, SQ and RQ are aligned to page_szie */
-	if (hr_dev->caps.max_sq_sg <= 2) {
+	if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
 		hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
 					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
 				   round_up((hr_qp->sq.wqe_cnt <<
@@ -528,13 +529,15 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
 	}
 
 	/* ud sqwqe's sge use extend sge */
-	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+	if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
+	    hr_qp->ibqp.qp_type == IB_QPT_GSI) {
 		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
 							hr_qp->sq.max_gs);
 		hr_qp->sge.sge_shift = 4;
 	}
 
-	if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
+	if (hr_qp->sq.max_gs > 2 &&
+	    hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
 		if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
 			dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
 				hr_qp->sge.sge_cnt);
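
The qp.c hunks above encode the same rule: a HIP08 send WQE carries HNS_ROCE_SGE_IN_WQE (2) scatter/gather entries inline, and any SGEs beyond that spill into an extended-SGE buffer whose slot count is rounded up to a power of two. A small self-contained sketch of that sizing, with hypothetical example numbers:

#include <stdio.h>

#define HNS_ROCE_SGE_IN_WQE 2	/* SGEs carried inline in the WQE itself */

int main(void)
{
	unsigned int wqe_cnt = 128;	/* example SQ depth */
	unsigned int max_gs = 5;	/* example SGEs per work request */
	unsigned int sge_cnt = 0;

	/* Only SGEs beyond the two inline slots need extended space; the
	 * buffer is rounded up to a power of two, as the kernel's
	 * roundup_pow_of_two() does.
	 */
	if (max_gs > HNS_ROCE_SGE_IN_WQE) {
		unsigned int need = wqe_cnt * (max_gs - HNS_ROCE_SGE_IN_WQE);

		for (sge_cnt = 1; sge_cnt < need; sge_cnt <<= 1)
			;
	}
	printf("extended sge_cnt = %u\n", sge_cnt);	/* 128 * 3 -> 512 */
	return 0;
}

On HIP08 rev A (PCI_REVISION_ID_HIP08_A) the result is additionally checked against caps.max_extend_sg, which is exactly what the dev_err branches above guard.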
@@ -577,7 +580,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 
 	/* Get data_seg numbers */
 	max_cnt = max(1U, cap->max_send_sge);
-	if (hr_dev->caps.max_sq_sg <= 2)
+	if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
 		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
 	else
 		hr_qp->sq.max_gs = max_cnt;
@@ -593,7 +596,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 	hr_qp->sq.offset = 0;
 	size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);
 
-	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
+	if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && hr_qp->sge.sge_cnt) {
 		hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
 					 (u32)hr_qp->sge.sge_cnt);
 		hr_qp->sge.offset = size;
@@ -1078,7 +1081,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
 		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
 
 		/* when hw version is v1, the sqpn is allocated */
-		if (hr_dev->caps.max_sq_sg <= 2)
+		if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
 			hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
 					     hr_dev->iboe.phy_port[hr_qp->port];
 		else
...
@@ -381,7 +381,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
 	srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
 	srq->max_gs = init_attr->attr.max_sge;
 
-	srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));
+	srq_desc_size = roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
+					       HNS_ROCE_SGE_SIZE * srq->max_gs));
 	srq->wqe_shift = ilog2(srq_desc_size);
...
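
Finally, the srq.c hunk only names the 16-byte SGE size; the descriptor math is unchanged. For instance, with max_sge = 5 (a hypothetical attribute value) it yields roundup_pow_of_two(max(16, 16 * 5)) = 128 bytes, so wqe_shift becomes 7. A compact sketch of that arithmetic:

#include <stdio.h>

#define HNS_ROCE_SGE_SIZE 16	/* bytes per scatter/gather entry */

int main(void)
{
	unsigned int max_sge = 5;			/* example SRQ attribute */
	unsigned int need = HNS_ROCE_SGE_SIZE * max_sge;	/* 80 bytes */
	unsigned int desc = HNS_ROCE_SGE_SIZE;		/* max() floor: 16 */
	unsigned int shift = 4;				/* log2(16) */

	/* roundup_pow_of_two(max(16, 16 * max_sge)), tracking its log2
	 * the way ilog2() recovers it in the kernel code.
	 */
	while (desc < need) {
		desc <<= 1;
		shift++;
	}
	printf("srq_desc_size = %u, wqe_shift = %u\n", desc, shift);
	/* prints: srq_desc_size = 128, wqe_shift = 7 */
	return 0;
}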