Commit 606bf89e authored by Lijun Ou, committed by Jason Gunthorpe

RDMA/hns: Refactor for hns_roce_v2_modify_qp function

Move some code out of the hns_roce_v2_modify_qp() function into new helper
functions. The refactored code mainly covers the absolute fields and the
optional fields of the QP context.

Link: https://lore.kernel.org/r/1562593285-8037-4-git-send-email-oulijun@huawei.com
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 2a2f1887
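
For orientation, the resulting shape of hns_roce_v2_modify_qp() is roughly the condensed sketch below. This is a paraphrase of the hunks that follow, not the verbatim patch: the mailbox submission and the SQ/RQ flush on IB_QPS_ERR are elided into comments, and the final kfree() is assumed from the surrounding code rather than visible in the diff.

    static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                                     const struct ib_qp_attr *attr,
                                     int attr_mask, enum ib_qp_state cur_state,
                                     enum ib_qp_state new_state)
    {
            struct hns_roce_v2_qp_context *context;
            struct hns_roce_v2_qp_context *qpc_mask;
            int ret;

            /* One allocation carries both the context and its mask. */
            context = kcalloc(2, sizeof(*context), GFP_ATOMIC);
            if (!context)
                    return -ENOMEM;
            qpc_mask = context + 1;

            /* Hardware applies only the fields whose mask bits are 0. */
            memset(qpc_mask, 0xff, sizeof(*qpc_mask));

            /* Absolute fields: driven by the cur_state/new_state transition. */
            ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
                                             new_state, context, qpc_mask);
            if (ret)
                    goto out;

            /* ... flush SQ/RQ producer indices when entering IB_QPS_ERR ... */

            /* Optional fields: driven by the attr_mask bits (AV, timeout, ...). */
            ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
                                             qpc_mask);
            if (ret)
                    goto out;

            /* ... post the QPC to hardware, update hr_qp->state ... */

            /* Mirror the accepted attributes into the software QP state. */
            hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);

    out:
            kfree(context);
            return ret;
    }

Note that hns_roce_v2_set_path() is not called from the top level: it is invoked from hns_roce_v2_set_opt_fields() when IB_QP_AV is set, as the hunks below show.
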
@@ -3974,88 +3974,24 @@ static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
}
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state)
int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_qp_context *context;
struct hns_roce_v2_qp_context *qpc_mask;
struct device *dev = hr_dev->dev;
int ret = -EINVAL;
context = kcalloc(2, sizeof(*context), GFP_ATOMIC);
if (!context)
return -ENOMEM;
qpc_mask = context + 1;
/*
* In v2 engine, software pass context and context mask to hardware
* when modifying qp. If software need modify some fields in context,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
memset(qpc_mask, 0xff, sizeof(*qpc_mask));
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, sizeof(*qpc_mask));
modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
goto out;
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
goto out;
} else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
/* Nothing */
;
} else {
dev_err(dev, "Illegal state for QP!\n");
ret = -EINVAL;
goto out;
}
/* When QP state is err, SQ and RQ WQE should be flushed */
if (new_state == IB_QPS_ERR) {
roce_set_field(context->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
hr_qp->sq.head);
roce_set_field(qpc_mask->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
if (!ibqp->srq) {
roce_set_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
hr_qp->rq.head);
roce_set_field(qpc_mask->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
}
}
if (attr_mask & IB_QP_AV) {
const struct ib_global_route *grh =
rdma_ah_read_grh(&attr->ah_attr);
const struct ib_gid_attr *gid_attr = NULL;
int is_roce_protocol;
bool is_udp = false;
u16 vlan = 0xffff;
u8 ib_port;
u8 hr_port;
int ret;
ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num :
hr_qp->port + 1;
ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
hr_port = ib_port - 1;
is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
@@ -4064,7 +4000,11 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
gid_attr = attr->ah_attr.grh.sgid_attr;
ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
if (ret)
goto out;
return ret;
if (gid_attr)
is_udp = (gid_attr->gid_type ==
IB_GID_TYPE_ROCE_UDP_ENCAP);
}
if (vlan < VLAN_CFI_MASK) {
@@ -4078,61 +4018,47 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
}
roce_set_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_VLAN_ID_M,
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, vlan);
roce_set_field(qpc_mask->byte_24_mtu_tc,
V2_QPC_BYTE_24_VLAN_ID_M,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, 0);
if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
dev_err(hr_dev->dev,
"sgid_index(%u) too large. max is %d\n",
grh->sgid_index,
hr_dev->caps.gid_table_len[hr_port]);
ret = -EINVAL;
goto out;
dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n",
grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
return -EINVAL;
}
if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
ret = -EINVAL;
goto out;
return -EINVAL;
}
roce_set_field(context->byte_52_udpspn_dmac,
V2_QPC_BYTE_52_UDPSPN_M, V2_QPC_BYTE_52_UDPSPN_S,
(gid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) ?
0 : 0x12b7);
roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
V2_QPC_BYTE_52_UDPSPN_S,
is_udp ? 0x12b7 : 0);
roce_set_field(qpc_mask->byte_52_udpspn_dmac,
V2_QPC_BYTE_52_UDPSPN_M,
roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
V2_QPC_BYTE_52_UDPSPN_S, 0);
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M,
V2_QPC_BYTE_20_SGID_IDX_S, grh->sgid_index);
V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
grh->sgid_index);
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGID_IDX_M,
V2_QPC_BYTE_20_SGID_IDX_S, 0);
V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
roce_set_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_HOP_LIMIT_M,
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
roce_set_field(qpc_mask->byte_24_mtu_tc,
V2_QPC_BYTE_24_HOP_LIMIT_M,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
if (hr_dev->pci_dev->revision == 0x21 &&
gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
roce_set_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
grh->traffic_class >> 2);
if (hr_dev->pci_dev->revision == 0x21 && is_udp)
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
else
roce_set_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
grh->traffic_class);
roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, grh->traffic_class);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, 0);
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
@@ -4142,11 +4068,70 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S,
rdma_ah_get_sl(&attr->ah_attr));
V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S, 0);
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
return 0;
}
static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
enum ib_qp_state cur_state,
enum ib_qp_state new_state,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
int ret = 0;
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, sizeof(*qpc_mask));
modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
goto out;
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
goto out;
} else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
/* Nothing */
;
} else {
dev_err(hr_dev->dev, "Illegal state for QP!\n");
ret = -EINVAL;
goto out;
}
out:
return ret;
}
static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
int ret = 0;
if (attr_mask & IB_QP_AV) {
ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
return ret;
}
if (attr_mask & IB_QP_TIMEOUT) {
@@ -4158,7 +4143,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
0);
} else {
dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
dev_warn(hr_dev->dev,
"Local ACK timeout shall be 0 to 30.\n");
}
}
@@ -4196,6 +4182,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
V2_QPC_BYTE_244_RNR_CNT_S, 0);
}
/* RC&UC&UD required attr */
if (attr_mask & IB_QP_SQ_PSN) {
roce_set_field(context->byte_172_sq_psn,
V2_QPC_BYTE_172_SQ_CUR_PSN_M,
@@ -4295,6 +4282,83 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hr_qp->qkey = attr->qkey;
}
return ret;
}
static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
if (attr_mask & IB_QP_ACCESS_FLAGS)
hr_qp->atomic_rd_en = attr->qp_access_flags;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
hr_qp->resp_depth = attr->max_dest_rd_atomic;
if (attr_mask & IB_QP_PORT) {
hr_qp->port = attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
}
}
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
enum ib_qp_state new_state)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_qp_context *context;
struct hns_roce_v2_qp_context *qpc_mask;
struct device *dev = hr_dev->dev;
int ret = -EINVAL;
context = kcalloc(2, sizeof(*context), GFP_ATOMIC);
if (!context)
return -ENOMEM;
qpc_mask = context + 1;
/*
* In v2 engine, software pass context and context mask to hardware
* when modifying qp. If software need modify some fields in context,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
memset(qpc_mask, 0xff, sizeof(*qpc_mask));
ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
new_state, context, qpc_mask);
if (ret)
goto out;
/* When QP state is err, SQ and RQ WQE should be flushed */
if (new_state == IB_QPS_ERR) {
roce_set_field(context->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
hr_qp->sq.head);
roce_set_field(qpc_mask->byte_160_sq_ci_pi,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
if (!ibqp->srq) {
roce_set_field(context->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
hr_qp->rq.head);
roce_set_field(qpc_mask->byte_84_rq_ci_pi,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
}
}
/* Configure the optional fields */
ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
qpc_mask);
if (ret)
goto out;
roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
ibqp->srq ? 1 : 0);
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
@@ -4316,15 +4380,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hr_qp->state = new_state;
if (attr_mask & IB_QP_ACCESS_FLAGS)
hr_qp->atomic_rd_en = attr->qp_access_flags;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
hr_qp->resp_depth = attr->max_dest_rd_atomic;
if (attr_mask & IB_QP_PORT) {
hr_qp->port = attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
}
hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
if (new_state == IB_QPS_RESET && !ibqp->uobject) {
hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
......
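
A closing note on the context/qpc_mask convention all of these helpers follow: per the comment in hns_roce_v2_modify_qp(), hardware only applies a QPC field whose bits are cleared to 0 in the mask, so every value written into the context is paired with a clear of the same field in qpc_mask. A minimal sketch of that pairing is below; the helper name is hypothetical, while roce_set_field() and the V2_QPC_BYTE_24_HOP_LIMIT_M/_S accessors are the driver's real ones, lifted from the hunk above.

    /* Hypothetical helper illustrating the pairing used throughout the
     * driver: write the value into the context, then zero the matching
     * bits in the mask so hardware treats the field as "modify me".
     */
    static void sketch_set_hop_limit(struct hns_roce_v2_qp_context *context,
                                     struct hns_roce_v2_qp_context *qpc_mask,
                                     u8 hop_limit)
    {
            roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
                           V2_QPC_BYTE_24_HOP_LIMIT_S, hop_limit);
            roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
                           V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
    }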