Commit 12542f1d authored by Weihang Li, committed by Jason Gunthorpe

RDMA/hns: Refactor process about opcode in post_send()

According to the IB specification, the verbs should return an immediate
error when the user sets an unsupported opcode. Furthermore, refactor the
opcode handling in the process of post_send() to make the differences
between opcodes clearer.

Link: https://lore.kernel.org/r/1600509802-44382-2-git-send-email-liweihang@huawei.com
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 3cb2c996
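
As a caller-side illustration of the behavior this commit enforces (a minimal sketch, not part of the diff; the helper name post_unsupported_wr and its ud_qp/sge parameters are hypothetical): posting a work request whose opcode the QP type cannot support, e.g. an RDMA write on a UD QP, now fails immediately with -EINVAL from ib_post_send() instead of being encoded into a WQE.

/*
 * Hypothetical caller-side sketch: the opcode check added by this commit
 * rejects the WR up front, so ib_post_send() returns -EINVAL and *bad_wr
 * points at the offending request.
 */
static int post_unsupported_wr(struct ib_qp *ud_qp, struct ib_sge *sge)
{
	const struct ib_send_wr *bad_wr;
	struct ib_rdma_wr wr = {
		.wr = {
			.opcode     = IB_WR_RDMA_WRITE, /* invalid on a UD QP */
			.sg_list    = sge,
			.num_sge    = 1,
			.send_flags = IB_SEND_SIGNALED,
		},
	};

	/* hns_roce_v2_post_send() -> set_ud_opcode() now fails with -EINVAL */
	return ib_post_send(ud_qp, &wr.wr, &bad_wr);
}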
@@ -292,6 +292,33 @@ static unsigned int calc_wr_sge_num(const struct ib_send_wr *wr,
 	return valid_num;
 }
 
+static __le32 get_immtdata(const struct ib_send_wr *wr)
+{
+	switch (wr->opcode) {
+	case IB_WR_SEND_WITH_IMM:
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		return cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
+	default:
+		return 0;
+	}
+}
+
+static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
+			 const struct ib_send_wr *wr)
+{
+	u32 ib_op = wr->opcode;
+
+	if (ib_op != IB_WR_SEND && ib_op != IB_WR_SEND_WITH_IMM)
+		return -EINVAL;
+
+	ud_sq_wqe->immtdata = get_immtdata(wr);
+
+	roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
+		       V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));
+
+	return 0;
+}
+
 static inline int set_ud_wqe(struct hns_roce_qp *qp,
 			     const struct ib_send_wr *wr,
 			     void *wqe, unsigned int *sge_idx,
@@ -305,10 +332,15 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
 	u32 msg_len = 0;
 	bool loopback;
 	u8 *smac;
+	int ret;
 
 	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
 	memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
 
+	ret = set_ud_opcode(ud_sq_wqe, wr);
+	if (WARN_ON(ret))
+		return ret;
+
 	roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
 		       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
 	roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
@@ -329,23 +361,8 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
 	roce_set_bit(ud_sq_wqe->byte_40,
 		     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
 
-	roce_set_field(ud_sq_wqe->byte_4,
-		       V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
-		       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
-		       HNS_ROCE_V2_WQE_OP_SEND);
-
 	ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
 
-	switch (wr->opcode) {
-	case IB_WR_SEND_WITH_IMM:
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		ud_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
-		break;
-	default:
-		ud_sq_wqe->immtdata = 0;
-		break;
-	}
-
 	/* Set sig attr */
 	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
 		     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
@@ -402,6 +419,46 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
 	return 0;
 }
 
+static int set_rc_opcode(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+			 const struct ib_send_wr *wr)
+{
+	u32 ib_op = wr->opcode;
+
+	rc_sq_wqe->immtdata = get_immtdata(wr);
+
+	switch (ib_op) {
+	case IB_WR_RDMA_READ:
+	case IB_WR_RDMA_WRITE:
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
+		rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
+		break;
+	case IB_WR_SEND:
+	case IB_WR_SEND_WITH_IMM:
+		break;
+	case IB_WR_ATOMIC_CMP_AND_SWP:
+	case IB_WR_ATOMIC_FETCH_AND_ADD:
+		rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
+		rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
+		break;
+	case IB_WR_REG_MR:
+		set_frmr_seg(rc_sq_wqe, reg_wr(wr));
+		break;
+	case IB_WR_LOCAL_INV:
+		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
+		fallthrough;
+	case IB_WR_SEND_WITH_INV:
+		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+		       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op));
+
+	return 0;
+}
+
 static inline int set_rc_wqe(struct hns_roce_qp *qp,
 			     const struct ib_send_wr *wr,
 			     void *wqe, unsigned int *sge_idx,
@@ -411,25 +468,16 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
 	unsigned int curr_idx = *sge_idx;
 	unsigned int valid_num_sge;
 	u32 msg_len = 0;
-	int ret = 0;
+	int ret;
 
 	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
 	memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
 
 	rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
 
-	switch (wr->opcode) {
-	case IB_WR_SEND_WITH_IMM:
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		rc_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
-		break;
-	case IB_WR_SEND_WITH_INV:
-		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
-		break;
-	default:
-		rc_sq_wqe->immtdata = 0;
-		break;
-	}
+	ret = set_rc_opcode(rc_sq_wqe, wr);
+	if (WARN_ON(ret))
+		return ret;
 
 	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
 		     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
@@ -443,33 +491,6 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
 	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
 		     owner_bit);
 
-	switch (wr->opcode) {
-	case IB_WR_RDMA_READ:
-	case IB_WR_RDMA_WRITE:
-	case IB_WR_RDMA_WRITE_WITH_IMM:
-		rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
-		rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
-		break;
-	case IB_WR_LOCAL_INV:
-		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
-		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
-		break;
-	case IB_WR_REG_MR:
-		set_frmr_seg(rc_sq_wqe, reg_wr(wr));
-		break;
-	case IB_WR_ATOMIC_CMP_AND_SWP:
-	case IB_WR_ATOMIC_FETCH_AND_ADD:
-		rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
-		rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
-		break;
-	default:
-		break;
-	}
-
-	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
-		       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
-		       to_hr_opcode(wr->opcode));
-
 	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
 	    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
 		set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);