Commit 5c171cbe authored by Israel Rukshin, committed by Jason Gunthorpe

RDMA/mlx5: Remove unused IB_WR_REG_SIG_MR code

IB_WR_REG_SIG_MR is no longer needed now that IB_WR_REG_MR_INTEGRITY
is used instead.
Signed-off-by: Israel Rukshin <israelr@mellanox.com>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent e9a53e73
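For context on what replaces the removed path: with IB_WR_REG_MR_INTEGRITY, a ULP registers data and protection buffers through a single IB_MR_TYPE_INTEGRITY MR and posts one work request, instead of the old signature-handover WR plus two explicitly posted SET_PSV WQEs (mlx5 now builds the UMR and SET_PSV WQEs internally, see the surviving case just above the removed one in _mlx5_ib_post_send below). The following is a minimal sketch of that flow, not code from this commit: the function name, scatterlists, CQE, page size, and DIF settings are illustrative placeholders.

#include <linux/sizes.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch: register data + metadata through one integrity MR and post a
 * single IB_WR_REG_MR_INTEGRITY WR. "data_sg"/"meta_sg", their nents,
 * and "reg_cqe" are hypothetical caller state; the MR is assumed to
 * come from ib_alloc_mr_integrity().
 */
static int post_integrity_reg(struct ib_qp *qp, struct ib_mr *mr,
                              struct scatterlist *data_sg, int data_nents,
                              struct scatterlist *meta_sg, int meta_nents,
                              struct ib_cqe *reg_cqe)
{
        struct ib_reg_wr wr = {};
        const struct ib_send_wr *bad_wr;
        int n;

        /* Map both data and protection SG lists onto the integrity MR. */
        n = ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
                            meta_sg, meta_nents, NULL, SZ_4K);
        if (n < 0)
                return n;

        /*
         * Signature parameters live in mr->sig_attrs; a real caller also
         * fills the T10-DIF fields (bg, app_tag, ref_tag, ...) for the
         * chosen domain.
         */
        mr->sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
        mr->sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;

        wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
        wr.wr.wr_cqe = reg_cqe;
        wr.mr = mr;
        wr.key = mr->rkey;
        wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
                    IB_ACCESS_REMOTE_WRITE;

        return ib_post_send(qp, &wr.wr, &bad_wr);
}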
@@ -1760,8 +1760,7 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
                         goto err_free_in;
                 mr->desc_size = sizeof(struct mlx5_klm);
                 mr->max_descs = ndescs;
-        } else if (mr_type == IB_MR_TYPE_SIGNATURE ||
-                   mr_type == IB_MR_TYPE_INTEGRITY) {
+        } else if (mr_type == IB_MR_TYPE_INTEGRITY) {
                 u32 psv_index[2];
 
                 MLX5_SET(mkc, mkc, bsf_en, 1);
@@ -1787,14 +1786,12 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
                 mr->sig->sig_err_exists = false;
                 /* Next UMR, Arm SIGERR */
                 ++mr->sig->sigerr_count;
-                if (mr_type == IB_MR_TYPE_INTEGRITY) {
-                        mr->pi_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg,
-                                                        max_num_meta_sg);
-                        if (IS_ERR(mr->pi_mr)) {
-                                err = PTR_ERR(mr->pi_mr);
-                                goto err_destroy_psv;
-                        }
-                }
+                mr->pi_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg,
+                                                max_num_meta_sg);
+                if (IS_ERR(mr->pi_mr)) {
+                        err = PTR_ERR(mr->pi_mr);
+                        goto err_destroy_psv;
+                }
         } else {
                 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
                 err = -EINVAL;
...
@@ -4557,20 +4557,6 @@ static int set_sig_data_segment(const struct ib_send_wr *send_wr,
         bool prot = false;
         int ret;
         int wqe_size;
 
-        if (send_wr->opcode == IB_WR_REG_SIG_MR) {
-                const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
-
-                data_len = wr->wr.sg_list->length;
-                data_key = wr->wr.sg_list->lkey;
-                data_va = wr->wr.sg_list->addr;
-                if (wr->prot) {
-                        prot_len = wr->prot->length;
-                        prot_key = wr->prot->lkey;
-                        prot_va = wr->prot->addr;
-                        prot = true;
-                }
-        } else {
-                struct mlx5_ib_mr *mr = to_mmr(sig_mr);
-                struct mlx5_ib_mr *pi_mr = mr->pi_mr;
+        struct mlx5_ib_mr *mr = to_mmr(sig_mr);
+        struct mlx5_ib_mr *pi_mr = mr->pi_mr;
@@ -4583,7 +4569,6 @@ static int set_sig_data_segment(const struct ib_send_wr *send_wr,
-                        prot_va = pi_mr->ibmr.iova + data_len;
-                        prot = true;
-                }
-        }
+                prot_va = pi_mr->ibmr.iova + data_len;
+                prot = true;
+        }
 
         if (!prot || (data_key == prot_key && data_va == prot_va &&
                       data_len == prot_len)) {
@@ -4748,57 +4733,6 @@ static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
         return 0;
 }
 
-static int set_sig_umr_wr(const struct ib_send_wr *send_wr,
-                          struct mlx5_ib_qp *qp, void **seg, int *size,
-                          void **cur_edge)
-{
-        const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
-        struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
-        u32 pdn = get_pd(qp)->pdn;
-        u32 xlt_size;
-        int region_len, ret;
-
-        if (unlikely(wr->wr.num_sge != 1) ||
-            unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
-            unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
-            unlikely(!sig_mr->sig->sig_status_checked))
-                return -EINVAL;
-
-        /* length of the protected region, data + protection */
-        region_len = wr->wr.sg_list->length;
-        if (wr->prot &&
-            (wr->prot->lkey != wr->wr.sg_list->lkey ||
-             wr->prot->addr != wr->wr.sg_list->addr ||
-             wr->prot->length != wr->wr.sg_list->length))
-                region_len += wr->prot->length;
-
-        /**
-         * KLM octoword size - if protection was provided
-         * then we use strided block format (3 octowords),
-         * else we use single KLM (1 octoword)
-         **/
-        xlt_size = wr->prot ? 0x30 : sizeof(struct mlx5_klm);
-
-        set_sig_umr_segment(*seg, xlt_size);
-        *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
-        *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
-        handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
-        set_sig_mkey_segment(*seg, wr->sig_mr, wr->access_flags, xlt_size,
-                             region_len, pdn);
-        *seg += sizeof(struct mlx5_mkey_seg);
-        *size += sizeof(struct mlx5_mkey_seg) / 16;
-        handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-
-        ret = set_sig_data_segment(send_wr, wr->sig_mr, wr->sig_attrs, qp, seg,
-                                   size, cur_edge);
-        if (ret)
-                return ret;
-
-        sig_mr->sig->sig_status_checked = false;
-        return 0;
-}
-
 static int set_psv_wr(struct ib_sig_domain *domain,
                       u32 psv_idx, void **seg, int *size)
 {
@@ -5187,74 +5121,6 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                         num_sge = 0;
                         goto skip_psv;
 
-                case IB_WR_REG_SIG_MR:
-                        qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
-                        mr = to_mmr(sig_handover_wr(wr)->sig_mr);
-
-                        ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
-                        err = set_sig_umr_wr(wr, qp, &seg, &size,
-                                             &cur_edge);
-                        if (err) {
-                                mlx5_ib_warn(dev, "\n");
-                                *bad_wr = wr;
-                                goto out;
-                        }
-
-                        finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
-                                   wr->wr_id, nreq, fence,
-                                   MLX5_OPCODE_UMR);
-                        /*
-                         * SET_PSV WQEs are not signaled and solicited
-                         * on error
-                         */
-                        err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
-                                          &size, &cur_edge, nreq, false,
-                                          true);
-                        if (err) {
-                                mlx5_ib_warn(dev, "\n");
-                                err = -ENOMEM;
-                                *bad_wr = wr;
-                                goto out;
-                        }
-                        err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
-                                         mr->sig->psv_memory.psv_idx, &seg,
-                                         &size);
-                        if (err) {
-                                mlx5_ib_warn(dev, "\n");
-                                *bad_wr = wr;
-                                goto out;
-                        }
-                        finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
-                                   wr->wr_id, nreq, fence,
-                                   MLX5_OPCODE_SET_PSV);
-                        err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
-                                          &size, &cur_edge, nreq, false,
-                                          true);
-                        if (err) {
-                                mlx5_ib_warn(dev, "\n");
-                                err = -ENOMEM;
-                                *bad_wr = wr;
-                                goto out;
-                        }
-                        err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
-                                         mr->sig->psv_wire.psv_idx, &seg,
-                                         &size);
-                        if (err) {
-                                mlx5_ib_warn(dev, "\n");
-                                *bad_wr = wr;
-                                goto out;
-                        }
-                        finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
-                                   wr->wr_id, nreq, fence,
-                                   MLX5_OPCODE_SET_PSV);
-                        qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
-                        num_sge = 0;
-                        goto skip_psv;
-
                 default:
                         break;
                 }
...
@@ -456,7 +456,7 @@ static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
                 return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
         case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
                 return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
-        case IB_WR_REG_SIG_MR:
+        case IB_WR_REG_MR_INTEGRITY:
                 return PVRDMA_WR_REG_SIG_MR;
         default:
                 return PVRDMA_WR_ERROR;
...
@@ -776,9 +776,6 @@ __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
  * enum ib_mr_type - memory region type
  * @IB_MR_TYPE_MEM_REG: memory region that is used for
  *                      normal registration
- * @IB_MR_TYPE_SIGNATURE: memory region that is used for
- *                        signature operations (data-integrity
- *                        capable regions)
  * @IB_MR_TYPE_SG_GAPS: memory region that is capable to
  *                      register any arbitrary sg lists (without
  *                      the normal mr constraints - see
@@ -794,7 +791,6 @@ __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
  */
 enum ib_mr_type {
         IB_MR_TYPE_MEM_REG,
-        IB_MR_TYPE_SIGNATURE,
         IB_MR_TYPE_SG_GAPS,
         IB_MR_TYPE_DM,
         IB_MR_TYPE_USER,
@@ -1235,7 +1231,6 @@ enum ib_wr_opcode {
         /* These are kernel only and can not be issued by userspace */
         IB_WR_REG_MR = 0x20,
-        IB_WR_REG_SIG_MR,
         IB_WR_REG_MR_INTEGRITY,
 
         /* reserve values for low level drivers' internal use.
@@ -1346,20 +1341,6 @@ static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
         return container_of(wr, struct ib_reg_wr, wr);
 }
 
-struct ib_sig_handover_wr {
-        struct ib_send_wr wr;
-        struct ib_sig_attrs *sig_attrs;
-        struct ib_mr *sig_mr;
-        int access_flags;
-        struct ib_sge *prot;
-};
-
-static inline const struct ib_sig_handover_wr *
-sig_handover_wr(const struct ib_send_wr *wr)
-{
-        return container_of(wr, struct ib_sig_handover_wr, wr);
-}
-
 struct ib_recv_wr {
         struct ib_recv_wr *next;
         union {
...
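One behavioral detail carries over from the removed code: set_sig_umr_wr() returned -EINVAL unless sig_mr->sig->sig_status_checked was set, i.e. a signature MR could not be reused until its status had been read back, and it cleared that flag on each registration. Integrity MRs keep the same model: after the I/O completes, the ULP queries the MR with the existing ib_check_mr_status() helper, which reports guard/ref-tag/app-tag errors and, in mlx5, marks the status as checked so the MR can be re-armed. A minimal sketch follows; the helper name is hypothetical.

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch: after completion, check the integrity MR for signature errors
 * before reusing it. "check_pi_status" is an illustrative name, not an
 * API from this commit.
 */
static int check_pi_status(struct ib_mr *mr)
{
        struct ib_mr_status mr_status;
        int ret;

        ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
        if (ret)
                return ret;

        if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
                /* sig_err carries the mismatch type and expected/actual values */
                pr_err("PI error: type %d, expected 0x%x, actual 0x%x\n",
                       mr_status.sig_err.err_type,
                       mr_status.sig_err.expected,
                       mr_status.sig_err.actual);
                return -EIO;
        }
        return 0;
}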