Commit 2821c509 authored by Mike Marciniszyn, committed by Doug Ledford

IB/rdmavt: Use new driver specific post send table

Change rvt_post_one_wr to use the new table mechanism for
post send.

Validate that each low level driver specifies the table.
Reviewed-by: Jianxin Xiong <jianxin.xiong@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 9ec4faa3
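
For context, the table referenced in the commit message (added by the parent commit) describes, per work request opcode, which QP types may use it and how many bytes of the driver-specific WR to copy; rvt_post_one_wr() validates each post against it via rvt_qp_valid_operation() and uses the returned length for a single memcpy(). The sketch below shows how a low level driver might populate and register such a table. It is illustrative only: the drv_* names are hypothetical, and the rvt_operation_params field names (length, qpt_support, flags) and RVT_OPERATION_* flags are assumed from the rdmavt headers of this era rather than quoted from this commit.

/*
 * Hypothetical driver-side post send table, modeled on the style of the
 * in-tree rdmavt consumers. Field and flag names are assumptions; see the
 * rdmavt headers introduced by the parent commit for the real definitions.
 */
static const struct rvt_operation_params drv_post_parms[RVT_OPERATION_MAX] = {
	[IB_WR_SEND] = {
		.length = sizeof(struct ib_send_wr),
		.qpt_support = BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
			       BIT(IB_QPT_UD) | BIT(IB_QPT_UC) |
			       BIT(IB_QPT_RC),
	},
	[IB_WR_RDMA_WRITE] = {
		.length = sizeof(struct ib_rdma_wr),
		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	},
	[IB_WR_ATOMIC_CMP_AND_SWP] = {
		.length = sizeof(struct ib_atomic_wr),
		.qpt_support = BIT(IB_QPT_RC),
		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
	},
};

/*
 * At registration time the driver hands rdmavt its table, so the
 * check_support() hunk below sees rdi->post_parms non-NULL.
 */
static void drv_init_post_parms(struct rvt_dev_info *rdi)
{
	rdi->post_parms = drv_post_parms;
}
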
@@ -1535,6 +1535,7 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
 	u8 log_pmtu;
 	int ret;
+	size_t cplen;
 
 	BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
 
@@ -1542,32 +1543,11 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 	if (unlikely(wr->num_sge > qp->s_max_sge))
 		return -EINVAL;
 
-	/*
-	 * Don't allow RDMA reads or atomic operations on UC or
-	 * undefined operations.
-	 * Make sure buffer is large enough to hold the result for atomics.
-	 */
-	if (qp->ibqp.qp_type == IB_QPT_UC) {
-		if ((unsigned)wr->opcode >= IB_WR_RDMA_READ)
-			return -EINVAL;
-	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
-		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
-		if (wr->opcode != IB_WR_SEND &&
-		    wr->opcode != IB_WR_SEND_WITH_IMM)
-			return -EINVAL;
-		/* Check UD destination address PD */
-		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
-			return -EINVAL;
-	} else if ((unsigned)wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
-		return -EINVAL;
-	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
-		   (wr->num_sge == 0 ||
-		    wr->sg_list[0].length < sizeof(u64) ||
-		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
-		return -EINVAL;
-	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
-		return -EINVAL;
-	}
+	ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
+	if (ret < 0)
+		return ret;
+	cplen = ret;
+
 	/* check for avail */
 	if (unlikely(!qp->s_avail)) {
 		qp->s_avail = qp_get_savail(qp);
@@ -1588,18 +1568,8 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
 	pd = ibpd_to_rvtpd(qp->ibqp.pd);
 	wqe = rvt_get_swqe_ptr(qp, qp->s_head);
 
-	if (qp->ibqp.qp_type != IB_QPT_UC &&
-	    qp->ibqp.qp_type != IB_QPT_RC)
-		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
-	else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
-		 wr->opcode == IB_WR_RDMA_WRITE ||
-		 wr->opcode == IB_WR_RDMA_READ)
-		memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
-	else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-		 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
-		memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
-	else
-		memcpy(&wqe->wr, wr, sizeof(wqe->wr));
+	/* cplen has length from above */
+	memcpy(&wqe->wr, wr, cplen);
 	wqe->length = 0;
 	j = 0;
...
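
The open-coded opcode policy removed in the hunks above is what the table lookup replaces: rvt_qp_valid_operation() is expected to reject opcodes with no table entry or whose qpt_support mask excludes the QP type, enforce the atomic SGE constraints, and return the per-opcode copy length consumed by the single memcpy() above. A hedged approximation follows, with a hypothetical name and assumed field/flag names; the real helper lives in the parent commit.

/*
 * Sketch of the table-driven check; not the in-tree rvt_qp_valid_operation().
 * Field names (length, qpt_support, flags) and the RVT_OPERATION_* flags are
 * assumptions based on the rdmavt headers of this series.
 */
static inline int sketch_qp_valid_operation(struct rvt_qp *qp,
					    const struct rvt_operation_params *parms,
					    const struct ib_send_wr *wr)
{
	if (wr->opcode >= RVT_OPERATION_MAX || !parms[wr->opcode].length)
		return -EINVAL;		/* opcode unknown to this driver */
	if (!(parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
		return -EINVAL;		/* opcode not valid on this QP type */
	if ((parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE) &&
	    (wr->num_sge == 0 ||
	     wr->sg_list[0].length < sizeof(u64) ||
	     wr->sg_list[0].addr & (sizeof(u64) - 1)))
		return -EINVAL;		/* atomic result buffer too small or misaligned */
	if ((parms[wr->opcode].flags & RVT_OPERATION_ATOMIC) &&
	    !qp->s_max_rd_atomic)
		return -EINVAL;		/* no atomic/read responder resources */
	return parms[wr->opcode].length;	/* becomes cplen in rvt_post_one_wr() */
}
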
@@ -528,7 +528,8 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
 							 post_send),
 					   rvt_post_send))
 			if (!rdi->driver_f.schedule_send ||
-			    !rdi->driver_f.do_send)
+			    !rdi->driver_f.do_send ||
+			    !rdi->post_parms)
 				return -EINVAL;
 		break;
...