Commit 0771da5a authored by Mike Marciniszyn, committed by Doug Ledford

IB/hfi1,IB/qib: Use new send completion helper

Convert the open-coded CQ completion returns in both rdmavt drivers
(hfi1 and qib) to use the new rvt_qp_swqe_complete() helper.
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent f2dc9cdc
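
For reference, the helper this patch switches to is defined in rdmavt and is not part of this diff. The following is a minimal sketch of what rvt_qp_swqe_complete() presumably does, reconstructed from the open-coded blocks removed below; the ib_rvt_wc_opcode table name is an assumption standing in for the per-driver ib_hfi1_wc_opcode/ib_qib_wc_opcode tables that this patch deletes.

/*
 * Sketch only, not the verbatim rdmavt implementation: post a send CQ
 * entry when the QP signals all WRs, the WR was explicitly signaled,
 * or the completion carries an error status -- the same condition as
 * the open-coded blocks removed from hfi1 and qib in this patch.
 */
static inline void rvt_qp_swqe_complete(struct rvt_qp *qp,
					struct rvt_swqe *wqe,
					enum ib_wc_status status)
{
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		/* assumed rdmavt-level wr->wc opcode table */
		wc.opcode = ib_rvt_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		/* solicit completion handling only on error status */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}
}

With this in place, each driver call site collapses to a single rvt_qp_swqe_complete(qp, wqe, status) call, as shown in the hunks below.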
@@ -1146,7 +1146,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 {
 	struct ib_other_headers *ohdr;
 	struct rvt_swqe *wqe;
-	struct ib_wc wc;
 	unsigned i;
 	u32 opcode;
 	u32 psn;
@@ -1200,17 +1199,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 			rvt_put_mr(sge->mr);
 		}
-		/* Post a send completion queue entry if requested. */
-		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
-		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-			memset(&wc, 0, sizeof(wc));
-			wc.wr_id = wqe->wr.wr_id;
-			wc.status = IB_WC_SUCCESS;
-			wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
-			wc.byte_len = wqe->length;
-			wc.qp = &qp->ibqp;
-			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
-		}
+		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
 	}

 	/*
 	 * If we were waiting for sends to complete before re-sending,
@@ -1240,7 +1229,6 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 					 struct rvt_swqe *wqe,
 					 struct hfi1_ibport *ibp)
 {
-	struct ib_wc wc;
 	unsigned i;

 	lockdep_assert_held(&qp->s_lock);
@@ -1264,17 +1252,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 		qp->s_last = s_last;
 		/* see post_send() */
 		barrier();
-		/* Post a send completion queue entry if requested. */
-		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
-		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-			memset(&wc, 0, sizeof(wc));
-			wc.wr_id = wqe->wr.wr_id;
-			wc.status = IB_WC_SUCCESS;
-			wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
-			wc.byte_len = wqe->length;
-			wc.qp = &qp->ibqp;
-			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
-		}
+		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
 	} else {
 		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
...
@@ -964,22 +964,7 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	    qp->ibqp.qp_type == IB_QPT_GSI)
 		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

-	/* See ch. 11.2.4.1 and 10.7.3.1 */
-	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
-	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
-	    status != IB_WC_SUCCESS) {
-		struct ib_wc wc;
-
-		memset(&wc, 0, sizeof(wc));
-		wc.wr_id = wqe->wr.wr_id;
-		wc.status = status;
-		wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
-		wc.qp = &qp->ibqp;
-		if (status == IB_WC_SUCCESS)
-			wc.byte_len = wqe->length;
-		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
-			     status != IB_WC_SUCCESS);
-	}
+	rvt_qp_swqe_complete(qp, wqe, status);

 	if (qp->s_acked == old_last)
 		qp->s_acked = last;
...
@@ -296,22 +296,6 @@ static inline int wss_exceeds_threshold(void)
 	return atomic_read(&wss.total_count) >= wss.threshold;
 }

-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
-	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
-	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
-	[IB_WR_SEND] = IB_WC_SEND,
-	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
-	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
-	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
-	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
-	[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
-	[IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
-	[IB_WR_REG_MR] = IB_WC_REG_MR
-};
-
 /*
  * Length of header by opcode, 0 --> not supported
  */
...
@@ -941,7 +941,6 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 {
 	struct ib_other_headers *ohdr;
 	struct rvt_swqe *wqe;
-	struct ib_wc wc;
 	unsigned i;
 	u32 opcode;
 	u32 psn;
@@ -993,17 +992,7 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 			rvt_put_mr(sge->mr);
 		}
-		/* Post a send completion queue entry if requested. */
-		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
-		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-			memset(&wc, 0, sizeof(wc));
-			wc.wr_id = wqe->wr.wr_id;
-			wc.status = IB_WC_SUCCESS;
-			wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
-			wc.byte_len = wqe->length;
-			wc.qp = &qp->ibqp;
-			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
-		}
+		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
 	}

 	/*
 	 * If we were waiting for sends to complete before resending,
@@ -1032,7 +1021,6 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 					 struct rvt_swqe *wqe,
 					 struct qib_ibport *ibp)
 {
-	struct ib_wc wc;
 	unsigned i;

 	/*
@@ -1055,17 +1043,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 		qp->s_last = s_last;
 		/* see post_send() */
 		barrier();
-		/* Post a send completion queue entry if requested. */
-		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
-		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-			memset(&wc, 0, sizeof(wc));
-			wc.wr_id = wqe->wr.wr_id;
-			wc.status = IB_WC_SUCCESS;
-			wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
-			wc.byte_len = wqe->length;
-			wc.qp = &qp->ibqp;
-			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
-		}
+		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
 	} else
 		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
...
@@ -815,22 +815,7 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	    qp->ibqp.qp_type == IB_QPT_GSI)
 		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

-	/* See ch. 11.2.4.1 and 10.7.3.1 */
-	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
-	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
-	    status != IB_WC_SUCCESS) {
-		struct ib_wc wc;
-
-		memset(&wc, 0, sizeof(wc));
-		wc.wr_id = wqe->wr.wr_id;
-		wc.status = status;
-		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
-		wc.qp = &qp->ibqp;
-		if (status == IB_WC_SUCCESS)
-			wc.byte_len = wqe->length;
-		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
-			     status != IB_WC_SUCCESS);
-	}
+	rvt_qp_swqe_complete(qp, wqe, status);

 	if (qp->s_acked == old_last)
 		qp->s_acked = last;
...
@@ -113,19 +113,6 @@ static unsigned int ib_qib_disable_sma;
 module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(disable_sma, "Disable the SMA");

-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_qib_wc_opcode[] = {
-	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
-	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
-	[IB_WR_SEND] = IB_WC_SEND,
-	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
-	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
-	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
-	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
-};
-
 /*
  * System image GUID.
  */
...