Commit 43a474aa authored by Mike Marciniszyn, committed by Doug Ledford

IB/rdmavt, IB/hfi1, IB/qib: Make wc opcode translation driver dependent

The work to create a completion helper moved the translation of send
wqe operations to completion opcodes into rdmavt.

This precludes having driver-dependent translations.  Make the translation
driver-dependent by doing it in the driver prior to the
rvt_qp_swqe_complete() call, using the restored per-driver translation tables.

Fixes: f2dc9cdc ("IB/rdmavt: Add a send completion helper")
Fixes: 0771da5a ("IB/hfi1,IB/qib: Use new send completion helper")
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 5a52a7ac
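Before the hunks, the shape of the change reduced to a standalone sketch: the shared helper stops consulting one rdmavt-wide table and instead receives a completion opcode that each driver resolves from its own table. Everything below uses simplified stand-ins (the enum values, names, and the print are illustrative, not the kernel's):

#include <stdio.h>

/* Simplified stand-ins for the kernel's ib_wr_opcode/ib_wc_opcode enums. */
enum wr_opcode { WR_RDMA_WRITE, WR_SEND, WR_RDMA_READ, WR_OPCODE_MAX };
enum wc_opcode { WC_SEND, WC_RDMA_WRITE, WC_RDMA_READ };

/*
 * Each driver owns its own table; another driver may support a different
 * subset of operations, which is the point of this commit.
 */
static const enum wc_opcode driver_a_wc_opcode[WR_OPCODE_MAX] = {
	[WR_RDMA_WRITE] = WC_RDMA_WRITE,
	[WR_SEND]       = WC_SEND,
	[WR_RDMA_READ]  = WC_RDMA_READ,
};

/* The shared helper no longer translates; it takes the resolved opcode. */
static void shared_swqe_complete(enum wc_opcode opcode)
{
	printf("completing with wc opcode %d\n", opcode);
}

int main(void)
{
	enum wr_opcode posted = WR_SEND;

	/* The driver translates *before* calling into the shared layer. */
	shared_swqe_complete(driver_a_wc_opcode[posted]);
	return 0;
}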
@@ -1034,7 +1034,10 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 		/* see post_send() */
 		barrier();
 		rvt_put_swqe(wqe);
-		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+		rvt_qp_swqe_complete(qp,
+				     wqe,
+				     ib_hfi1_wc_opcode[wqe->wr.opcode],
+				     IB_WC_SUCCESS);
 	}
 	/*
 	 * If we were waiting for sends to complete before re-sending,
@@ -1081,7 +1084,10 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 		qp->s_last = s_last;
 		/* see post_send() */
 		barrier();
-		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+		rvt_qp_swqe_complete(qp,
+				     wqe,
+				     ib_hfi1_wc_opcode[wqe->wr.opcode],
+				     IB_WC_SUCCESS);
 	} else {
 		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
...
@@ -920,7 +920,10 @@ void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	    qp->ibqp.qp_type == IB_QPT_GSI)
 		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
 
-	rvt_qp_swqe_complete(qp, wqe, status);
+	rvt_qp_swqe_complete(qp,
+			     wqe,
+			     ib_hfi1_wc_opcode[wqe->wr.opcode],
+			     status);
 
 	if (qp->s_acked == old_last)
 		qp->s_acked = last;
...
@@ -296,6 +296,22 @@ static inline bool wss_exceeds_threshold(void)
 	return atomic_read(&wss.total_count) >= wss.threshold;
 }
 
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
+	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+	[IB_WR_SEND] = IB_WC_SEND,
+	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
+	[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
+	[IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
+	[IB_WR_REG_MR] = IB_WC_REG_MR
+};
+
 /*
  * Length of header by opcode, 0 --> not supported
  */
...
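A property of these designated-initializer tables worth keeping in mind: entries not listed are zero-initialized, so a lookup with an opcode the driver never posts does not fail, it just yields the enumerator whose value is 0. A standalone illustration (simplified enums, not the kernel's):

#include <assert.h>

enum wr_op { WR_SEND, WR_RDMA_READ, WR_NEVER_POSTED, WR_MAX };
enum wc_op { WC_SEND, WC_RDMA_READ };

static const enum wc_op table[WR_MAX] = {
	[WR_SEND]      = WC_SEND,
	[WR_RDMA_READ] = WC_RDMA_READ,
	/* WR_NEVER_POSTED deliberately omitted */
};

int main(void)
{
	/* The omitted entry is zero-filled, so the lookup silently
	 * returns the value-0 enumerator (WC_SEND here).  Drivers must
	 * therefore only index with opcodes they actually post. */
	assert(table[WR_NEVER_POSTED] == WC_SEND);
	return 0;
}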
@@ -938,7 +938,10 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
 		/* see post_send() */
 		barrier();
 		rvt_put_swqe(wqe);
-		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+		rvt_qp_swqe_complete(qp,
+				     wqe,
+				     ib_qib_wc_opcode[wqe->wr.opcode],
+				     IB_WC_SUCCESS);
 	}
 	/*
 	 * If we were waiting for sends to complete before resending,
@@ -983,7 +986,10 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 		qp->s_last = s_last;
 		/* see post_send() */
 		barrier();
-		rvt_qp_swqe_complete(qp, wqe, IB_WC_SUCCESS);
+		rvt_qp_swqe_complete(qp,
+				     wqe,
+				     ib_qib_wc_opcode[wqe->wr.opcode],
+				     IB_WC_SUCCESS);
 	} else
 		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
...
@@ -769,7 +769,10 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 	    qp->ibqp.qp_type == IB_QPT_GSI)
 		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
 
-	rvt_qp_swqe_complete(qp, wqe, status);
+	rvt_qp_swqe_complete(qp,
+			     wqe,
+			     ib_qib_wc_opcode[wqe->wr.opcode],
+			     status);
 
 	if (qp->s_acked == old_last)
 		qp->s_acked = last;
...
@@ -113,6 +113,19 @@ static unsigned int ib_qib_disable_sma;
 module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
 
+/*
+ * Translate ib_wr_opcode into ib_wc_opcode.
+ */
+const enum ib_wc_opcode ib_qib_wc_opcode[] = {
+	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
+	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
+	[IB_WR_SEND] = IB_WC_SEND,
+	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
+	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
+	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
+	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
+};
+
 /*
  * System image GUID.
  */
...
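Note that the qib table above stops at the atomics: unlike the hfi1 table it has no entries for IB_WR_SEND_WITH_INV, IB_WR_LOCAL_INV, or IB_WR_REG_MR, which is exactly the per-driver difference the single shared table could not express. If one wanted a compile-time guard that such a table keeps up with the opcodes a driver posts, a _Static_assert over the array length is one option; this guard is hypothetical and not part of the commit:

/* Hypothetical coverage guard, not part of this commit. */
enum wr_op { WR_RDMA_WRITE, WR_SEND, WR_RDMA_READ, WR_POSTED_MAX };
enum wc_op { WC_SEND, WC_RDMA_WRITE, WC_RDMA_READ };

static const enum wc_op qib_like_wc_opcode[] = {
	[WR_RDMA_WRITE] = WC_RDMA_WRITE,
	[WR_SEND]       = WC_SEND,
	[WR_RDMA_READ]  = WC_RDMA_READ,
};

/* Catches the common case where a new opcode is appended to the enum
 * without a matching table entry (gaps in the middle still zero-fill
 * silently, as shown earlier). */
_Static_assert(sizeof(qib_like_wc_opcode) / sizeof(qib_like_wc_opcode[0]) ==
	       WR_POSTED_MAX, "wc opcode table must cover all posted opcodes");

int main(void) { return 0; }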
@@ -117,23 +117,6 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
 };
 EXPORT_SYMBOL(ib_rvt_state_ops);
 
-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_rvt_wc_opcode[] = {
-	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
-	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
-	[IB_WR_SEND] = IB_WC_SEND,
-	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
-	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
-	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
-	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
-	[IB_WR_SEND_WITH_INV] = IB_WC_SEND,
-	[IB_WR_LOCAL_INV] = IB_WC_LOCAL_INV,
-	[IB_WR_REG_MR] = IB_WC_REG_MR
-};
-EXPORT_SYMBOL(ib_rvt_wc_opcode);
-
 static void get_map_page(struct rvt_qpn_table *qpt,
 			 struct rvt_qpn_map *map,
 			 gfp_t gfp)
...
@@ -574,6 +574,7 @@ extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
 static inline void rvt_qp_swqe_complete(
 	struct rvt_qp *qp,
 	struct rvt_swqe *wqe,
+	enum ib_wc_opcode opcode,
 	enum ib_wc_status status)
 {
 	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
@@ -586,7 +587,7 @@ static inline void rvt_qp_swqe_complete(
 		memset(&wc, 0, sizeof(wc));
 		wc.wr_id = wqe->wr.wr_id;
 		wc.status = status;
-		wc.opcode = ib_rvt_wc_opcode[wqe->wr.opcode];
+		wc.opcode = opcode;
 		wc.qp = &qp->ibqp;
 		wc.byte_len = wqe->length;
 		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
...
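Reading the two header hunks together: rvt_qp_swqe_complete() now takes the completion opcode as a parameter and copies it straight into the work completion, so the rdmavt core carries no opcode knowledge at all. A self-contained mock of the post-commit flow (all structures pared down, rvt_cq_enter() replaced by a print, names illustrative):

#include <stdio.h>
#include <string.h>

/* Pared-down stand-ins for the kernel types involved. */
enum wc_opcode { WC_SEND, WC_RDMA_WRITE };
enum wc_status { WC_SUCCESS };

struct swqe {
	unsigned long wr_id;   /* caller's work-request id */
	unsigned int length;   /* payload length */
	unsigned int opcode;   /* posted wr opcode, indexes the table */
};

struct wc {
	unsigned long wr_id;
	enum wc_status status;
	enum wc_opcode opcode;
	unsigned int byte_len;
};

/* The driver-owned translation table (two entries for the demo). */
static const enum wc_opcode drv_wc_opcode[] = {
	WC_SEND,
	WC_RDMA_WRITE,
};

/* Post-commit shape of the helper: the opcode arrives as a parameter. */
static void qp_swqe_complete(const struct swqe *wqe, enum wc_opcode opcode,
			     enum wc_status status)
{
	struct wc wc;

	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = status;
	wc.opcode = opcode;	/* no shared-table lookup here anymore */
	wc.byte_len = wqe->length;
	printf("cq entry: wr_id=%lu opcode=%d len=%u\n",
	       wc.wr_id, (int)wc.opcode, wc.byte_len);
}

int main(void)
{
	struct swqe wqe = { .wr_id = 42, .length = 128, .opcode = 1 };

	/* The driver resolves the opcode from its own table at the call
	 * site, mirroring the ib_hfi1_wc_opcode[wqe->wr.opcode] and
	 * ib_qib_wc_opcode[wqe->wr.opcode] calls in the hunks above. */
	qp_swqe_complete(&wqe, drv_wc_opcode[wqe.opcode], WC_SUCCESS);
	return 0;
}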