Commit beb5a042 authored by Brian Welty's avatar Brian Welty Committed by Doug Ledford

IB/hfi1, qib, rdmavt: Move two IB event functions into rdmavt

Add rvt_rc_error() and rvt_comm_est() as shared functions in
rdmavt, moved from hfi1/qib logic.
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Brian Welty <brian.welty@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent c03c08d5
...@@ -784,19 +784,6 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) ...@@ -784,19 +784,6 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
qp->pid); qp->pid);
} }
void qp_comm_est(struct rvt_qp *qp)
{
qp->r_flags |= RVT_R_COMM_EST;
if (qp->ibqp.event_handler) {
struct ib_event ev;
ev.device = qp->ibqp.device;
ev.element.qp = &qp->ibqp;
ev.event = IB_EVENT_COMM_EST;
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
}
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
gfp_t gfp) gfp_t gfp)
{ {
......
...@@ -131,12 +131,6 @@ int qp_iter_next(struct qp_iter *iter); ...@@ -131,12 +131,6 @@ int qp_iter_next(struct qp_iter *iter);
*/ */
void qp_iter_print(struct seq_file *s, struct qp_iter *iter); void qp_iter_print(struct seq_file *s, struct qp_iter *iter);
/**
* qp_comm_est - handle trap with QP established
* @qp: the QP
*/
void qp_comm_est(struct rvt_qp *qp);
void _hfi1_schedule_send(struct rvt_qp *qp); void _hfi1_schedule_send(struct rvt_qp *qp);
void hfi1_schedule_send(struct rvt_qp *qp); void hfi1_schedule_send(struct rvt_qp *qp);
......
...@@ -1966,25 +1966,6 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data, ...@@ -1966,25 +1966,6 @@ static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
return 0; return 0;
} }
void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
unsigned long flags;
int lastwqe;
spin_lock_irqsave(&qp->s_lock, flags);
lastwqe = rvt_error_qp(qp, err);
spin_unlock_irqrestore(&qp->s_lock, flags);
if (lastwqe) {
struct ib_event ev;
ev.device = qp->ibqp.device;
ev.element.qp = &qp->ibqp;
ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
}
static inline void update_ack_queue(struct rvt_qp *qp, unsigned n) static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
{ {
unsigned next; unsigned next;
...@@ -2185,7 +2166,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) ...@@ -2185,7 +2166,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
} }
if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
qp_comm_est(qp); rvt_comm_est(qp);
/* OK, process the packet. */ /* OK, process the packet. */
switch (opcode) { switch (opcode) {
...@@ -2517,7 +2498,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) ...@@ -2517,7 +2498,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
return; return;
nack_op_err: nack_op_err:
hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR); rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR; qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
qp->r_ack_psn = qp->r_psn; qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */ /* Queue NAK for later */
...@@ -2527,7 +2508,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) ...@@ -2527,7 +2508,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
nack_inv_unlck: nack_inv_unlck:
spin_unlock_irqrestore(&qp->s_lock, flags); spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv: nack_inv:
hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR); rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
qp->r_nak_state = IB_NAK_INVALID_REQUEST; qp->r_nak_state = IB_NAK_INVALID_REQUEST;
qp->r_ack_psn = qp->r_psn; qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */ /* Queue NAK for later */
...@@ -2537,7 +2518,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet) ...@@ -2537,7 +2518,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
nack_acc_unlck: nack_acc_unlck:
spin_unlock_irqrestore(&qp->s_lock, flags); spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc: nack_acc:
hfi1_rc_error(qp, IB_WC_LOC_PROT_ERR); rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
qp->r_ack_psn = qp->r_psn; qp->r_ack_psn = qp->r_psn;
send_ack: send_ack:
......
...@@ -637,7 +637,7 @@ static void ruc_loopback(struct rvt_qp *sqp) ...@@ -637,7 +637,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
wc.status = IB_WC_LOC_PROT_ERR; wc.status = IB_WC_LOC_PROT_ERR;
err: err:
/* responder goes to error state */ /* responder goes to error state */
hfi1_rc_error(qp, wc.status); rvt_rc_error(qp, wc.status);
serr: serr:
spin_lock_irqsave(&sqp->s_lock, flags); spin_lock_irqsave(&sqp->s_lock, flags);
......
...@@ -384,7 +384,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet) ...@@ -384,7 +384,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
} }
if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
qp_comm_est(qp); rvt_comm_est(qp);
/* OK, process the packet. */ /* OK, process the packet. */
switch (opcode) { switch (opcode) {
...@@ -584,5 +584,5 @@ void hfi1_uc_rcv(struct hfi1_packet *packet) ...@@ -584,5 +584,5 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
return; return;
op_err: op_err:
hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR); rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
} }
...@@ -167,7 +167,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) ...@@ -167,7 +167,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
ret = hfi1_rvt_get_rwqe(qp, 0); ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0) { if (ret < 0) {
hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR); rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
goto bail_unlock; goto bail_unlock;
} }
if (!ret) { if (!ret) {
...@@ -796,7 +796,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) ...@@ -796,7 +796,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
ret = hfi1_rvt_get_rwqe(qp, 0); ret = hfi1_rvt_get_rwqe(qp, 0);
if (ret < 0) { if (ret < 0) {
hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR); rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return; return;
} }
if (!ret) { if (!ret) {
......
...@@ -327,8 +327,6 @@ void hfi1_stop_rc_timers(struct rvt_qp *qp); ...@@ -327,8 +327,6 @@ void hfi1_stop_rc_timers(struct rvt_qp *qp);
void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr); void hfi1_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
void hfi1_ud_rcv(struct hfi1_packet *packet); void hfi1_ud_rcv(struct hfi1_packet *packet);
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey); int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey);
......
...@@ -1765,25 +1765,6 @@ static int qib_rc_rcv_error(struct ib_other_headers *ohdr, ...@@ -1765,25 +1765,6 @@ static int qib_rc_rcv_error(struct ib_other_headers *ohdr,
return 0; return 0;
} }
void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
unsigned long flags;
int lastwqe;
spin_lock_irqsave(&qp->s_lock, flags);
lastwqe = rvt_error_qp(qp, err);
spin_unlock_irqrestore(&qp->s_lock, flags);
if (lastwqe) {
struct ib_event ev;
ev.device = qp->ibqp.device;
ev.element.qp = &qp->ibqp;
ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
}
static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n) static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
{ {
unsigned next; unsigned next;
...@@ -1895,17 +1876,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr, ...@@ -1895,17 +1876,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
break; break;
} }
if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) { if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
qp->r_flags |= RVT_R_COMM_EST; rvt_comm_est(qp);
if (qp->ibqp.event_handler) {
struct ib_event ev;
ev.device = qp->ibqp.device;
ev.element.qp = &qp->ibqp;
ev.event = IB_EVENT_COMM_EST;
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
}
/* OK, process the packet. */ /* OK, process the packet. */
switch (opcode) { switch (opcode) {
...@@ -2197,7 +2169,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr, ...@@ -2197,7 +2169,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
return; return;
nack_op_err: nack_op_err:
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR; qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
qp->r_ack_psn = qp->r_psn; qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */ /* Queue NAK for later */
...@@ -2211,7 +2183,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr, ...@@ -2211,7 +2183,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
nack_inv_unlck: nack_inv_unlck:
spin_unlock_irqrestore(&qp->s_lock, flags); spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv: nack_inv:
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
qp->r_nak_state = IB_NAK_INVALID_REQUEST; qp->r_nak_state = IB_NAK_INVALID_REQUEST;
qp->r_ack_psn = qp->r_psn; qp->r_ack_psn = qp->r_psn;
/* Queue NAK for later */ /* Queue NAK for later */
...@@ -2225,7 +2197,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr, ...@@ -2225,7 +2197,7 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
nack_acc_unlck: nack_acc_unlck:
spin_unlock_irqrestore(&qp->s_lock, flags); spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc: nack_acc:
qib_rc_error(qp, IB_WC_LOC_PROT_ERR); rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
qp->r_ack_psn = qp->r_psn; qp->r_ack_psn = qp->r_psn;
send_ack: send_ack:
......
...@@ -621,7 +621,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp) ...@@ -621,7 +621,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
wc.status = IB_WC_LOC_PROT_ERR; wc.status = IB_WC_LOC_PROT_ERR;
err: err:
/* responder goes to error state */ /* responder goes to error state */
qib_rc_error(qp, wc.status); rvt_rc_error(qp, wc.status);
serr: serr:
spin_lock_irqsave(&sqp->s_lock, flags); spin_lock_irqsave(&sqp->s_lock, flags);
......
...@@ -325,17 +325,8 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr, ...@@ -325,17 +325,8 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
goto inv; goto inv;
} }
if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) { if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
qp->r_flags |= RVT_R_COMM_EST; rvt_comm_est(qp);
if (qp->ibqp.event_handler) {
struct ib_event ev;
ev.device = qp->ibqp.device;
ev.element.qp = &qp->ibqp;
ev.event = IB_EVENT_COMM_EST;
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
}
/* OK, process the packet. */ /* OK, process the packet. */
switch (opcode) { switch (opcode) {
...@@ -527,7 +518,7 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr, ...@@ -527,7 +518,7 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
return; return;
op_err: op_err:
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return; return;
} }
...@@ -152,7 +152,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) ...@@ -152,7 +152,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
ret = qib_get_rwqe(qp, 0); ret = qib_get_rwqe(qp, 0);
if (ret < 0) { if (ret < 0) {
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
goto bail_unlock; goto bail_unlock;
} }
if (!ret) { if (!ret) {
...@@ -548,7 +548,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr, ...@@ -548,7 +548,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
ret = qib_get_rwqe(qp, 0); ret = qib_get_rwqe(qp, 0);
if (ret < 0) { if (ret < 0) {
qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
return; return;
} }
if (!ret) { if (!ret) {
......
...@@ -326,8 +326,6 @@ void qib_rc_rnr_retry(unsigned long arg); ...@@ -326,8 +326,6 @@ void qib_rc_rnr_retry(unsigned long arg);
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr); void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
int qib_post_ud_send(struct rvt_qp *qp, struct ib_send_wr *wr); int qib_post_ud_send(struct rvt_qp *qp, struct ib_send_wr *wr);
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr, void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
......
...@@ -1868,3 +1868,41 @@ int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, ...@@ -1868,3 +1868,41 @@ int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
} }
return 0; return 0;
} }
/**
 * rvt_comm_est - handle trap with QP established
 * @qp: the QP
 *
 * Marks the QP as communication-established (RVT_R_COMM_EST) and, if the
 * consumer registered an event handler, delivers an IB_EVENT_COMM_EST
 * upcall with the consumer's qp_context.
 */
void rvt_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_comm_est);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
unsigned long flags;
int lastwqe;
spin_lock_irqsave(&qp->s_lock, flags);
lastwqe = rvt_error_qp(qp, err);
spin_unlock_irqrestore(&qp->s_lock, flags);
if (lastwqe) {
struct ib_event ev;
ev.device = qp->ibqp.device;
ev.element.qp = &qp->ibqp;
ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
}
EXPORT_SYMBOL(rvt_rc_error);
...@@ -607,6 +607,8 @@ static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len) ...@@ -607,6 +607,8 @@ static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
extern const int ib_rvt_state_ops[]; extern const int ib_rvt_state_ops[];
struct rvt_dev_info; struct rvt_dev_info;
void rvt_comm_est(struct rvt_qp *qp);
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err); int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
#endif /* DEF_RDMAVT_INCQP_H */ #endif /* DEF_RDMAVT_INCQP_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.