Commit 688f21c0 authored by Mike Marciniszyn, committed by Doug Ledford

IB/hfi1, IB/rdmavt: Move r_adefered to r_lock cache line

The r_adefered field is causing excessive cache line bouncing.

There are spare bytes in the r_lock cache line, so the best approach
is to make it an rvt QP field and remove it from the hfi1 priv structure.
Signed-off-by: Sebastian Sanchez <sebastian.sanchez@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 02d1008b
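The layout idea behind the change: keep the hot receive-side counter in the spare bytes next to r_lock and the other r_* receive state, so the receive path dirties one cache line instead of also bouncing the line holding the hfi1 priv structure between CPUs. Below is a minimal user-space sketch of that grouping technique; the 64-byte line size, the pthread spinlock, and every name other than r_lock and r_adefered are illustrative assumptions, not the driver's actual layout.

/*
 * Illustrative sketch only (not the kernel code): co-locate a hot,
 * lock-protected counter with its lock so the receive path touches a
 * single cache line.  The 64-byte line size and all names other than
 * r_lock/r_adefered are assumptions made for this example.
 */
#include <pthread.h>
#include <stdalign.h>
#include <stdint.h>

#define CACHE_LINE 64

struct example_qp {
        /* receive-side state grouped on one cache line */
        alignas(CACHE_LINE) pthread_spinlock_t r_lock;
        uint8_t r_state;
        uint8_t r_adefered;     /* deferred-ack count, next to r_lock */

        /* send-side state starts on its own cache line */
        alignas(CACHE_LINE) pthread_spinlock_t s_lock;
        uint32_t s_flags;
};

Grouping fields by which lock serializes them, rather than by logical category, is the same reasoning the commit message gives for using the spare bytes next to r_lock.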
@@ -731,9 +731,7 @@ void quiesce_qp(struct rvt_qp *qp)
 void notify_qp_reset(struct rvt_qp *qp)
 {
-        struct hfi1_qp_priv *priv = qp->priv;
-
-        priv->r_adefered = 0;
+        qp->r_adefered = 0;
         clear_ahg(qp);
 }
@@ -727,10 +727,9 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
         struct ib_header hdr;
         struct ib_other_headers *ohdr;
         unsigned long flags;
-        struct hfi1_qp_priv *priv = qp->priv;
 
         /* clear the defer count */
-        priv->r_adefered = 0;
+        qp->r_adefered = 0;
 
         /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
         if (qp->s_flags & RVT_S_RESP_PENDING)
@@ -1604,9 +1603,7 @@ static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
 static inline void rc_cancel_ack(struct rvt_qp *qp)
 {
-        struct hfi1_qp_priv *priv = qp->priv;
-
-        priv->r_adefered = 0;
+        qp->r_adefered = 0;
         if (list_empty(&qp->rspwait))
                 return;
         list_del_init(&qp->rspwait);
@@ -2314,13 +2311,11 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
         qp->r_nak_state = 0;
         /* Send an ACK if requested or required. */
         if (psn & IB_BTH_REQ_ACK) {
-                struct hfi1_qp_priv *priv = qp->priv;
-
                 if (packet->numpkt == 0) {
                         rc_cancel_ack(qp);
                         goto send_ack;
                 }
-                if (priv->r_adefered >= HFI1_PSN_CREDIT) {
+                if (qp->r_adefered >= HFI1_PSN_CREDIT) {
                         rc_cancel_ack(qp);
                         goto send_ack;
                 }
@@ -2328,7 +2323,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
                         rc_cancel_ack(qp);
                         goto send_ack;
                 }
-                priv->r_adefered++;
+                qp->r_adefered++;
                 rc_defered_ack(rcd, qp);
         }
         return;
@@ -125,7 +125,6 @@ struct hfi1_qp_priv {
         struct sdma_engine *s_sde;              /* current sde */
         struct send_context *s_sendcontext;     /* current sendcontext */
         u8 s_sc;                                /* SC[0..4] for next packet */
-        u8 r_adefered;                          /* number of acks defered */
         struct iowait s_iowait;
         struct rvt_qp *owner;
 };
@@ -324,6 +324,7 @@ struct rvt_qp {
         u8 r_state;                     /* opcode of last packet received */
         u8 r_flags;
         u8 r_head_ack_queue;            /* index into s_ack_queue[] */
+        u8 r_adefered;                  /* defered ack count */
         struct list_head rspwait;       /* link for waiting to respond */
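Taken together, the rc.c hunks drive a small ack-coalescing scheme around the moved counter: each packet that sets the BTH ack-request bit either defers its ack and bumps qp->r_adefered, or, once the count reaches HFI1_PSN_CREDIT (or the packet needs an immediate response), cancels the pending deferral and sends an ack right away, which also clears the count. The stand-alone sketch below shows that pattern under simplified assumptions; the function names, the credit value of 16, and the printf stubs are illustrative, not the driver's API.

/* Stand-alone sketch of the ack-coalescing pattern visible in the
 * hfi1_rc_rcv() hunk above; names and the credit limit are assumptions. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PSN_CREDIT 16           /* assumed stand-in for HFI1_PSN_CREDIT */

struct sketch_qp {
        uint8_t r_adefered;     /* deferred-ack count, as in struct rvt_qp */
};

static void send_ack_now(struct sketch_qp *qp)
{
        qp->r_adefered = 0;     /* sending an ack clears the defer count */
        printf("ack sent\n");
}

static void queue_deferred_ack(struct sketch_qp *qp)
{
        printf("ack deferred (%u pending)\n", (unsigned int)qp->r_adefered);
}

/* Called once per received packet that set the ack-request bit. */
static void on_ack_requested(struct sketch_qp *qp, bool must_respond_now)
{
        if (must_respond_now || qp->r_adefered >= PSN_CREDIT) {
                send_ack_now(qp);
                return;
        }
        qp->r_adefered++;
        queue_deferred_ack(qp);
}

int main(void)
{
        struct sketch_qp qp = { 0 };

        /* 20 ack-requesting packets: most acks coalesce, one is sent
         * immediately when the deferred count hits the credit limit. */
        for (int i = 0; i < 20; i++)
                on_ack_requested(&qp, false);
        return 0;
}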