Commit 154257f3 authored by Ralph Campbell, committed by Roland Dreier

IB/ipath: Fix a race condition when generating ACKs

Fix a problem where simple ACKs can be sent ahead of RDMA read
responses, thus implicitly NAKing the RDMA read.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Robert Walsh <robert.walsh@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 6ed89b95
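
Editor's note on the race, as a reading aid for the diff below: s_tail_ack_queue
used to be advanced as soon as an RDMA read or atomic response was constructed,
so send_rc_ack() could see r_head_ack_queue == s_tail_ack_queue and emit a bare
ACK while the response was still in flight, implicitly NAKing the read. The fix
advances the tail only after the last response packet has actually been sent,
parks s_ack_state in OP(SEND_ONLY) after a bare ACK, and makes send_rc_ack()
queue the ACK whenever a response is queued, pending, or being transmitted.
The following is a minimal sketch of that invariant, not the driver code: only
the field names mirror ipath; the struct, the MAX_RDMA_ATOMIC constant, and the
helper names are assumptions made for illustration.

    /*
     * Minimal sketch of the ordering this commit enforces -- not the
     * driver code.
     */
    #include <stdbool.h>

    #define MAX_RDMA_ATOMIC 4       /* stand-in for IPATH_MAX_RDMA_ATOMIC */

    struct qp_sketch {
            unsigned int r_head_ack_queue;  /* responder enqueues read/atomic work */
            unsigned int s_tail_ack_queue;  /* advances as responses are *sent* */
            bool ack_pending;               /* models IPATH_S_ACK_PENDING */
            bool responding;                /* models s_ack_state != OP(ACKNOWLEDGE) */
    };

    /*
     * After the fix, the circular tail index is bumped only once the
     * last packet of a response has gone out, never at construction
     * time.
     */
    static void response_sent(struct qp_sketch *qp)
    {
            if (++qp->s_tail_ack_queue > MAX_RDMA_ATOMIC)
                    qp->s_tail_ack_queue = 0;
    }

    /*
     * Mirror of the new send_rc_ack() test: a bare ACK may be sent
     * only when no response is queued, pending, or being transmitted;
     * otherwise it must be queued behind the response.
     */
    static bool can_send_bare_ack(const struct qp_sketch *qp)
    {
            return qp->r_head_ack_queue == qp->s_tail_ack_queue &&
                   !qp->ack_pending && !qp->responding;
    }
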
@@ -98,13 +98,21 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
         case OP(RDMA_READ_RESPONSE_LAST):
         case OP(RDMA_READ_RESPONSE_ONLY):
         case OP(ATOMIC_ACKNOWLEDGE):
-                qp->s_ack_state = OP(ACKNOWLEDGE);
+                /*
+                 * We can increment the tail pointer now that the last
+                 * response has been sent instead of only being
+                 * constructed.
+                 */
+                if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
+                        qp->s_tail_ack_queue = 0;
                 /* FALLTHROUGH */
+        case OP(SEND_ONLY):
         case OP(ACKNOWLEDGE):
                 /* Check for no next entry in the queue. */
                 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
                         if (qp->s_flags & IPATH_S_ACK_PENDING)
                                 goto normal;
+                        qp->s_ack_state = OP(ACKNOWLEDGE);
                         goto bail;
                 }
@@ -117,12 +125,8 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                 if (len > pmtu) {
                         len = pmtu;
                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
-                } else {
+                } else
                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
-                        if (++qp->s_tail_ack_queue >
-                            IPATH_MAX_RDMA_ATOMIC)
-                                qp->s_tail_ack_queue = 0;
-                }
                 ohdr->u.aeth = ipath_compute_aeth(qp);
                 hwords++;
                 qp->s_ack_rdma_psn = e->psn;
@@ -139,8 +143,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                         cpu_to_be32(e->atomic_data);
                 hwords += sizeof(ohdr->u.at) / sizeof(u32);
                 bth2 = e->psn;
-                if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-                        qp->s_tail_ack_queue = 0;
         }
         bth0 = qp->s_ack_state << 24;
         break;
@@ -156,8 +158,6 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
                 ohdr->u.aeth = ipath_compute_aeth(qp);
                 hwords++;
                 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
-                if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-                        qp->s_tail_ack_queue = 0;
         }
         bth0 = qp->s_ack_state << 24;
         bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
@@ -171,7 +171,7 @@ static int ipath_make_rc_ack(struct ipath_qp *qp,
          * the ACK before setting s_ack_state to ACKNOWLEDGE
          * (see above).
          */
-        qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
+        qp->s_ack_state = OP(SEND_ONLY);
         qp->s_flags &= ~IPATH_S_ACK_PENDING;
         qp->s_cur_sge = NULL;
         if (qp->s_nak_state)
@@ -223,7 +223,7 @@ int ipath_make_rc_req(struct ipath_qp *qp,
         /* Sending responses has higher priority over sending requests. */
         if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
              (qp->s_flags & IPATH_S_ACK_PENDING) ||
-             qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE) &&
+             qp->s_ack_state != OP(ACKNOWLEDGE)) &&
             ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p))
                 goto done;
@@ -585,7 +585,9 @@ static void send_rc_ack(struct ipath_qp *qp)
         unsigned long flags;

         /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
-        if (qp->r_head_ack_queue != qp->s_tail_ack_queue)
+        if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
+            (qp->s_flags & IPATH_S_ACK_PENDING) ||
+            qp->s_ack_state != OP(ACKNOWLEDGE))
                 goto queue_ack;

         /* Construct the header. */
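
A short walk-through using the sketch from the note above (appended to the
same illustrative file, so it reuses those definitions): with the increment
deferred, the head index stays ahead of the tail until the last response
packet is out, so the bare-ACK test cannot pass while an RDMA read response
is still unsent, which is exactly the window the old construction-time
increment opened.

    #include <assert.h>

    int main(void)
    {
            struct qp_sketch qp = { 0 };

            /* Responder queues an RDMA read: head moves ahead of tail. */
            qp.r_head_ack_queue = 1;
            assert(!can_send_bare_ack(&qp));        /* the ACK must be queued */

            /*
             * Only when the last response packet has been sent does the
             * tail catch up; a bare ACK can no longer overtake the read
             * response as it could when the tail was bumped at
             * construction time.
             */
            response_sent(&qp);
            assert(can_send_bare_ack(&qp));
            return 0;
    }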