Commit d98b1937 authored by Ralph Campbell, committed by Roland Dreier

IB/ipath: Use PIO buffer for RC ACKs

This reduces the latency for RC ACKs when a PIO buffer is available.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent c4b4d16e
...@@ -31,6 +31,8 @@ ...@@ -31,6 +31,8 @@
* SOFTWARE. * SOFTWARE.
*/ */
#include <linux/io.h>
#include "ipath_verbs.h" #include "ipath_verbs.h"
#include "ipath_kernel.h" #include "ipath_kernel.h"
...@@ -585,19 +587,39 @@ int ipath_make_rc_req(struct ipath_qp *qp) ...@@ -585,19 +587,39 @@ int ipath_make_rc_req(struct ipath_qp *qp)
static void send_rc_ack(struct ipath_qp *qp) static void send_rc_ack(struct ipath_qp *qp)
{ {
struct ipath_ibdev *dev = to_idev(qp->ibqp.device); struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ipath_devdata *dd;
u16 lrh0; u16 lrh0;
u32 bth0; u32 bth0;
u32 hwords; u32 hwords;
u32 __iomem *piobuf;
struct ipath_ib_header hdr; struct ipath_ib_header hdr;
struct ipath_other_headers *ohdr; struct ipath_other_headers *ohdr;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&qp->s_lock, flags);
/* Don't send ACK or NAK if a RDMA read or atomic is pending. */ /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
if (qp->r_head_ack_queue != qp->s_tail_ack_queue || if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
(qp->s_flags & IPATH_S_ACK_PENDING) || (qp->s_flags & IPATH_S_ACK_PENDING) ||
qp->s_ack_state != OP(ACKNOWLEDGE)) qp->s_ack_state != OP(ACKNOWLEDGE))
goto queue_ack; goto queue_ack;
spin_unlock_irqrestore(&qp->s_lock, flags);
dd = dev->dd;
piobuf = ipath_getpiobuf(dd, 0, NULL);
if (!piobuf) {
/*
* We are out of PIO buffers at the moment.
* Pass responsibility for sending the ACK to the
* send tasklet so that when a PIO buffer becomes
* available, the ACK is sent ahead of other outgoing
* packets.
*/
spin_lock_irqsave(&qp->s_lock, flags);
goto queue_ack;
}
/* Construct the header. */ /* Construct the header. */
ohdr = &hdr.u.oth; ohdr = &hdr.u.oth;
lrh0 = IPATH_LRH_BTH; lrh0 = IPATH_LRH_BTH;
...@@ -611,7 +633,7 @@ static void send_rc_ack(struct ipath_qp *qp) ...@@ -611,7 +633,7 @@ static void send_rc_ack(struct ipath_qp *qp)
lrh0 = IPATH_LRH_GRH; lrh0 = IPATH_LRH_GRH;
} }
/* read pkey_index w/o lock (its atomic) */ /* read pkey_index w/o lock (its atomic) */
bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) | bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
(OP(ACKNOWLEDGE) << 24) | (1 << 22); (OP(ACKNOWLEDGE) << 24) | (1 << 22);
if (qp->r_nak_state) if (qp->r_nak_state)
ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
...@@ -623,30 +645,29 @@ static void send_rc_ack(struct ipath_qp *qp) ...@@ -623,30 +645,29 @@ static void send_rc_ack(struct ipath_qp *qp)
hdr.lrh[0] = cpu_to_be16(lrh0); hdr.lrh[0] = cpu_to_be16(lrh0);
hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC); hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid); hdr.lrh[3] = cpu_to_be16(dd->ipath_lid);
ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK); ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
/* writeq(hwords + 1, piobuf);
* If we can send the ACK, clear the ACK state.
*/
if (ipath_verbs_send(qp, &hdr, hwords, NULL, 0) == 0) {
dev->n_unicast_xmit++;
goto done;
}
/* if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
* We are out of PIO buffers at the moment. u32 *hdrp = (u32 *) &hdr;
* Pass responsibility for sending the ACK to the
* send tasklet so that when a PIO buffer becomes ipath_flush_wc();
* available, the ACK is sent ahead of other outgoing __iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
* packets. ipath_flush_wc();
*/ __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
dev->n_rc_qacks++; } else
__iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
ipath_flush_wc();
dev->n_unicast_xmit++;
goto done;
queue_ack: queue_ack:
spin_lock_irqsave(&qp->s_lock, flags);
dev->n_rc_qacks++; dev->n_rc_qacks++;
qp->s_flags |= IPATH_S_ACK_PENDING; qp->s_flags |= IPATH_S_ACK_PENDING;
qp->s_nak_state = qp->r_nak_state; qp->s_nak_state = qp->r_nak_state;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment