Commit f5f99929 authored by Bryan O'Sullivan, committed by Linus Torvalds

[PATCH] IB/ipath: fixed bug 9776 for real

The problem was that I was updating the head register multiple times in the
rcvhdrq processing loop, setting the counter on each update.  Since that
meant the tail register was ahead of head for all but the last update, we
would get extra interrupts.  The fix was to write the counter value only on
the last update.

I also changed the code to update rcvhdrhead and rcvegrindexhead at most once
every 16 packets when there are lots of packets in the queue (and, of course,
on the last packet regardless).
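
To make the batching concrete, here is a minimal, self-contained sketch of
the pattern (hypothetical names: write_rcvhdrhead(), SLOT_WORDS, and
MAX_WORDS stand in for ipath_write_ureg(), rsize, and maxcnt; the
interrupt-counter bit chosen in main() is purely illustrative):

/*
 * Sketch of the batched head-register update.  Simplified stand-in
 * types and names; not the driver source verbatim.
 */
#include <stdint.h>
#include <stdio.h>

#define SLOT_WORDS 32            /* words per rcvhdrq entry (rsize) */
#define MAX_WORDS  (512 * 32)    /* total words in the queue (maxcnt) */

static void write_rcvhdrhead(uint64_t lval)
{
	/* stand-in for ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0) */
	printf("head <- 0x%llx\n", (unsigned long long)lval);
}

static void process_rcvhdrq(uint32_t head, uint32_t tail, uint64_t intr_off)
{
	uint32_t i;

	for (i = 0; head != tail; i++) {
		/* ... process the packet at 'head' ... */
		head += SLOT_WORDS;
		if (head >= MAX_WORDS)
			head = 0;
		/*
		 * Write the head register only on the last packet and on
		 * every 16th packet, and arm the interrupt counter only on
		 * the final write; intermediate writes with the counter set
		 * were what caused the extra interrupts.
		 */
		if (head == tail || (i && !(i & 0xf))) {
			uint64_t lval = head;

			if (head == tail)	/* interrupt only on last */
				lval |= intr_off;
			write_rcvhdrhead(lval);
		}
	}
}

int main(void)
{
	/* e.g. 40 packets queued; counter bit in the top bit (illustrative) */
	process_rcvhdrq(0, 40 * SLOT_WORDS, 1ULL << 63);
	return 0;
}

The key point is that the intermediate writes never carry the
interrupt-counter bit, so the counter is only re-armed once the queue has
actually been drained.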

I also made some small cleanups while debugging this.

With these changes, xeon/monty typically sees two openib packets per interrupt
on sdp and ipoib, while opteron/monty sees about 1.25 packets per interrupt.

I'm seeing about 3800 Mbit/s monty/xeon, and 5000-5100 opteron/monty, with
netperf sdp.  Netpipe doesn't show numbers quite that good, peaking at about
4400 on opteron/monty sdp.  Plain ipoib is about 2100+ with netperf on xeon
and 2900+ on opteron, at 128KB.

Signed-off-by: olson@eng-12.pathscale.com
Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 13aef494
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -870,7 +870,7 @@ void ipath_kreceive(struct ipath_devdata *dd)
 	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;	/* words */
 	u32 etail = -1, l, hdrqtail;
 	struct ips_message_header *hdr;
-	u32 eflags, i, etype, tlen, pkttot = 0;
+	u32 eflags, i, etype, tlen, pkttot = 0, updegr=0;
 	static u64 totcalls;	/* stats, may eventually remove */
 	char emsg[128];
 
@@ -884,14 +884,14 @@ void ipath_kreceive(struct ipath_devdata *dd)
 	if (test_and_set_bit(0, &dd->ipath_rcv_pending))
 		goto bail;
 
-	if (dd->ipath_port0head ==
-	    (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
+	l = dd->ipath_port0head;
+	if (l == (u32)le64_to_cpu(*dd->ipath_hdrqtailptr))
 		goto done;
 
 	/* read only once at start for performance */
 	hdrqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
 
-	for (i = 0, l = dd->ipath_port0head; l != hdrqtail; i++) {
+	for (i = 0; l != hdrqtail; i++) {
 		u32 qp;
 		u8 *bthbytes;
 
@@ -1002,15 +1002,26 @@ void ipath_kreceive(struct ipath_devdata *dd)
 		l += rsize;
 		if (l >= maxcnt)
 			l = 0;
+		if (etype != RCVHQ_RCV_TYPE_EXPECTED)
+			updegr = 1;
 		/*
-		 * update for each packet, to help prevent overflows if we
-		 * have lots of packets.
+		 * update head regs on last packet, and every 16 packets.
+		 * Reduce bus traffic, while still trying to prevent
+		 * rcvhdrq overflows, for when the queue is nearly full
 		 */
-		(void)ipath_write_ureg(dd, ur_rcvhdrhead,
-				       dd->ipath_rhdrhead_intr_off | l, 0);
-		if (etype != RCVHQ_RCV_TYPE_EXPECTED)
-			(void)ipath_write_ureg(dd, ur_rcvegrindexhead,
-					       etail, 0);
+		if (l == hdrqtail || (i && !(i&0xf))) {
+			u64 lval;
+			if (l == hdrqtail) /* want interrupt only on last */
+				lval = dd->ipath_rhdrhead_intr_off | l;
+			else
+				lval = l;
+			(void)ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0);
+			if (updegr) {
+				(void)ipath_write_ureg(dd, ur_rcvegrindexhead,
+						       etail, 0);
+				updegr = 0;
+			}
+		}
 	}
 
 	pkttot += i;
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -383,7 +383,7 @@ static unsigned handle_frequent_errors(struct ipath_devdata *dd,
 	return supp_msgs;
 }
 
-static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
+static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 {
 	char msg[512];
 	u64 ignore_this_time = 0;
@@ -480,7 +480,7 @@ static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 			 INFINIPATH_E_IBSTATUSCHANGED);
 	}
 	if (!errs)
-		return;
+		return 0;
 
 	if (!noprint)
 		/*
@@ -604,9 +604,7 @@ static void handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
 		wake_up_interruptible(&ipath_sma_state_wait);
 	}
 
-	if (chkerrpkts)
-		/* process possible error packets in hdrq */
-		ipath_kreceive(dd);
+	return chkerrpkts;
 }
 
 /* this is separate to allow for better optimization of ipath_intr() */
@@ -765,10 +763,10 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
 irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 {
 	struct ipath_devdata *dd = data;
-	u32 istat;
+	u32 istat, chk0rcv = 0;
 	ipath_err_t estat = 0;
 	irqreturn_t ret;
-	u32 p0bits;
+	u32 p0bits, oldhead;
 	static unsigned unexpected = 0;
 	static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
 		(1U<<INFINIPATH_I_RCVURG_SHIFT);
@@ -810,9 +808,8 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 	 * interrupts.  We clear the interrupts first so that we don't
 	 * lose intr for later packets that arrive while we are processing.
 	 */
-	if (dd->ipath_port0head !=
-	    (u32)le64_to_cpu(*dd->ipath_hdrqtailptr)) {
-		u32 oldhead = dd->ipath_port0head;
+	oldhead = dd->ipath_port0head;
+	if (oldhead != (u32) le64_to_cpu(*dd->ipath_hdrqtailptr)) {
 		if (dd->ipath_flags & IPATH_GPIO_INTR) {
 			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
 					 (u64) (1 << 2));
@@ -830,6 +827,8 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 	}
 
 	istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
+	p0bits = port0rbits;
+
 	if (unlikely(!istat)) {
 		ipath_stats.sps_nullintr++;
 		ret = IRQ_NONE; /* not our interrupt, or already handled */
@@ -867,10 +866,11 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 			ipath_dev_err(dd, "Read of error status failed "
 				      "(all bits set); ignoring\n");
 		else
-			handle_errors(dd, estat);
+			if (handle_errors(dd, estat))
+				/* force calling ipath_kreceive() */
+				chk0rcv = 1;
 	}
 
-	p0bits = port0rbits;
 	if (istat & INFINIPATH_I_GPIO) {
 		/*
 		 * Packets are available in the port 0 rcv queue.
@@ -892,8 +892,10 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 			ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
 					 (u64) (1 << 2));
 			p0bits |= INFINIPATH_I_GPIO;
+			chk0rcv = 1;
 		}
 	}
+	chk0rcv |= istat & p0bits;
 
 	/*
 	 * clear the ones we will deal with on this round
@@ -905,18 +907,16 @@ irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
 
 	/*
-	 * we check for both transition from empty to non-empty, and urgent
-	 * packets (those with the interrupt bit set in the header), and
-	 * if enabled, the GPIO bit 2 interrupt used for port0 on some
-	 * HT-400 boards.
-	 * Do this before checking for pio buffers available, since
-	 * receives can overflow; piobuf waiters can afford a few
-	 * extra cycles, since they were waiting anyway.
+	 * handle port0 receive before checking for pio buffers available,
+	 * since receives can overflow; piobuf waiters can afford a few
+	 * extra cycles, since they were waiting anyway, and user's waiting
+	 * for receive are at the bottom.
 	 */
-	if (istat & p0bits) {
+	if (chk0rcv) {
 		ipath_kreceive(dd);
 		istat &= ~port0rbits;
 	}
 	if (istat & ((infinipath_i_rcvavail_mask <<
 		      INFINIPATH_I_RCVAVAIL_SHIFT)
 		     | (infinipath_i_rcvurg_mask <<