Commit e342c119 authored by John Gregor, committed by Roland Dreier

IB/ipath: Fix sendctrl locking

Code review pointed out that the locking around uses of ipath_sendctrl
and kr_sendctrl was, in several places, incorrect and/or inconsistent.
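
The fix is to guard every read-modify-write of the ipath_sendctrl shadow
copy, and the matching kr_sendctrl write, with the new ipath_sendctrl_lock,
and to flush the write by reading kr_scratch before dropping the lock. In
outline, each call site now follows this pattern (a sketch using the
driver's identifiers, assembled from the hunks below rather than copied
verbatim from any one of them):

        unsigned long flags;

        spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
        /* modify the shadow copy while holding the lock ... */
        dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
        /* ... write it out to the chip ... */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
        /* ... and flush the write by reading the scratch register */
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
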
Signed-off-by: John Gregor <john.gregor@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 9ab4295d
@@ -803,31 +803,37 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
                          unsigned cnt)
 {
        unsigned i, last = first + cnt;
-       u64 sendctrl, sendorig;
+       unsigned long flags;
 
        ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
-       sendorig = dd->ipath_sendctrl;
        for (i = first; i < last; i++) {
-               sendctrl = sendorig | INFINIPATH_S_DISARM |
-                       (i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
+               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+               /*
+                * The disarm-related bits are write-only, so it
+                * is ok to OR them in with our copy of sendctrl
+                * while we hold the lock.
+                */
                ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                                sendctrl);
+                       dd->ipath_sendctrl | INFINIPATH_S_DISARM |
+                       (i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
+               /* can't disarm bufs back-to-back per iba7220 spec */
+               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
        }
 
        /*
-        * Write it again with current value, in case ipath_sendctrl changed
-        * while we were looping; no critical bits that would require
-        * locking.
-        *
-        * disable PIOAVAILUPD, then re-enable, reading scratch in
+        * Disable PIOAVAILUPD, then re-enable, reading scratch in
         * between. This seems to avoid a chip timing race that causes
-        * pioavail updates to memory to stop.
+        * pioavail updates to memory to stop. We xor as we don't
+        * know the state of the bit when we're called.
         */
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                        sendorig & ~INFINIPATH_S_PIOBUFAVAILUPD);
-       sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+               dd->ipath_sendctrl ^ INFINIPATH_S_PIOBUFAVAILUPD);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 }
 
 /**
@@ -2056,6 +2062,8 @@ void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
  */
 void ipath_shutdown_device(struct ipath_devdata *dd)
 {
+       unsigned long flags;
+
        ipath_dbg("Shutting down the device\n");
 
        dd->ipath_flags |= IPATH_LINKUNK;
@@ -2076,9 +2084,13 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
         * gracefully stop all sends allowing any in progress to trickle out
         * first.
         */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+       dd->ipath_sendctrl = 0;
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
        /* flush it */
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
+
        /*
         * enough for anything that's going to trickle out to have actually
         * done so.
@@ -2149,11 +2149,15 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
 
 static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
 {
-       u64 reg = dd->ipath_sendctrl;
+       unsigned long flags;
 
-       clear_bit(IPATH_S_PIOBUFAVAILUPD, &reg);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, reg);
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
+               dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
        return 0;
 }
@@ -345,7 +345,7 @@ static int init_chip_first(struct ipath_devdata *dd,
                  dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
 
        spin_lock_init(&dd->ipath_tid_lock);
-
+       spin_lock_init(&dd->ipath_sendctrl_lock);
        spin_lock_init(&dd->ipath_gpio_lock);
        spin_lock_init(&dd->ipath_eep_st_lock);
        mutex_init(&dd->ipath_eep_lock);
@@ -372,9 +372,9 @@ static int init_chip_reset(struct ipath_devdata *dd,
        *pdp = dd->ipath_pd[0];
        /* ensure chip does no sends or receives while we re-initialize */
        dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0);
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
 
        rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
        if (dd->ipath_portcnt != rtmp)
@@ -487,6 +487,7 @@ static void enable_chip(struct ipath_devdata *dd,
                        struct ipath_portdata *pd, int reinit)
 {
        u32 val;
+       unsigned long flags;
        int i;
 
        if (!reinit)
@@ -495,11 +496,13 @@ static void enable_chip(struct ipath_devdata *dd,
        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                         dd->ipath_rcvctrl);
 
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
        /* Enable PIO send, and update of PIOavail regs to memory. */
        dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
                INFINIPATH_S_PIOBUFAVAILUPD;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                        dd->ipath_sendctrl);
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
        /*
         * enable port 0 receive, and receive interrupt. other ports
@@ -696,6 +699,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
        u64 val;
        struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
        gfp_t gfp_flags = GFP_USER | __GFP_COMP;
+       unsigned long flags;
 
        ret = init_housekeeping(dd, &pd, reinit);
        if (ret)
@@ -827,8 +831,12 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
        ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
                         ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                        INFINIPATH_S_PIOENABLE);
+
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+       dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE;
+       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
        /*
         * before error clears, since we expect serdes pll errors during
@@ -795,6 +795,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
 {
        int i, im;
        __le64 val;
+       unsigned long flags;
 
        /* disable error interrupts, to avoid confusion */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
@@ -813,11 +814,14 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
                         dd->ipath_control);
 
        /* ensure pio avail updates continue */
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
        ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                        dd->ipath_sendctrl);
+                dd->ipath_sendctrl);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
        /*
         * We just enabled pioavailupdate, so dma copy is almost certainly
@@ -922,6 +926,7 @@ static noinline void ipath_bad_regread(struct ipath_devdata *dd)
 
 static void handle_layer_pioavail(struct ipath_devdata *dd)
 {
+       unsigned long flags;
        int ret;
 
        ret = ipath_ib_piobufavail(dd->verbs_dev);
@@ -930,9 +935,12 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
 
        return;
 set:
-       set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+       dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 }
 
 /*
@@ -1168,9 +1176,14 @@ irqreturn_t ipath_intr(int irq, void *data)
                handle_urcv(dd, istat);
 
        if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
-               clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+               unsigned long flags;
+
+               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+               dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
                ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                                 dd->ipath_sendctrl);
+               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
                handle_layer_pioavail(dd);
        }
@@ -376,6 +376,7 @@ struct ipath_devdata {
        dma_addr_t *ipath_physshadow;
        /* lock to workaround chip bug 9437 */
        spinlock_t ipath_tid_lock;
+       spinlock_t ipath_sendctrl_lock;
 
        /*
         * IPATH_STATUS_*,
@@ -479,9 +479,14 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
 
 static void want_buffer(struct ipath_devdata *dd)
 {
-       set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+       unsigned long flags;
+
+       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+       dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
        ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                         dd->ipath_sendctrl);
+       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 }
 
 /**