Commit 0f8b375f authored by Linus Torvalds

Merge bk://gkernel.bkbits.net/net-drivers-2.5

into ppc970.osdl.org:/home/torvalds/v2.5/linux
parents 1f0bb3eb 6bac5a4d
@@ -55,7 +55,7 @@ extern void ide_do_request(ide_hwgroup_t *hwgroup, int masked_irq);
#define IDE_PMAC_DEBUG
#define DMA_WAIT_TIMEOUT 100
#define DMA_WAIT_TIMEOUT 50
typedef struct pmac_ide_hwif {
unsigned long regbase;
@@ -2026,8 +2026,11 @@ pmac_ide_dma_end (ide_drive_t *drive)
dstat = readl(&dma->status);
writel(((RUN|WAKE|DEAD) << 16), &dma->control);
pmac_ide_destroy_dmatable(drive);
/* verify good dma status */
return (dstat & (RUN|DEAD|ACTIVE)) != RUN;
/* verify good dma status. we don't check for ACTIVE being 0. We should...
* in theory, but with ATAPI devices doing buffer underruns, that would
* cause us to disable DMA, which isn't what we want
*/
return (dstat & (RUN|DEAD)) != RUN;
}
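For reference, a minimal sketch of how the new mask reads, assuming the DBDMA status bit macros (RUN, DEAD, ACTIVE) from the ppc <asm/dbdma.h>; the helper below is invented for illustration and is not part of the patch:

#include <asm/dbdma.h>	/* RUN, DEAD, ACTIVE status bits */

/* Illustration only: non-zero means treat the DMA transfer as failed.
 * A healthy channel still has RUN set and DEAD clear when the transfer
 * ends; ACTIVE is deliberately left out of the mask so an ATAPI buffer
 * underrun (which can leave the channel active) does not end up getting
 * DMA disabled on the drive.
 */
static inline int pmac_dma_status_bad(unsigned long dstat)
{
	return (dstat & (RUN | DEAD)) != RUN;
}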
/*
@@ -2041,7 +2044,7 @@ pmac_ide_dma_test_irq (ide_drive_t *drive)
{
pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
volatile struct dbdma_regs *dma;
unsigned long status;
unsigned long status, timeout;
if (pmif == NULL)
return 0;
@@ -2057,17 +2060,8 @@ pmac_ide_dma_test_irq (ide_drive_t *drive)
* - The dbdma fifo hasn't yet finished flushing to
* system memory when the disk interrupt occurs.
*
* The trick here is to increment drive->waiting_for_dma,
* and return as if no interrupt occurred. If the counter
* reaches a certain timeout value, we then return 1. If
* we really got the interrupt, it will happen right away
* again.
* Apple's solution here may be more elegant. They issue
* a DMA channel interrupt (a separate irq line) via a DBDMA
* NOP command just before the STOP, and wait for both the
* disk and DBDMA interrupts to have completed.
*/
/* If ACTIVE is cleared, the STOP command has passed and
* transfer is complete.
*/
@@ -2079,15 +2073,26 @@ pmac_ide_dma_test_irq (ide_drive_t *drive)
called while not waiting\n", HWIF(drive)->index);
/* If dbdma didn't execute the STOP command yet, the
* active bit is still set */
drive->waiting_for_dma++;
if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
printk(KERN_WARNING "ide%d, timeout waiting \
for dbdma command stop\n", HWIF(drive)->index);
return 1;
}
udelay(5);
return 0;
* active bit is still set. We consider that we aren't
* sharing interrupts (which is hopefully the case with
* those controllers) and so we just try to flush the
* channel for pending data in the fifo
*/
udelay(1);
writel((FLUSH << 16) | FLUSH, &dma->control);
timeout = 0;
for (;;) {
udelay(1);
status = readl(&dma->status);
if ((status & FLUSH) == 0)
break;
if (++timeout > 100) {
printk(KERN_WARNING "ide%d, ide_dma_test_irq \
timeout flushing channel\n", HWIF(drive)->index);
break;
}
}
return 1;
}
static int __pmac
......
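The comment removed from pmac_ide_dma_test_irq above mentions Apple's alternative: queue a DBDMA NOP that raises the channel interrupt just before the STOP command, then wait for both the disk and DBDMA interrupts. A rough sketch of such a command-list tail, assuming the struct dbdma_cmd layout and the NOP_CMD, INTR_ALWAYS and DBDMA_STOP values from <asm/dbdma.h> plus the ppc st_le16() store helper; the function is hypothetical and not part of this patch:

#include <linux/string.h>	/* memset */
#include <asm/dbdma.h>		/* struct dbdma_cmd, NOP_CMD, INTR_ALWAYS, DBDMA_STOP */
#include <asm/io.h>		/* st_le16() little-endian store */

/* Hypothetical illustration: terminate a DBDMA command list with an
 * interrupting NOP followed by the usual STOP, so a driver could wait
 * for both the disk irq and the DBDMA channel irq before declaring the
 * transfer complete.
 */
static void pmac_dbdma_append_intr_stop(struct dbdma_cmd *cmd)
{
	memset(cmd, 0, 2 * sizeof(struct dbdma_cmd));
	st_le16(&cmd[0].command, NOP_CMD | INTR_ALWAYS);	/* interrupting NOP */
	st_le16(&cmd[1].command, DBDMA_STOP);			/* list terminator  */
}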
@@ -654,6 +654,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
cluster_start = curr = (gp->rx_new & ~(4 - 1));
count = 0;
kick = -1;
wmb();
while (curr != limit) {
curr = NEXT_RX(curr);
if (++count == 4) {
@@ -670,8 +671,10 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
count = 0;
}
}
if (kick >= 0)
if (kick >= 0) {
mb();
writel(kick, gp->regs + RXDMA_KICK);
}
}
static void gem_rx(struct gem *gp)
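The wmb() at the top of gem_post_rxds and the mb() added in front of the RXDMA_KICK write enforce the usual rule for handing receive descriptors back to the chip: a descriptor's buffer pointer must be visible before the status_word that marks it FRESH, and all descriptor stores must be visible before the uncached doorbell write. A minimal sketch of that rule for one descriptor, ignoring the real driver's 4-descriptor batching; gem_refill_one() is an invented name, while struct gem, struct gem_rxd, RXDCTRL_FRESH() and RXDMA_KICK are assumed from the driver's own headers:

/* Sketch only: publish one refilled RX descriptor, then ring the doorbell. */
static void gem_refill_one(struct gem *gp, int entry, dma_addr_t mapping)
{
	struct gem_rxd *rxd = &gp->init_block->rxd[entry];

	rxd->buffer = cpu_to_le64(mapping);		/* buffer pointer first...      */
	wmb();						/* ...ordered before ownership   */
	rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));

	mb();						/* descriptor stores before the  */
	writel(entry, gp->regs + RXDMA_KICK);		/* MMIO kick of the DMA engine   */
}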
@@ -884,6 +887,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (gem_intme(entry))
ctrl |= TXDCTRL_INTME;
txd->buffer = cpu_to_le64(mapping);
wmb();
txd->control_word = cpu_to_le64(ctrl);
entry = NEXT_TX(entry);
} else {
@@ -923,6 +927,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
txd = &gp->init_block->txd[entry];
txd->buffer = cpu_to_le64(mapping);
wmb();
txd->control_word = cpu_to_le64(this_ctrl | len);
if (gem_intme(entry))
@@ -932,6 +937,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
txd = &gp->init_block->txd[first_entry];
txd->buffer = cpu_to_le64(first_mapping);
wmb();
txd->control_word =
cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
}
@@ -943,6 +949,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (netif_msg_tx_queued(gp))
printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
dev->name, entry, skb->len);
mb();
writel(gp->tx_new, gp->regs + TXDMA_KICK);
spin_unlock_irq(&gp->lock);
@@ -1418,6 +1425,7 @@ static void gem_clean_rings(struct gem *gp)
gp->rx_skbs[i] = NULL;
}
rxd->status_word = 0;
wmb();
rxd->buffer = 0;
}
@@ -1478,6 +1486,7 @@ static void gem_init_rings(struct gem *gp)
RX_BUF_ALLOC_SIZE(gp),
PCI_DMA_FROMDEVICE);
rxd->buffer = cpu_to_le64(dma_addr);
wmb();
rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
skb_reserve(skb, RX_OFFSET);
}
@@ -1486,8 +1495,10 @@ static void gem_init_rings(struct gem *gp)
struct gem_txd *txd = &gb->txd[i];
txd->control_word = 0;
wmb();
txd->buffer = 0;
}
wmb();
}
/* Must be invoked under gp->lock. */
......
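The wmb() calls added to gem_start_xmit apply the same discipline on the transmit side: fill the buffer pointer, wmb(), then write the control_word that hands the descriptor to the chip, keep the SOF descriptor for last, and issue mb() before the TXDMA_KICK doorbell. The gem_clean_rings and gem_init_rings TX hunks use the same barrier when tearing descriptors down, clearing the ownership word before the buffer pointer. A condensed sketch for a single-fragment frame, assuming the TXDCTRL_* masks, NEXT_TX() and TXDMA_KICK from the sungem driver; gem_queue_one_frame() is an invented name, and the real gem_start_xmit also handles fragments, interrupt mitigation and locking:

/* Sketch only: queue one single-fragment frame on the gem TX ring. */
static void gem_queue_one_frame(struct gem *gp, dma_addr_t mapping,
				u32 len, int entry)
{
	struct gem_txd *txd = &gp->init_block->txd[entry];
	u64 ctrl = (len & TXDCTRL_BUFSZ) | TXDCTRL_SOF | TXDCTRL_EOF;

	txd->buffer = cpu_to_le64(mapping);	/* buffer address first...          */
	wmb();					/* ...visible before the control     */
	txd->control_word = cpu_to_le64(ctrl);	/* word passes ownership to the chip */

	mb();					/* descriptor stores before the      */
	writel(NEXT_TX(entry), gp->regs + TXDMA_KICK);	/* doorbell write            */
}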