Commit a57c1bad authored by Alan Cox, committed by Jeff Garzik

libata-sff: Fix oops reported in kerneloops.org for pnp devices with no ctl

- Make ata_sff_altstatus private so that nobody uses it by mistake
- Drop the 400ns delay from it

Add

ata_sff_irq_status	-	encapsulates the IRQ check logic

This function keeps the existing behaviour for altstatus-using devices. I
actually suspect the logic was wrong before the changes, but -rc isn't the
time to play with that.

ata_sff_sync		-	ensure writes hit the device

Really we want an io* operation for 'is posted', e.g. ioisposted(ioaddr), so
that we can fix the nasty delay this causes on most systems.

- ata_sff_pause		-	400ns delay

Ensure the command has hit the device and delay 400ns.

- ata_sff_dma_pause

Ensure the I/O has hit the device and enforce the HDMA1:0 transition delay.
Requires that the altstatus register exists; BUG if not, so we don't risk
corruption in MWDMA modes. (With UDMA the checksum will save your backside,
in theory.)

The only other complication then is devices with their own handlers:
rb532 can use dma_pause, but scc needs to access its own altstatus
register for internal errata workarounds, so it calls the driver's own
altstatus function directly. (A short usage sketch of the new helpers
follows below.)
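
For illustration, a minimal sketch of how an SFF driver might use the two
newly exported helpers. Only ata_sff_pause(), ata_sff_dma_pause() and the
standard libata types come from the patched <linux/libata.h>; the
pata_foo_* function names are hypothetical.

#include <linux/libata.h>

static void pata_foo_exec_command(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* Issue the command to the device. */
	iowrite8(tf->command, ap->ioaddr.command_addr);
	/* Flush the posted write and honour the 400ns rule before the
	   status register is touched again - this is what ata_sff_pause()
	   now provides. */
	ata_sff_pause(ap);
}

static void pata_foo_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* ... stop the controller's DMA engine here ... */

	/* Fence the I/O and enforce the HDMA1:0 transition delay;
	   replaces the old ata_sff_altstatus(ap) dummy read. */
	ata_sff_dma_pause(ap);
}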
Signed-off-by: Alan Cox <alan@redhat.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
parent 4f0ebe3c
@@ -247,7 +247,7 @@ u8 ata_sff_check_status(struct ata_port *ap)
  * LOCKING:
  * Inherited from caller.
  */
-u8 ata_sff_altstatus(struct ata_port *ap)
+static u8 ata_sff_altstatus(struct ata_port *ap)
 {
 	if (ap->ops->sff_check_altstatus)
 		return ap->ops->sff_check_altstatus(ap);
@@ -255,6 +255,93 @@ u8 ata_sff_altstatus(struct ata_port *ap)
 	return ioread8(ap->ioaddr.altstatus_addr);
 }
+
+/**
+ * ata_sff_irq_status - Check if the device is busy
+ * @ap: port where the device is
+ *
+ * Determine if the port is currently busy. Uses altstatus
+ * if available in order to avoid clearing shared IRQ status
+ * when finding an IRQ source. Non ctl capable devices don't
+ * share interrupt lines fortunately for us.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static u8 ata_sff_irq_status(struct ata_port *ap)
+{
+	u8 status;
+
+	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+		status = ata_sff_altstatus(ap);
+		/* Not us: We are busy */
+		if (status & ATA_BUSY)
+			return status;
+	}
+	/* Clear INTRQ latch */
+	status = ata_sff_check_status(ap);
+	return status;
+}
+
+/**
+ * ata_sff_sync - Flush writes
+ * @ap: Port to wait for.
+ *
+ * CAUTION:
+ * If we have an mmio device with no ctl and no altstatus
+ * method this will fail. No such devices are known to exist.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void ata_sff_sync(struct ata_port *ap)
+{
+	if (ap->ops->sff_check_altstatus)
+		ap->ops->sff_check_altstatus(ap);
+	else if (ap->ioaddr.altstatus_addr)
+		ioread8(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ * ata_sff_pause - Flush writes and wait 400nS
+ * @ap: Port to pause for.
+ *
+ * CAUTION:
+ * If we have an mmio device with no ctl and no altstatus
+ * method this will fail. No such devices are known to exist.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+void ata_sff_pause(struct ata_port *ap)
+{
+	ata_sff_sync(ap);
+	ndelay(400);
+}
+
+/**
+ * ata_sff_dma_pause - Pause before commencing DMA
+ * @ap: Port to pause for.
+ *
+ * Perform I/O fencing and ensure sufficient cycle delays occur
+ * for the HDMA1:0 transition
+ */
+void ata_sff_dma_pause(struct ata_port *ap)
+{
+	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+		/* An altstatus read will cause the needed delay without
+		   messing up the IRQ status */
+		ata_sff_altstatus(ap);
+		return;
+	}
+	/* There are no DMA controllers without ctl. BUG here to ensure
+	   we never violate the HDMA1:0 transition timing and risk
+	   corruption. */
+	BUG();
+}
+
 /**
  * ata_sff_busy_sleep - sleep until BSY clears, or timeout
  * @ap: port containing status register to be polled
@@ -742,7 +829,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
 	} else
 		ata_pio_sector(qc);
 
-	ata_sff_altstatus(qc->ap); /* flush */
+	ata_sff_sync(qc->ap); /* flush */
 }
 /**
@@ -763,8 +850,9 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
 	WARN_ON(qc->dev->cdb_len < 12);
 
 	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
-	ata_sff_altstatus(ap); /* flush */
-
+	ata_sff_sync(ap);
+	/* FIXME: If the CDB is for DMA do we need to do the transition delay
+	   or is bmdma_start guaranteed to do it ? */
 	switch (qc->tf.protocol) {
 	case ATAPI_PROT_PIO:
 		ap->hsm_task_state = HSM_ST;
@@ -905,7 +993,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 	if (unlikely(__atapi_pio_bytes(qc, bytes)))
 		goto err_out;
-	ata_sff_altstatus(ap); /* flush */
+	ata_sff_sync(ap); /* flush */
 
 	return;
@@ -1489,14 +1577,10 @@ inline unsigned int ata_sff_host_intr(struct ata_port *ap,
 		goto idle_irq;
 	}
 
-	/* check altstatus */
-	status = ata_sff_altstatus(ap);
-	if (status & ATA_BUSY)
-		goto idle_irq;
-
-	/* check main status, clearing INTRQ */
-	status = ap->ops->sff_check_status(ap);
-	if (unlikely(status & ATA_BUSY))
+	/* check main status, clearing INTRQ if needed */
+	status = ata_sff_irq_status(ap);
+	if (status & ATA_BUSY)
 		goto idle_irq;
 
 	/* ack bmdma irq events */
@@ -2030,7 +2114,7 @@ void ata_sff_error_handler(struct ata_port *ap)
 		ap->ops->bmdma_stop(qc);
 	}
 
-	ata_sff_altstatus(ap);
+	ata_sff_sync(ap); /* FIXME: We don't need this */
 	ap->ops->sff_check_status(ap);
 	ap->ops->sff_irq_clear(ap);
@@ -2203,7 +2287,7 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
 		      mmio + ATA_DMA_CMD);
 
 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_sff_altstatus(ap);	/* dummy read */
+	ata_sff_dma_pause(ap);
 }
 /**
@@ -2722,7 +2806,8 @@ EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
 EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
 EXPORT_SYMBOL_GPL(ata_sff_dev_select);
 EXPORT_SYMBOL_GPL(ata_sff_check_status);
-EXPORT_SYMBOL_GPL(ata_sff_altstatus);
+EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
+EXPORT_SYMBOL_GPL(ata_sff_pause);
 EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
 EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
 EXPORT_SYMBOL_GPL(ata_sff_tf_load);
...
@@ -270,7 +270,7 @@ static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc)
 	disable_dma(state->dma);
 
 	/* see ata_bmdma_stop */
-	ata_sff_altstatus(ap);
+	ata_sff_dma_pause(ap);
 }
 
 static u8 pata_icside_bmdma_status(struct ata_port *ap)
...
@@ -57,7 +57,9 @@ static inline void rb532_pata_finish_io(struct ata_port *ap)
 	struct ata_host *ah = ap->host;
 	struct rb532_cf_info *info = ah->private_data;
 
-	ata_sff_altstatus(ap);
+	/* FIXME: Keep previous delay. If this is merely a fence then
+	   ata_sff_sync might be sufficient. */
+	ata_sff_dma_pause(ap);
 	ndelay(RB500_CF_IO_DELAY);
 
 	set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
...
@@ -726,7 +726,7 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc)
 		  in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
 
 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_sff_altstatus(ap);	/* dummy read */
+	ata_sff_dma_pause(ap);	/* dummy read */
 }
 
 /**
@@ -747,7 +747,8 @@ static u8 scc_bmdma_status (struct ata_port *ap)
 		return host_stat;
 
 	/* errata A252,A308 workaround: Step4 */
-	if ((ata_sff_altstatus(ap) & ATA_ERR) && (int_status & INTSTS_INTRQ))
+	if ((scc_check_altstatus(ap) & ATA_ERR)
+	    && (int_status & INTSTS_INTRQ))
 		return (host_stat | ATA_DMA_INTR);
 
 	/* errata A308 workaround Step5 */
...
@@ -1432,7 +1432,8 @@ extern void ata_sff_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
 extern u8 ata_sff_check_status(struct ata_port *ap);
-extern u8 ata_sff_altstatus(struct ata_port *ap);
+extern void ata_sff_pause(struct ata_port *ap);
+extern void ata_sff_dma_pause(struct ata_port *ap);
 extern int ata_sff_busy_sleep(struct ata_port *ap,
 			      unsigned long timeout_pat, unsigned long timeout);
 extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline);
@@ -1492,19 +1493,6 @@ extern int ata_pci_sff_init_one(struct pci_dev *pdev,
 			     struct scsi_host_template *sht, void *host_priv);
 #endif /* CONFIG_PCI */
 
-/**
- * ata_sff_pause - Flush writes and pause 400 nanoseconds.
- * @ap: Port to wait for.
- *
- * LOCKING:
- * Inherited from caller.
- */
-static inline void ata_sff_pause(struct ata_port *ap)
-{
-	ata_sff_altstatus(ap);
-	ndelay(400);
-}
-
 /**
  * ata_sff_busy_wait - Wait for a port status register
  * @ap: Port to wait for.
...