Commit 44b73380 authored by Mark Lord, committed by Jeff Garzik

sata_mv: fix broken DSM/TRIM support (v2)

Fix DSM/TRIM commands in sata_mv (v2).
These need to be issued using old-school "BM DMA",
rather than via the EDMA host queue.

Since the chips don't have proper BM DMA status,
we need to be more careful with setting the ATA_DMA_INTR bit,
since DSM/TRIM often has a long delay between "DMA complete"
and "command complete".

GEN_I chips don't have BM DMA, so no TRIM for them.
Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Cc: stable@kernel.org
parent 60f5d6ef
...@@ -1898,19 +1898,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc) ...@@ -1898,19 +1898,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
* LOCKING: * LOCKING:
* Inherited from caller. * Inherited from caller.
*/ */
static void mv_bmdma_stop(struct ata_queued_cmd *qc) static void mv_bmdma_stop_ap(struct ata_port *ap)
{ {
struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap); void __iomem *port_mmio = mv_ap_base(ap);
u32 cmd; u32 cmd;
/* clear start/stop bit */ /* clear start/stop bit */
cmd = readl(port_mmio + BMDMA_CMD); cmd = readl(port_mmio + BMDMA_CMD);
cmd &= ~ATA_DMA_START; if (cmd & ATA_DMA_START) {
writelfl(cmd, port_mmio + BMDMA_CMD); cmd &= ~ATA_DMA_START;
writelfl(cmd, port_mmio + BMDMA_CMD);
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
ata_sff_dma_pause(ap);
}
}
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ static void mv_bmdma_stop(struct ata_queued_cmd *qc)
ata_sff_dma_pause(ap); {
mv_bmdma_stop_ap(qc->ap);
} }
/** /**
...@@ -1934,8 +1940,21 @@ static u8 mv_bmdma_status(struct ata_port *ap) ...@@ -1934,8 +1940,21 @@ static u8 mv_bmdma_status(struct ata_port *ap)
reg = readl(port_mmio + BMDMA_STATUS); reg = readl(port_mmio + BMDMA_STATUS);
if (reg & ATA_DMA_ACTIVE) if (reg & ATA_DMA_ACTIVE)
status = ATA_DMA_ACTIVE; status = ATA_DMA_ACTIVE;
else else if (reg & ATA_DMA_ERR)
status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
else {
/*
* Just because DMA_ACTIVE is 0 (DMA completed),
* this does _not_ mean the device is "done".
* So we should not yet be signalling ATA_DMA_INTR
* in some cases. Eg. DSM/TRIM, and perhaps others.
*/
mv_bmdma_stop_ap(ap);
if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
status = 0;
else
status = ATA_DMA_INTR;
}
return status; return status;
} }
...@@ -1995,6 +2014,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) ...@@ -1995,6 +2014,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
switch (tf->protocol) { switch (tf->protocol) {
case ATA_PROT_DMA: case ATA_PROT_DMA:
if (tf->command == ATA_CMD_DSM)
return;
/* fall-thru */
case ATA_PROT_NCQ: case ATA_PROT_NCQ:
break; /* continue below */ break; /* continue below */
case ATA_PROT_PIO: case ATA_PROT_PIO:
...@@ -2094,6 +2116,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) ...@@ -2094,6 +2116,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
if ((tf->protocol != ATA_PROT_DMA) && if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ)) (tf->protocol != ATA_PROT_NCQ))
return; return;
if (tf->command == ATA_CMD_DSM)
return; /* use bmdma for this */
/* Fill in Gen IIE command request block */ /* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE)) if (!(tf->flags & ATA_TFLAG_WRITE))
...@@ -2289,6 +2313,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) ...@@ -2289,6 +2313,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
switch (qc->tf.protocol) { switch (qc->tf.protocol) {
case ATA_PROT_DMA: case ATA_PROT_DMA:
if (qc->tf.command == ATA_CMD_DSM) {
if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
return AC_ERR_OTHER;
break; /* use bmdma for this */
}
/* fall thru */
case ATA_PROT_NCQ: case ATA_PROT_NCQ:
mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.