Commit 1aadf5c3 authored by Tejun Heo, committed by Jeff Garzik

libata: always use ata_qc_complete_multiple() for NCQ command completions

Currently, sata_fsl, mv and nv call ata_qc_complete() multiple times
from their interrupt handlers to indicate completion of NCQ commands.
This limits the visibility the libata core layer has into how commands
are being executed and completed, which is necessary to support IRQ
expecting in a generic way.  libata already has an interface to complete
multiple commands at once - ata_qc_complete_multiple() which ahci and
sata_sil24 already use.

This patch updates the three drivers to use ata_qc_complete_multiple()
too and updates comments on ata_qc_complete[_multiple]() regarding
their usages with NCQ completions.  This change not only provides
better visibility into command execution to the core layer but also
simplifies low level drivers.

* sata_fsl: It already builds done_mask.  Conversion is straight
  forward.

* sata_mv: mv_process_crpb_response() no longer checks for illegal
  completions, it just returns whether the tag is completed or not.
  mv_process_crpb_entries() builds done_mask from it and passes it to
  ata_qc_complete_multiple() which will check for illegal completions.

* sata_nv adma: Similar to sata_mv.  nv_adma_check_cpb() now just
  returns the tag status and nv_adma_interrupt() builds done_mask from
  it and passes it to ata_qc_complete_multiple().

* sata_nv swncq: It already builds done_mask.  Drop unnecessary
  illegal transition checks and call ata_qc_complete_multiple().

In the long run, it might be a good idea to make ata_qc_complete()
whine if called when multiple NCQ commands are in flight.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ashish Kalra <ashish.kalra@freescale.com>
Cc: Saeed Bishara <saeed@marvell.com>
Cc: Mark Lord <liml@rtr.ca>
Cc: Robert Hancock <hancockr@shaw.ca>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
parent d9027470
...@@ -4943,8 +4943,13 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc) ...@@ -4943,8 +4943,13 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc)
* ata_qc_complete - Complete an active ATA command * ata_qc_complete - Complete an active ATA command
* @qc: Command to complete * @qc: Command to complete
* *
* Indicate to the mid and upper layers that an ATA * Indicate to the mid and upper layers that an ATA command has
* command has completed, with either an ok or not-ok status. * completed, with either an ok or not-ok status.
*
* Refrain from calling this function multiple times when
* successfully completing multiple NCQ commands.
* ata_qc_complete_multiple() should be used instead, which will
* properly update IRQ expect state.
* *
* LOCKING: * LOCKING:
* spin_lock_irqsave(host lock) * spin_lock_irqsave(host lock)
...@@ -5037,6 +5042,10 @@ void ata_qc_complete(struct ata_queued_cmd *qc) ...@@ -5037,6 +5042,10 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
* requests normally. ap->qc_active and @qc_active is compared * requests normally. ap->qc_active and @qc_active is compared
* and commands are completed accordingly. * and commands are completed accordingly.
* *
* Always use this function when completing multiple NCQ commands
* from IRQ handlers instead of calling ata_qc_complete()
* multiple times to keep IRQ expect status properly in sync.
*
* LOCKING: * LOCKING:
* spin_lock_irqsave(host lock) * spin_lock_irqsave(host lock)
* *
......
...@@ -1137,17 +1137,13 @@ static void sata_fsl_host_intr(struct ata_port *ap) ...@@ -1137,17 +1137,13 @@ static void sata_fsl_host_intr(struct ata_port *ap)
ioread32(hcr_base + CE)); ioread32(hcr_base + CE));
for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) { for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) {
if (done_mask & (1 << i)) { if (done_mask & (1 << i))
qc = ata_qc_from_tag(ap, i);
if (qc) {
ata_qc_complete(qc);
}
DPRINTK DPRINTK
("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n", ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n",
i, ioread32(hcr_base + CC), i, ioread32(hcr_base + CC),
ioread32(hcr_base + CA)); ioread32(hcr_base + CA));
}
} }
ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
return; return;
} else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) { } else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) {
......
...@@ -2743,18 +2743,11 @@ static void mv_err_intr(struct ata_port *ap) ...@@ -2743,18 +2743,11 @@ static void mv_err_intr(struct ata_port *ap)
} }
} }
static void mv_process_crpb_response(struct ata_port *ap, static bool mv_process_crpb_response(struct ata_port *ap,
struct mv_crpb *response, unsigned int tag, int ncq_enabled) struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{ {
u8 ata_status; u8 ata_status;
u16 edma_status = le16_to_cpu(response->flags); u16 edma_status = le16_to_cpu(response->flags);
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
if (unlikely(!qc)) {
ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
__func__, tag);
return;
}
/* /*
* edma_status from a response queue entry: * edma_status from a response queue entry:
...@@ -2768,13 +2761,14 @@ static void mv_process_crpb_response(struct ata_port *ap, ...@@ -2768,13 +2761,14 @@ static void mv_process_crpb_response(struct ata_port *ap,
* Error will be seen/handled by * Error will be seen/handled by
* mv_err_intr(). So do nothing at all here. * mv_err_intr(). So do nothing at all here.
*/ */
return; return false;
} }
} }
ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
if (!ac_err_mask(ata_status)) if (!ac_err_mask(ata_status))
ata_qc_complete(qc); return true;
/* else: leave it for mv_err_intr() */ /* else: leave it for mv_err_intr() */
return false;
} }
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
...@@ -2783,6 +2777,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp ...@@ -2783,6 +2777,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
struct mv_host_priv *hpriv = ap->host->private_data; struct mv_host_priv *hpriv = ap->host->private_data;
u32 in_index; u32 in_index;
bool work_done = false; bool work_done = false;
u32 done_mask = 0;
int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
/* Get the hardware queue position index */ /* Get the hardware queue position index */
...@@ -2803,15 +2798,19 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp ...@@ -2803,15 +2798,19 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
/* Gen II/IIE: get command tag from CRPB entry */ /* Gen II/IIE: get command tag from CRPB entry */
tag = le16_to_cpu(response->id) & 0x1f; tag = le16_to_cpu(response->id) & 0x1f;
} }
mv_process_crpb_response(ap, response, tag, ncq_enabled); if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
done_mask |= 1 << tag;
work_done = true; work_done = true;
} }
/* Update the software queue position index in hardware */ if (work_done) {
if (work_done) ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
/* Update the software queue position index in hardware */
writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
(pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
port_mmio + EDMA_RSP_Q_OUT_PTR); port_mmio + EDMA_RSP_Q_OUT_PTR);
}
} }
static void mv_port_intr(struct ata_port *ap, u32 port_cause) static void mv_port_intr(struct ata_port *ap, u32 port_cause)
......
...@@ -873,29 +873,11 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) ...@@ -873,29 +873,11 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
ata_port_freeze(ap); ata_port_freeze(ap);
else else
ata_port_abort(ap); ata_port_abort(ap);
return 1; return -1;
} }
if (likely(flags & NV_CPB_RESP_DONE)) { if (likely(flags & NV_CPB_RESP_DONE))
struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num); return 1;
VPRINTK("CPB flags done, flags=0x%x\n", flags);
if (likely(qc)) {
DPRINTK("Completing qc from tag %d\n", cpb_num);
ata_qc_complete(qc);
} else {
struct ata_eh_info *ehi = &ap->link.eh_info;
/* Notifier bits set without a command may indicate the drive
is misbehaving. Raise host state machine violation on this
condition. */
ata_port_printk(ap, KERN_ERR,
"notifier for tag %d with no cmd?\n",
cpb_num);
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_RESET;
ata_port_freeze(ap);
return 1;
}
}
return 0; return 0;
} }
...@@ -1018,6 +1000,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) ...@@ -1018,6 +1000,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
NV_ADMA_STAT_CPBERR | NV_ADMA_STAT_CPBERR |
NV_ADMA_STAT_CMD_COMPLETE)) { NV_ADMA_STAT_CMD_COMPLETE)) {
u32 check_commands = notifier_clears[i]; u32 check_commands = notifier_clears[i];
u32 done_mask = 0;
int pos, rc; int pos, rc;
if (status & NV_ADMA_STAT_CPBERR) { if (status & NV_ADMA_STAT_CPBERR) {
...@@ -1034,10 +1017,13 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) ...@@ -1034,10 +1017,13 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
pos--; pos--;
rc = nv_adma_check_cpb(ap, pos, rc = nv_adma_check_cpb(ap, pos,
notifier_error & (1 << pos)); notifier_error & (1 << pos));
if (unlikely(rc)) if (rc > 0)
done_mask |= 1 << pos;
else if (unlikely(rc < 0))
check_commands = 0; check_commands = 0;
check_commands &= ~(1 << pos); check_commands &= ~(1 << pos);
} }
ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
} }
} }
...@@ -2132,7 +2118,6 @@ static int nv_swncq_sdbfis(struct ata_port *ap) ...@@ -2132,7 +2118,6 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
struct ata_eh_info *ehi = &ap->link.eh_info; struct ata_eh_info *ehi = &ap->link.eh_info;
u32 sactive; u32 sactive;
u32 done_mask; u32 done_mask;
int i;
u8 host_stat; u8 host_stat;
u8 lack_dhfis = 0; u8 lack_dhfis = 0;
...@@ -2152,27 +2137,11 @@ static int nv_swncq_sdbfis(struct ata_port *ap) ...@@ -2152,27 +2137,11 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
sactive = readl(pp->sactive_block); sactive = readl(pp->sactive_block);
done_mask = pp->qc_active ^ sactive; done_mask = pp->qc_active ^ sactive;
if (unlikely(done_mask & sactive)) { pp->qc_active &= ~done_mask;
ata_ehi_clear_desc(ehi); pp->dhfis_bits &= ~done_mask;
ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition" pp->dmafis_bits &= ~done_mask;
"(%08x->%08x)", pp->qc_active, sactive); pp->sdbfis_bits |= done_mask;
ehi->err_mask |= AC_ERR_HSM; ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
ehi->action |= ATA_EH_RESET;
return -EINVAL;
}
for (i = 0; i < ATA_MAX_QUEUE; i++) {
if (!(done_mask & (1 << i)))
continue;
qc = ata_qc_from_tag(ap, i);
if (qc) {
ata_qc_complete(qc);
pp->qc_active &= ~(1 << i);
pp->dhfis_bits &= ~(1 << i);
pp->dmafis_bits &= ~(1 << i);
pp->sdbfis_bits |= (1 << i);
}
}
if (!ap->qc_active) { if (!ap->qc_active) {
DPRINTK("over\n"); DPRINTK("over\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment