Commit 7a803754 authored by David Brownell, committed by Greg Kroah-Hartman

[PATCH] PATCH 2.5.14 -- ehci misc fixes

    - Report better errors, and in one case the correct one.
    - Adds strategic wmb() calls
    - Claims the right (scaled) ISO bandwidth
    - Uses non-CVS version ID

This will likely resolve that Archos MP3 Jukebox issue, where "-104"
(-ECONNRESET) was appearing mysteriously.
parent 7c1bce85
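(Aside, not part of the commit: the "-104" above is just the raw errno value. On i386 and most other Linux architectures ECONNRESET is 104, and -ECONNRESET is the status USB host drivers normally assign to URBs that were unlinked/canceled, which is why seeing it with no unlink in sight looked wrong. A trivial stand-alone check, plain userspace C with nothing EHCI-specific:)

    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
        /* ECONNRESET is 104 on i386 and most Linux ports, so an URB
         * completing with status "-104" is completing with -ECONNRESET,
         * the code normally reserved for asynchronously unlinked URBs.
         */
        printf("-ECONNRESET = %d\n", -ECONNRESET);
        return 0;
    }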
@@ -53,7 +53,7 @@
/*
* EHCI hc_driver implementation ... experimental, incomplete.
* Based on the 0.96 register interface specification.
* Based on the final 1.0 register interface specification.
*
* There are lots of things to help out with here ... notably
* everything "periodic", and of course testing with all sorts
@@ -70,6 +70,8 @@
*
* HISTORY:
*
* 2002-05-07 Some error path cleanups to report better errors; wmb();
* use non-CVS version id; better iso bandwidth claim.
* 2002-04-19 Control/bulk/interrupt submit no longer uses giveback() on
* errors in submit path. Bugfixes to interrupt scheduling/processing.
* 2002-03-05 Initial high-speed ISO support; reduce ITD memory; shift
@@ -83,7 +85,7 @@
* 2001-June Works with usb-storage and NEC EHCI on 2.4
*/
#define DRIVER_VERSION "$Revision: 0.31 $"
#define DRIVER_VERSION "2002-May-07"
#define DRIVER_AUTHOR "David Brownell"
#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
@@ -582,7 +584,7 @@ dbg ("wait for dequeue: state %d, reclaim %p, hcd state %d",
intr_deschedule (ehci, urb->start_frame, qh, urb->interval);
if (ehci->hcd.state == USB_STATE_HALT)
urb->status = -ESHUTDOWN;
qh_completions (ehci, &qh->qtd_list, 1);
qh_completions (ehci, qh, 1);
return 0;
case PIPE_ISOCHRONOUS:
......
@@ -21,17 +21,17 @@
/*-------------------------------------------------------------------------*/
/*
* EHCI hardware queue manipulation
* EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
*
* Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
* entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
* buffers needed for the larger number). We use one QH per endpoint, queue
* multiple (bulk or control) urbs per endpoint. URBs may need several qtds.
* A scheduled interrupt qh always has one qtd, one urb.
* A scheduled interrupt qh always (for now) has one qtd, one urb.
*
* ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
* interrupts) needs careful scheduling. Performance improvements can be
* an ongoing challenge.
* an ongoing challenge. That's in "ehci-sched.c".
*
* USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
* or otherwise through transaction translators (TTs) in USB 2.0 hubs using
@@ -91,6 +91,7 @@ static inline void qh_update (struct ehci_qh *qh, struct ehci_qtd *qtd)
qh->hw_alt_next = EHCI_LIST_END;
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb ();
qh->hw_token &= __constant_cpu_to_le32 (QTD_TOGGLE | QTD_STS_PING);
}
@@ -105,29 +106,29 @@ static inline void qtd_copy_status (struct urb *urb, size_t length, u32 token)
/* don't modify error codes */
if (unlikely (urb->status == -EINPROGRESS && (token & QTD_STS_HALT))) {
if (token & QTD_STS_BABBLE) {
/* FIXME "must" disable babbling device's port too */
urb->status = -EOVERFLOW;
} else if (!QTD_CERR (token)) {
if (token & QTD_STS_DBE)
} else if (token & QTD_STS_MMF) {
/* fs/ls interrupt xfer missed the complete-split */
urb->status = -EPROTO;
} else if (token & QTD_STS_DBE) {
urb->status = (QTD_PID (token) == 1) /* IN ? */
? -ENOSR /* hc couldn't read data */
: -ECOMM; /* hc couldn't write data */
else if (token & QTD_STS_MMF) /* missed tt uframe */
urb->status = -EPROTO;
else if (token & QTD_STS_XACT) {
if (QTD_LENGTH (token))
} else if (token & QTD_STS_XACT) {
/* timeout, bad crc, wrong PID, etc; retried */
if (QTD_CERR (token))
urb->status = -EPIPE;
else {
dbg ("3strikes");
urb->status = -EPROTO;
}
} else /* presumably a stall */
urb->status = -EPIPE;
/* CERR nonzero + data left + halt --> stall */
} else if (QTD_LENGTH (token))
/* CERR nonzero + no errors + halt --> stall */
} else if (QTD_CERR (token))
urb->status = -EPIPE;
else /* unknown */
urb->status = -EPROTO;
dbg ("ep %d-%s qtd token %08x --> status %d",
/* devpath */
usb_pipeendpoint (urb->pipe),
@@ -220,12 +221,11 @@ static void ehci_urb_done (
static int
qh_completions (
struct ehci_hcd *ehci,
struct list_head *qtd_list,
struct ehci_qh *qh,
int freeing
) {
struct ehci_qtd *qtd, *last;
struct list_head *next;
struct ehci_qh *qh = 0;
struct list_head *next, *qtd_list = &qh->qtd_list;
int unlink = 0, halted = 0;
unsigned long flags;
int retval = 0;
@@ -245,9 +245,6 @@ qh_completions (
struct urb *urb = qtd->urb;
u32 token = 0;
/* qh is non-null iff these qtds were queued to the HC */
qh = (struct ehci_qh *) urb->hcpriv;
/* clean up any state from previous QTD ...*/
if (last) {
if (likely (last->urb != urb)) {
@@ -266,7 +263,7 @@ qh_completions (
/* qh overlays can have HC's old cached copies of
* next qtd ptrs, if an URB was queued afterwards.
*/
if (qh && cpu_to_le32 (last->qtd_dma) == qh->hw_current
if (cpu_to_le32 (last->qtd_dma) == qh->hw_current
&& last->hw_next != qh->hw_qtd_next) {
qh->hw_alt_next = last->hw_alt_next;
qh->hw_qtd_next = last->hw_next;
@@ -278,58 +275,49 @@ qh_completions (
}
next = qtd->qtd_list.next;
/* if these qtds were queued to the HC, some may be active.
* else we're cleaning up after a failed URB submission.
*
* FIXME can simplify: cleanup case is gone now.
/* QTDs at tail may be active if QH+HC are running,
* or when unlinking some urbs queued to this QH
*/
if (likely (qh != 0)) {
int qh_halted;
qh_halted = __constant_cpu_to_le32 (QTD_STS_HALT)
& qh->hw_token;
token = le32_to_cpu (qtd->hw_token);
halted = halted
|| qh_halted
|| (__constant_cpu_to_le32 (QTD_STS_HALT)
& qh->hw_token) != 0
|| (ehci->hcd.state == USB_STATE_HALT)
|| (qh->qh_state == QH_STATE_IDLE);
/* QH halts only because of fault or unlink; in both
* cases, queued URBs get unlinked. But for unlink,
* URBs at the head of the queue can stay linked.
*/
if (unlikely (halted != 0)) {
/* fault: unlink the rest, since this qtd saw an error? */
if (unlikely ((token & QTD_STS_HALT) != 0)) {
freeing = unlink = 1;
/* status copied below */
/* unlink everything because of HC shutdown? */
/* QH halts only because of fault (above) or unlink (here). */
} else if (unlikely (halted != 0)) {
/* unlinking everything because of HC shutdown? */
if (ehci->hcd.state == USB_STATE_HALT) {
freeing = unlink = 1;
urb->status = -ESHUTDOWN;
/* explicit unlink, starting here? */
/* explicit unlink, maybe starting here? */
} else if (qh->qh_state == QH_STATE_IDLE
&& (urb->status == -ECONNRESET
|| urb->status == -ENOENT)) {
freeing = unlink = 1;
/* unlink everything because of error? */
} else if (qh_halted
&& !(token & QTD_STS_HALT)) {
freeing = unlink = 1;
if (urb->status == -EINPROGRESS)
urb->status = -ECONNRESET;
/* unlink the rest? */
} else if (unlink) {
urb->status = -ECONNRESET;
/* QH halted to unlink urbs after this? */
} else if ((token & QTD_STS_ACTIVE) != 0) {
/* QH halted to unlink urbs _after_ this? */
} else if (!unlink && (token & QTD_STS_ACTIVE) != 0) {
qtd = 0;
continue;
}
/* unlink the rest? once we start unlinking, after
* a fault or explicit unlink, we unlink all later
* urbs. usb spec requires that.
*/
if (unlink && urb->status == -EINPROGRESS)
urb->status = -ECONNRESET;
/* Else QH is active, so we must not modify QTDs
* that HC may be working on. Break from loop.
* that HC may be working on. No more qtds to check.
*/
} else if (unlikely ((token & QTD_STS_ACTIVE) != 0)) {
next = qtd_list;
@@ -340,7 +328,6 @@ qh_completions (
spin_lock (&urb->lock);
qtd_copy_status (urb, qtd->length, token);
spin_unlock (&urb->lock);
}
/*
* NOTE: this won't work right with interrupt urbs that
@@ -386,7 +373,7 @@ qh_completions (
}
/* patch up list head? */
if (unlikely (halted && qh && !list_empty (qtd_list))) {
if (unlikely (halted && !list_empty (qtd_list))) {
qh_update (qh, list_entry (qtd_list->next,
struct ehci_qtd, qtd_list));
}
@@ -736,6 +723,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
qh->hw_info1 |= __constant_cpu_to_le32 (QH_HEAD); /* [4.8] */
qh->qh_next.qh = qh;
qh->hw_next = dma;
wmb ();
ehci->async = qh;
writel ((u32)qh->qh_dma, &ehci->regs->async_next);
cmd |= CMD_ASE | CMD_RUN;
@@ -747,6 +735,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
qh->hw_info1 &= ~__constant_cpu_to_le32 (QH_HEAD); /* [4.8] */
qh->qh_next = q->qh_next;
qh->hw_next = q->hw_next;
wmb ();
q->qh_next.qh = qh;
q->hw_next = dma;
}
@@ -884,7 +873,7 @@ static void end_unlink_async (struct ehci_hcd *ehci)
ehci->reclaim = 0;
ehci->reclaim_ready = 0;
qh_completions (ehci, &qh->qtd_list, 1);
qh_completions (ehci, qh, 1);
// unlink any urb should now unlink all following urbs, so that
// relinking only happens for urbs before the unlinked ones.
@@ -960,6 +949,7 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
}
prev->hw_next = qh->hw_next;
prev->qh_next = qh->qh_next;
wmb ();
ehci->reclaim_ready = 0;
cmd |= CMD_IAAD;
@@ -986,7 +976,7 @@ static void scan_async (struct ehci_hcd *ehci)
spin_unlock_irqrestore (&ehci->lock, flags);
/* concurrent unlink could happen here */
qh_completions (ehci, &qh->qtd_list, 1);
qh_completions (ehci, qh, 1);
spin_lock_irqsave (&ehci->lock, flags);
qh_put (ehci, qh);
......
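(Aside, not part of the commit: the wmb() calls added in the linking paths above all follow one pattern — completely initialize the new QH, including its own hw_next, then issue the barrier, then perform the single store that makes it reachable by the host controller. A stand-alone C sketch of that ordering idea; the struct, field, and function names are invented, and C11 release semantics stand in for the kernel's wmb():)

    #include <stdatomic.h>
    #include <stdint.h>

    struct fake_qh {
        uint32_t         hw_info;   /* descriptor contents the HC reads */
        _Atomic uint32_t hw_next;   /* link pointer the HC follows */
    };

    /* Finish writing the new element first, then make it reachable.  The
     * release store plays the role the added wmb() plays before the plain
     * store in the driver: an observer chasing prev->hw_next never finds
     * a half-initialized QH.
     */
    static void link_new_qh(struct fake_qh *prev, struct fake_qh *new_qh,
                            uint32_t new_dma, uint32_t info)
    {
        new_qh->hw_info = info;                       /* 1: fill it in */
        new_qh->hw_next = atomic_load_explicit(&prev->hw_next,
                                               memory_order_relaxed);

        atomic_store_explicit(&prev->hw_next, new_dma,
                              memory_order_release);  /* 2: then expose it */
    }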
@@ -23,6 +23,14 @@
/*
* EHCI scheduled transaction support: interrupt, iso, split iso
* These are called "periodic" transactions in the EHCI spec.
*
* Note that for interrupt transfers, the QH/QTD manipulation is shared
* with the "asynchronous" transaction support (control/bulk transfers).
* The only real difference is in how interrupt transfers are scheduled.
* We get some funky API restrictions from the current URB model, which
* works notably better for reading transfers than for writing. (And
* which accordingly needs to change before it'll work inside devices,
* or with "USB On The Go" additions to USB 2.0 ...)
*/
/*
@@ -430,6 +438,7 @@ static int intr_submit (
ehci->periodic [frame] =
QH_NEXT (qh->qh_dma);
}
wmb ();
frame += period;
} while (frame < ehci->periodic_size);
@@ -478,7 +487,7 @@ intr_complete (
/* call any completions, after patching for reactivation */
spin_unlock_irqrestore (&ehci->lock, flags);
/* NOTE: currently restricted to one qtd per qh! */
if (qh_completions (ehci, &qh->qtd_list, 0) == 0)
if (qh_completions (ehci, qh, 0) == 0)
urb = 0;
spin_lock_irqsave (&ehci->lock, flags);
@@ -825,17 +834,26 @@ itd_schedule (struct ehci_hcd *ehci, struct urb *urb)
itd->hw_transaction [uframe & 0x07] = itd->transaction;
itd_link (ehci, (uframe >> 3) % ehci->periodic_size,
itd);
wmb ();
usecs += itd->usecs;
itd = list_entry (itd->itd_list.next,
struct ehci_itd, itd_list);
}
/* update bandwidth utilization records (for usbfs) */
/* FIXME usbcore expects per-frame average, which isn't
* most accurate model... this provides the total claim,
* and expects the average to be computed only display.
/* update bandwidth utilization records (for usbfs)
*
* FIXME This claims each URB queued to an endpoint, as if
* transfers were concurrent, not sequential. So bandwidth
* typically gets double-billed ... comes from tying it to
* URBs rather than endpoints in the schedule. Luckily we
* don't use this usbfs data for serious decision making.
*/
usecs /= urb->number_of_packets;
usecs /= urb->interval;
usecs >>= 3;
if (usecs < 1)
usecs = 1;
usb_claim_bandwidth (urb->dev, urb, usecs, 1);
/* maybe enable periodic schedule processing */
......
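(Aside, not part of the commit: the scaling a few lines above is what the changelog means by claiming the right "scaled" ISO bandwidth — the summed per-ITD usecs are reduced before being reported through usb_claim_bandwidth() for usbfs accounting. A stand-alone sketch of that arithmetic; the helper name and example numbers are invented, only the scaling steps mirror the hunk above:)

    #include <stdio.h>

    /* Mirror the scaling shown above: divide the accumulated usecs by the
     * packet count and by the interval, shift right by 3 (divide by 8),
     * and never claim less than 1.
     */
    static unsigned scaled_iso_claim(unsigned total_usecs,
                                     unsigned number_of_packets,
                                     unsigned interval)
    {
        unsigned usecs = total_usecs;

        usecs /= number_of_packets;
        usecs /= interval;
        usecs >>= 3;
        if (usecs < 1)
            usecs = 1;
        return usecs;
    }

    int main(void)
    {
        /* e.g. 400 accumulated usecs, 10 packets, interval 1 */
        printf("claimed: %u\n", scaled_iso_claim(400, 10, 1));  /* prints 5 */
        return 0;
    }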