Commit d8808735 authored by David Brownell, committed by Greg Kroah-Hartman

[PATCH] ehci-hcd, handle async_next register correctly

This patch should improve behavior of the EHCI driver,
particularly on VIA hardware.

  - A more careful reading of the EHCI spec turns up
    requirements not to change this register's value
    while the async schedule is enabled.  That means
    in effect that it must never point to a QH that'd
    get unlinked ... driver now uses a dedicated QH (see the first
    sketch below).

  - Disables async schedule a bit faster:  after 50msec
    idle, not 330msec idle.

  - Streamline the "can't init memory" failure path (see the second
    sketch below).

  - Start to use the dev_dbg()/dev_info()/... macros
    in more places.

This version acts a lot happier than the previous one, removing
some failure modes I could never quite convince myself were
hardware (they weren't!).  I suspect it will remove a lot of the
"it hangs" failures that some folks have reported (mostly on 2.4,
though).
parent ee91716d
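
To make the first bullet concrete, here is a minimal, self-contained sketch of the "dedicated ring head" idea. It is not the driver's code: every name in it (sketch_qh, sketch_hc, sketch_init and friends) is invented for illustration, and the hardware register is modeled as a plain pointer, whereas the real driver stores DMA addresses and follows section 4.8 of the EHCI spec. The point is only that once a permanent dummy head anchors the ring, the value written at init never has to change while real QHs come and go.

/*
 * Sketch of the "dedicated ring head" idea from this patch.
 * All names here are made up for illustration; this is not the
 * ehci-hcd API.  The real driver keeps hardware-visible DMA
 * pointers and must honor EHCI spec section 4.8; this only
 * models the software side of the list.
 */
#include <assert.h>
#include <stddef.h>

struct sketch_qh {
    struct sketch_qh *next;     /* software ring, circular */
};

struct sketch_hc {
    struct sketch_qh head;          /* permanent dummy head */
    struct sketch_qh *async_next;   /* stand-in for the hw register */
};

static void sketch_init (struct sketch_hc *hc)
{
    hc->head.next = &hc->head;      /* empty ring: head points at itself */

    /* written once, before the async schedule is enabled, and never
     * changed afterwards -- the head qh is never unlinked
     */
    hc->async_next = &hc->head;
}

static void sketch_link (struct sketch_hc *hc, struct sketch_qh *qh)
{
    /* splice right after the head; async_next is untouched */
    qh->next = hc->head.next;
    hc->head.next = qh;
}

static void sketch_unlink (struct sketch_hc *hc, struct sketch_qh *qh)
{
    struct sketch_qh *prev = &hc->head;

    /* the head is always present, so the walk needs no wrap-around
     * or "last entry" special cases
     */
    while (prev->next != qh)
        prev = prev->next;
    prev->next = qh->next;
}

int main (void)
{
    struct sketch_hc hc;
    struct sketch_qh a, b;

    sketch_init (&hc);
    sketch_link (&hc, &a);
    sketch_link (&hc, &b);
    sketch_unlink (&hc, &a);
    sketch_unlink (&hc, &b);

    /* async_next never moved, even while qhs came and went */
    assert (hc.async_next == &hc.head);
    return 0;
}

In the qh_link_async() and start_unlink_async() changes below, ehci->async plays the role of this permanent head.

The streamlined "can't init memory" path is the familiar goto-fail shape; a tiny standalone version (again with invented names, and plain malloc standing in for the PCI pools) looks like this:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_mem {
    void *pool;
    void *table;
};

static void demo_cleanup (struct demo_mem *m)
{
    /* free (NULL) is a no-op, so partial setups clean up safely */
    free (m->pool);
    free (m->table);
    m->pool = NULL;
    m->table = NULL;
}

static int demo_init (struct demo_mem *m)
{
    memset (m, 0, sizeof *m);

    m->pool = malloc (64);
    if (!m->pool)
        goto fail;

    m->table = malloc (256);
    if (!m->table)
        goto fail;

    return 0;

fail:
    /* one message, one cleanup call, one return value, instead of
     * repeating the dbg()/cleanup()/return -ENOMEM trio per allocation
     */
    fprintf (stderr, "couldn't init memory\n");
    demo_cleanup (m);
    return -1;
}
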
@@ -18,6 +18,17 @@
 /* this file is part of ehci-hcd.c */
 
+#define ehci_dbg(ehci, fmt, args...) \
+    dev_dbg (*(ehci)->hcd.controller, fmt, ## args )
+
+#ifdef EHCI_VERBOSE_DEBUG
+#define ehci_vdbg(ehci, fmt, args...) \
+    dev_dbg (*(ehci)->hcd.controller, fmt, ## args )
+#else
+#define ehci_vdbg(ehci, fmt, args...) do { } while (0)
+#endif
+
 #ifdef EHCI_VERBOSE_DEBUG
 # define vdbg dbg
 #else
@@ -338,12 +349,8 @@ show_async (struct device *dev, char *buf, size_t count, loff_t off)
      * one QH per line, and TDs we know about
      */
     spin_lock_irqsave (&ehci->lock, flags);
-    if (ehci->async) {
-        qh = ehci->async;
-        do {
-            qh_lines (qh, &next, &size);
-        } while ((qh = qh->qh_next.qh) != ehci->async);
-    }
+    for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh)
+        qh_lines (qh, &next, &size);
     if (ehci->reclaim) {
         temp = snprintf (next, size, "\nreclaim =\n");
         size -= temp;
...
@@ -17,6 +17,13 @@
  */
 
 #include <linux/config.h>
+
+#ifdef CONFIG_USB_DEBUG
+#define DEBUG
+#else
+#undef DEBUG
+#endif
+
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/kernel.h>
@@ -31,12 +38,6 @@
 #include <linux/list.h>
 #include <linux/interrupt.h>
 
-#ifdef CONFIG_USB_DEBUG
-#define DEBUG
-#else
-#undef DEBUG
-#endif
-
 #include <linux/usb.h>
 #include <linux/version.h>
@@ -70,6 +71,7 @@
 *
 * HISTORY:
 *
+ * 2002-11-29 Correct handling for hw async_next register.
 * 2002-08-06 Handling for bulk and interrupt transfers is mostly shared;
 *      only scheduling is different, no arbitrary limitations.
 * 2002-07-25 Sanity check PCI reads, mostly for better cardbus support,
@@ -92,7 +94,7 @@
 * 2001-June Works with usb-storage and NEC EHCI on 2.4
 */
 
-#define DRIVER_VERSION "2002-Sep-23"
+#define DRIVER_VERSION "2002-Nov-29"
 #define DRIVER_AUTHOR "David Brownell"
 #define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
@@ -114,7 +116,7 @@ static const char hcd_name [] = "ehci-hcd";
 #define EHCI_TUNE_MULT_TT   1
 
 #define EHCI_WATCHDOG_JIFFIES   (HZ/100)    /* arbitrary; ~10 msec */
-#define EHCI_ASYNC_JIFFIES      (HZ/3)      /* async idle timeout */
+#define EHCI_ASYNC_JIFFIES      (HZ/20)     /* async idle timeout */
 
 /* Initial IRQ latency: lower than default */
 static int log2_irq_thresh = 0;     // 0 to 6
@@ -215,7 +217,7 @@ static void ehci_ready (struct ehci_hcd *ehci)
     /* wait for any schedule enables/disables to take effect */
     temp = 0;
-    if (ehci->async)
+    if (ehci->async->qh_next.qh)
         temp = STS_ASS;
     if (ehci->next_uframe != -1)
         temp |= STS_PSS;
@@ -360,7 +362,6 @@ static int ehci_start (struct usb_hcd *hcd)
     else                    // N microframes cached
         ehci->i_thresh = 2 + HCC_ISOC_THRES (hcc_params);
 
-    ehci->async = 0;
     ehci->reclaim = 0;
     ehci->next_uframe = -1;
@@ -374,6 +375,21 @@ static int ehci_start (struct usb_hcd *hcd)
     writel (INTR_MASK, &ehci->regs->intr_enable);
     writel (ehci->periodic_dma, &ehci->regs->frame_list);
 
+    /*
+     * dedicate a qh for the async ring head, since we couldn't unlink
+     * a 'real' qh without stopping the async schedule [4.8].  use it
+     * as the 'reclamation list head' too.
+     */
+    ehci->async->qh_next.qh = 0;
+    ehci->async->hw_next = QH_NEXT (ehci->async->qh_dma);
+    ehci->async->hw_info1 = cpu_to_le32 (QH_HEAD);
+    ehci->async->hw_token = cpu_to_le32 (QTD_STS_HALT);
+    ehci->async->hw_qtd_next = EHCI_LIST_END;
+    ehci->async->qh_state = QH_STATE_LINKED;
+    ehci_qtd_free (ehci, ehci->async->dummy);
+    ehci->async->dummy = 0;
+    writel ((u32)ehci->async->qh_dma, &ehci->regs->async_next);
+
     /*
      * hcc_params controls whether ehci->regs->segment must (!!!)
      * be used; it constrains QH/ITD/SITD and QTD locations.
...
@@ -142,6 +142,10 @@ static void qh_put (struct ehci_hcd *ehci, struct ehci_qh *qh)
 static void ehci_mem_cleanup (struct ehci_hcd *ehci)
 {
+    if (ehci->async)
+        qh_put (ehci, ehci->async);
+    ehci->async = 0;
+
     /* PCI consistent memory and pools */
     if (ehci->qtd_pool)
         pci_pool_destroy (ehci->qtd_pool);
@@ -183,20 +187,20 @@ static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
             32 /* byte alignment (for hw parts) */,
             4096 /* can't cross 4K */);
     if (!ehci->qtd_pool) {
-        dbg ("no qtd pool");
-        ehci_mem_cleanup (ehci);
-        return -ENOMEM;
+        goto fail;
     }
 
-    /* QH for control/bulk/intr transfers */
+    /* QHs for control/bulk/intr transfers */
     ehci->qh_pool = pci_pool_create ("ehci_qh", ehci->hcd.pdev,
             sizeof (struct ehci_qh),
             32 /* byte alignment (for hw parts) */,
             4096 /* can't cross 4K */);
     if (!ehci->qh_pool) {
-        dbg ("no qh pool");
-        ehci_mem_cleanup (ehci);
-        return -ENOMEM;
+        goto fail;
+    }
+    ehci->async = ehci_qh_alloc (ehci, flags);
+    if (!ehci->async) {
+        goto fail;
     }
 
     /* ITD for high speed ISO transfers */
@@ -205,9 +209,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
             32 /* byte alignment (for hw parts) */,
             4096 /* can't cross 4K */);
     if (!ehci->itd_pool) {
-        dbg ("no itd pool");
-        ehci_mem_cleanup (ehci);
-        return -ENOMEM;
+        goto fail;
     }
 
     /* SITD for full/low speed split ISO transfers */
@@ -216,9 +218,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
             32 /* byte alignment (for hw parts) */,
             4096 /* can't cross 4K */);
     if (!ehci->sitd_pool) {
-        dbg ("no sitd pool");
-        ehci_mem_cleanup (ehci);
-        return -ENOMEM;
+        goto fail;
     }
 
     /* Hardware periodic table */
@@ -227,9 +227,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
             ehci->periodic_size * sizeof (u32),
             &ehci->periodic_dma);
     if (ehci->periodic == 0) {
-        dbg ("no hw periodic table");
-        ehci_mem_cleanup (ehci);
-        return -ENOMEM;
+        goto fail;
     }
     for (i = 0; i < ehci->periodic_size; i++)
         ehci->periodic [i] = EHCI_LIST_END;
@@ -237,11 +235,14 @@ static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
     /* software shadow of hardware table */
     ehci->pshadow = kmalloc (ehci->periodic_size * sizeof (void *), flags);
     if (ehci->pshadow == 0) {
-        dbg ("no shadow periodic table");
-        ehci_mem_cleanup (ehci);
-        return -ENOMEM;
+        goto fail;
     }
     memset (ehci->pshadow, 0, ehci->periodic_size * sizeof (void *));
 
     return 0;
+
+fail:
+    ehci_dbg (ehci, "couldn't init memory\n");
+    ehci_mem_cleanup (ehci);
+    return -ENOMEM;
 }
@@ -678,33 +678,33 @@ ehci_qh_make (
 static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
 {
     u32         dma = QH_NEXT (qh->qh_dma);
-    struct ehci_qh  *q;
+    struct ehci_qh  *head;
 
-    if (unlikely (!(q = ehci->async))) {
+    /* (re)start the async schedule? */
+    head = ehci->async;
+    if (ehci->async_idle)
+        del_timer (&ehci->watchdog);
+    else if (!head->qh_next.qh) {
         u32 cmd = readl (&ehci->regs->command);
 
-        /* in case a clear of CMD_ASE didn't take yet */
-        (void) handshake (&ehci->regs->status, STS_ASS, 0, 150);
-
-        qh->hw_info1 |= __constant_cpu_to_le32 (QH_HEAD); /* [4.8] */
-        qh->qh_next.qh = qh;
-        qh->hw_next = dma;
-        wmb ();
-        ehci->async = qh;
-        writel ((u32)qh->qh_dma, &ehci->regs->async_next);
-        cmd |= CMD_ASE | CMD_RUN;
-        writel (cmd, &ehci->regs->command);
-        ehci->hcd.state = USB_STATE_RUNNING;
-        /* posted write need not be known to HC yet ... */
-    } else {
-        /* splice right after "start" of ring */
-        qh->hw_info1 &= ~__constant_cpu_to_le32 (QH_HEAD); /* [4.8] */
-        qh->qh_next = q->qh_next;
-        qh->hw_next = q->hw_next;
-        wmb ();
-        q->qh_next.qh = qh;
-        q->hw_next = dma;
+        if (!(cmd & CMD_ASE)) {
+            /* in case a clear of CMD_ASE didn't take yet */
+            (void) handshake (&ehci->regs->status, STS_ASS, 0, 150);
+            cmd |= CMD_ASE | CMD_RUN;
+            writel (cmd, &ehci->regs->command);
+            ehci->hcd.state = USB_STATE_RUNNING;
+            /* posted write need not be known to HC yet ... */
+        }
     }
+
+    /* splice right after start */
+    qh->qh_next = head->qh_next;
+    qh->hw_next = head->hw_next;
+    wmb ();
+    head->qh_next.qh = qh;
+    head->hw_next = dma;
     qh->qh_state = QH_STATE_LINKED;
     /* qtd completions reported later by interrupt */
@@ -897,6 +897,14 @@ static void end_unlink_async (struct ehci_hcd *ehci)
         qh_link_async (ehci, qh);
     else
         qh_put (ehci, qh);      // refcount from async list
+
+    /* it's not free to turn the async schedule on/off, so we leave it
+     * active but idle for a while once it empties.
+     */
+    if (!ehci->async->qh_next.qh && !timer_pending (&ehci->watchdog)) {
+        ehci->async_idle = 1;
+        mod_timer (&ehci->watchdog, jiffies + EHCI_ASYNC_JIFFIES);
+    }
 }
 
 /* makes sure the async qh will become idle */
@@ -909,7 +917,6 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
 #ifdef DEBUG
     if (ehci->reclaim
-            || !ehci->async
             || qh->qh_state != QH_STATE_LINKED
 #ifdef CONFIG_SMP
 // this macro lies except on SMP compiles
@@ -919,31 +926,20 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
         BUG ();
 #endif
 
-    qh->qh_state = QH_STATE_UNLINK;
-    ehci->reclaim = qh = qh_get (qh);
-
-    // dbg_qh ("start unlink", ehci, qh);
-
-    /* Remove the last QH (qhead)?  Stop async schedule first. */
-    if (unlikely (qh == ehci->async && qh->qh_next.qh == qh)) {
+    /* stop async schedule right now? */
+    if (unlikely (qh == ehci->async)) {
         /* can't get here without STS_ASS set */
         if (ehci->hcd.state != USB_STATE_HALT) {
             writel (cmd & ~CMD_ASE, &ehci->regs->command);
-            (void) handshake (&ehci->regs->status, STS_ASS, 0, 150);
-#if 0
-            // one VT8235 system wants to die with STS_FATAL
-            // unless this qh is leaked here.  others seem ok...
-            qh = qh_get (qh);
-            dbg_qh ("async/off", ehci, qh);
-#endif
+            wmb ();
+            // handshake later, if we need to
         }
-        qh->qh_next.qh = ehci->async = 0;
-        ehci->reclaim_ready = 1;
-        tasklet_schedule (&ehci->tasklet);
         return;
     }
 
+    qh->qh_state = QH_STATE_UNLINK;
+    ehci->reclaim = qh = qh_get (qh);
+
     if (unlikely (ehci->hcd.state == USB_STATE_HALT)) {
         ehci->reclaim_ready = 1;
         tasklet_schedule (&ehci->tasklet);
@@ -951,13 +947,9 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
     }
 
     prev = ehci->async;
-    while (prev->qh_next.qh != qh && prev->qh_next.qh != ehci->async)
+    while (prev->qh_next.qh != qh)
         prev = prev->qh_next.qh;
-    if (qh->hw_info1 & __constant_cpu_to_le32 (QH_HEAD)) {
-        ehci->async = prev;
-        prev->hw_info1 |= __constant_cpu_to_le32 (QH_HEAD);
-    }
     prev->hw_next = qh->hw_next;
     prev->qh_next = qh->qh_next;
     wmb ();
@@ -979,7 +971,7 @@ scan_async (struct ehci_hcd *ehci)
     unsigned        count;
 
 rescan:
-    qh = ehci->async;
+    qh = ehci->async->qh_next.qh;
     count = 0;
     if (likely (qh != 0)) {
         do {
@@ -991,25 +983,17 @@ scan_async (struct ehci_hcd *ehci)
                 /* concurrent unlink could happen here */
                 count += qh_completions (ehci, qh);
                 qh_put (ehci, qh);
+                goto rescan;
             }
 
             /* unlink idle entries, reducing HC PCI usage as
-             * well as HCD schedule-scanning costs.  removing
-             * the last qh is deferred, since it's costly.
+             * well as HCD schedule-scanning costs.
              *
              * FIXME don't unlink idle entries so quickly; it
             * can penalize (common) half duplex protocols.
             */
            if (list_empty (&qh->qtd_list) && !ehci->reclaim) {
-                if (qh->qh_next.qh != qh) {
-                    // dbg ("irq/empty");
-                    start_unlink_async (ehci, qh);
-                } else if (!timer_pending (&ehci->watchdog)) {
-                    /* can't use IAA for last entry */
-                    ehci->async_idle = 1;
-                    mod_timer (&ehci->watchdog,
-                        jiffies + EHCI_ASYNC_JIFFIES);
-                }
+                start_unlink_async (ehci, qh);
            }
 
            /* keep latencies down: let any irqs in */
@@ -1021,8 +1005,6 @@ scan_async (struct ehci_hcd *ehci)
            }
            qh = qh->qh_next.qh;
-            if (!qh)        /* unlinked? */
-                goto rescan;
-        } while (qh != ehci->async);
+        } while (qh);
    }
 }