Commit 97c17beb authored by David Brownell, committed by Greg Kroah-Hartman

[PATCH] ehci-hcd (1/2): portability (2.4), tasklet,

This should be innocuous; I expect most folk won't notice anything
better (or worse) from this patch unless they're using Intel EHCI.

removing tasklet
     - parts of davem's patch (passing pt_regs down; see the sketch after this list)
     - remove 'max_completions'
     - update cleanup after hc died
     - fix an urb unlink oops (null ptr) that happens more often this way
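(For context, a minimal sketch of the new calling convention; the real
signatures are in the diff below.)  The pt_regs pointer from the IRQ
handler is now threaded through end_unlink_async(), scan_async(),
scan_periodic(), qh_completions(), intr_complete() and itd_complete(),
so completions can hand it to usb_hcd_giveback_urb() instead of NULL:

	static void
	ehci_urb_done (struct ehci_hcd *ehci, struct urb *urb, struct pt_regs *regs)
	{
		/* complete() can reenter this HCD */
		spin_unlock (&ehci->lock);
		usb_hcd_giveback_urb (&ehci->hcd, urb, regs);	/* was: NULL */
		spin_lock (&ehci->lock);
	}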

talking to hardware
     - fixes for some short read issues (may still be others; sketched below)
	* use qtd->hw_alt_next to stop qh processing after short reads
	* detect/report short reads differently
     - longer reset timeout (it was excessively short, broke Intel)
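(Sketch of the short-read and reset changes, using the names from the
diff below rather than new code.)  Each bulk/control qtd's hw_alt_next
now points at the dummy qtd of the async head QH; on a short read the
controller follows hw_alt_next to that inactive dummy and the queue
stops advancing, instead of silently running past the remaining qtds.
The reset timeout is the one-line change shown later (handshake()'s
last argument being a microsecond count in this driver, that's
250 usec -> 250 msec):

	/* short reads park the queue on the async head's dummy */
	qtd->hw_alt_next = ehci->async->hw_alt_next;

	/* give the controller up to 250 msec (not 250 usec) to reset */
	return handshake (&ehci->regs->command, CMD_RESET, 0, 250 * 1000);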

other
     - simpler diagnostics, portable to 2.4: wrap dev_err() etc (usage example below)
     - wait and non-wait urb unlink codepaths share most code
     - don't try ehci_stop() in interrupt context (bug from hcd layer)
     - minor stuff, including
	* some "after hc died" paths were wrong
	* verbose debug messages compile again
	* don't break error irq count
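(Illustrative call site for the wrappers, lifted from the diff below.)
On 2.5.50+ kernels ehci_err() expands to dev_err(); on older kernels it
becomes a printk() tagged with the driver name and PCI slot name:

	ehci_err (ehci, "BIOS handoff failed (%d, %04x)\n",
		where, cap);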
parent 91ec8aa9
......@@ -18,21 +18,45 @@
/* this file is part of ehci-hcd.c */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,50)
#define ehci_dbg(ehci, fmt, args...) \
dev_dbg (*(ehci)->hcd.controller, fmt, ## args )
#define ehci_err(ehci, fmt, args...) \
dev_err (*(ehci)->hcd.controller, fmt, ## args )
#define ehci_info(ehci, fmt, args...) \
dev_info (*(ehci)->hcd.controller, fmt, ## args )
#define ehci_warn(ehci, fmt, args...) \
dev_warn (*(ehci)->hcd.controller, fmt, ## args )
#ifdef EHCI_VERBOSE_DEBUG
#define ehci_vdbg(ehci, fmt, args...) \
dev_dbg (*(ehci)->hcd.controller, fmt, ## args )
#else
#define ehci_vdbg(ehci, fmt, args...) do { } while (0)
#ifdef DEBUG
#define ehci_dbg(ehci, fmt, args...) \
printk(KERN_DEBUG "%s %s: " fmt, hcd_name, \
(ehci)->hcd.pdev->slot_name, ## args )
#else
#define ehci_dbg(ehci, fmt, args...) do { } while (0)
#endif
#define ehci_err(ehci, fmt, args...) \
printk(KERN_ERR "%s %s: " fmt, hcd_name, \
(ehci)->hcd.pdev->slot_name, ## args )
#define ehci_info(ehci, fmt, args...) \
printk(KERN_INFO "%s %s: " fmt, hcd_name, \
(ehci)->hcd.pdev->slot_name, ## args )
#define ehci_warn(ehci, fmt, args...) \
printk(KERN_WARNING "%s %s: " fmt, hcd_name, \
(ehci)->hcd.pdev->slot_name, ## args )
#endif
#ifdef EHCI_VERBOSE_DEBUG
# define vdbg dbg
# define ehci_vdbg ehci_dbg
#else
static inline void vdbg (char *fmt, ...) { }
# define vdbg(fmt,args...) do { } while (0)
# define ehci_vdbg(ehci, fmt, args...) do { } while (0)
#endif
#ifdef DEBUG
......@@ -289,7 +313,8 @@ static void qh_lines (struct ehci_qh *qh, char **nextp, unsigned *sizep)
scratch = cpu_to_le32p (&qh->hw_info1);
hw_curr = cpu_to_le32p (&qh->hw_current);
temp = snprintf (next, size, "qh/%p dev%d %cs ep%d %08x %08x (%08x %08x)",
temp = snprintf (next, size,
"qh/%p dev%d %cs ep%d %08x %08x (%08x %08x)",
qh, scratch & 0x007f,
speed_char (scratch),
(scratch >> 8) & 0x000f,
......
......@@ -123,12 +123,6 @@ static int log2_irq_thresh = 0; // 0 to 6
MODULE_PARM (log2_irq_thresh, "i");
MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
/* allow irqs at least every N URB completions */
static int max_completions = 16;
MODULE_PARM (max_completions, "i");
MODULE_PARM_DESC (max_completions,
"limit for urb completions called with irqs disenabled");
#define INTR_MASK (STS_IAA | STS_FATAL | STS_ERR | STS_INT)
/*-------------------------------------------------------------------------*/
......@@ -202,7 +196,7 @@ static int ehci_reset (struct ehci_hcd *ehci)
dbg_cmd (ehci, "reset", command);
writel (command, &ehci->regs->command);
ehci->hcd.state = USB_STATE_HALT;
return handshake (&ehci->regs->command, CMD_RESET, 0, 250);
return handshake (&ehci->regs->command, CMD_RESET, 0, 250 * 1000);
}
/* idle the controller (from running) */
......@@ -289,8 +283,7 @@ static int bios_handoff (struct ehci_hcd *ehci, int where, u32 cap)
pci_read_config_dword (ehci->hcd.pdev, where, &cap);
} while ((cap & (1 << 16)) && msec);
if (cap & (1 << 16)) {
dev_info (*ehci->hcd.controller,
"BIOS handoff failed (%d, %04x)\n",
ehci_err (ehci, "BIOS handoff failed (%d, %04x)\n",
where, cap);
return 1;
}
......@@ -333,8 +326,7 @@ static int ehci_start (struct usb_hcd *hcd)
return -EOPNOTSUPP;
break;
case 0: /* illegal reserved capability */
dev_warn (*ehci->hcd.controller,
"illegal capability!\n");
ehci_warn (ehci, "illegal capability!\n");
cap = 0;
/* FALLTHROUGH */
default: /* unknown */
......@@ -381,6 +373,8 @@ static int ehci_start (struct usb_hcd *hcd)
* dedicate a qh for the async ring head, since we couldn't unlink
* a 'real' qh without stopping the async schedule [4.8]. use it
* as the 'reclamation list head' too.
* its dummy is used in hw_alt_next of many tds, to prevent the qh
* from automatically advancing to the next td after short reads.
*/
ehci->async->qh_next.qh = 0;
ehci->async->hw_next = QH_NEXT (ehci->async->qh_dma);
......@@ -388,8 +382,7 @@ static int ehci_start (struct usb_hcd *hcd)
ehci->async->hw_token = cpu_to_le32 (QTD_STS_HALT);
ehci->async->hw_qtd_next = EHCI_LIST_END;
ehci->async->qh_state = QH_STATE_LINKED;
ehci_qtd_free (ehci, ehci->async->dummy);
ehci->async->dummy = 0;
ehci->async->hw_alt_next = QTD_NEXT (ehci->async->dummy->qtd_dma);
writel ((u32)ehci->async->qh_dma, &ehci->regs->async_next);
/*
......@@ -406,8 +399,7 @@ static int ehci_start (struct usb_hcd *hcd)
if (HCC_64BIT_ADDR (hcc_params)) {
writel (0, &ehci->regs->segment);
if (!pci_set_dma_mask (ehci->hcd.pdev, 0xffffffffffffffffULL))
dev_info (*ehci->hcd.controller,
"enabled 64bit PCI DMA (DAC)\n");
ehci_info (ehci, "enabled 64bit PCI DMA\n");
}
/* clear interrupt enables, set irq latency */
......@@ -454,7 +446,7 @@ static int ehci_start (struct usb_hcd *hcd)
/* PCI Serial Bus Release Number is at 0x60 offset */
pci_read_config_byte (hcd->pdev, 0x60, &tempbyte);
temp = readw (&ehci->caps->hci_version);
dev_info (*hcd->controller,
ehci_info (ehci,
"USB %x.%x enabled, EHCI %x.%02x, driver %s\n",
((tempbyte & 0xf0)>>4), (tempbyte & 0x0f),
temp >> 8, temp & 0xff, DRIVER_VERSION);
......@@ -494,9 +486,10 @@ static void ehci_stop (struct usb_hcd *hcd)
/* no more interrupts ... */
if (hcd->state == USB_STATE_RUNNING)
ehci_ready (ehci);
if (in_interrupt ()) /* should not happen!! */
dev_err (*hcd->controller, "stopped %s!\n", RUN_CONTEXT);
else
if (in_interrupt ()) { /* must not happen!! */
ehci_err (ehci, "stopped in_interrupt!\n");
return;
}
del_timer_sync (&ehci->watchdog);
ehci_reset (ehci);
......@@ -621,14 +614,15 @@ dbg ("%s: resume port %d", hcd_to_bus (hcd)->bus_name, i);
static void ehci_tasklet (unsigned long param)
{
struct ehci_hcd *ehci = (struct ehci_hcd *) param;
struct pt_regs *regs = NULL;
spin_lock_irq (&ehci->lock);
if (ehci->reclaim_ready)
end_unlink_async (ehci);
scan_async (ehci);
end_unlink_async (ehci, regs);
scan_async (ehci, regs);
if (ehci->next_uframe != -1)
scan_periodic (ehci);
scan_periodic (ehci, regs);
spin_unlock_irq (&ehci->lock);
}
......@@ -643,7 +637,7 @@ static void ehci_irq (struct usb_hcd *hcd, struct pt_regs *regs)
/* e.g. cardbus physical eject */
if (status == ~(u32) 0) {
dbg ("%s: device removed!", hcd_to_bus (hcd)->bus_name);
ehci_dbg (ehci, "device removed\n");
goto dead;
}
......@@ -681,8 +675,7 @@ static void ehci_irq (struct usb_hcd *hcd, struct pt_regs *regs)
/* PCI errors [4.15.2.4] */
if (unlikely ((status & STS_FATAL) != 0)) {
err ("%s: fatal error, state %x",
hcd_to_bus (hcd)->bus_name, hcd->state);
ehci_err (ehci, "fatal error\n");
dead:
ehci_reset (ehci);
/* generic layer kills/unlinks all urbs, then
......@@ -754,52 +747,52 @@ static int ehci_urb_enqueue (
static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
struct ehci_qh *qh;
unsigned long flags;
int maybe_irq = 1;
ehci_vdbg (ehci, "urb_dequeue %p qh %p state %d\n",
urb, qh, qh->qh_state);
spin_lock_irqsave (&ehci->lock, flags);
switch (usb_pipetype (urb->pipe)) {
// case PIPE_CONTROL:
// case PIPE_BULK:
default:
spin_lock_irqsave (&ehci->lock, flags);
if (ehci->reclaim) {
vdbg ("dq %p: reclaim = %p, %s",
qh, ehci->reclaim, RUN_CONTEXT);
if (qh == ehci->reclaim) {
/* unlinking qh for another queued urb? */
spin_unlock_irqrestore (&ehci->lock, flags);
return 0;
}
if (in_interrupt ()) {
spin_unlock_irqrestore (&ehci->lock, flags);
return -EAGAIN;
}
qh = (struct ehci_qh *) urb->hcpriv;
if (!qh)
break;
while (qh->qh_state == QH_STATE_LINKED
&& ehci->reclaim
&& ehci->hcd.state != USB_STATE_HALT
&& HCD_IS_RUNNING (ehci->hcd.state)
) {
spin_unlock_irqrestore (&ehci->lock, flags);
/* let pending unlinks complete */
if (maybe_irq) {
if (in_interrupt ())
return -EAGAIN;
maybe_irq = 0;
}
/* let pending unlinks complete, so this can start */
wait_ms (1);
spin_lock_irqsave (&ehci->lock, flags);
}
}
if (!HCD_IS_RUNNING (ehci->hcd.state) && ehci->reclaim)
end_unlink_async (ehci, NULL);
/* something else might have unlinked the qh by now */
if (qh->qh_state == QH_STATE_LINKED)
start_unlink_async (ehci, qh);
spin_unlock_irqrestore (&ehci->lock, flags);
break;
case PIPE_INTERRUPT:
spin_lock_irqsave (&ehci->lock, flags);
qh = (struct ehci_qh *) urb->hcpriv;
if (!qh)
break;
if (qh->qh_state == QH_STATE_LINKED) {
/* messy, can spin or block a microframe ... */
intr_deschedule (ehci, qh, 1);
/* qh_state == IDLE */
}
qh_completions (ehci, qh);
qh_completions (ehci, qh, NULL);
/* reschedule QH iff another request is queued */
if (!list_empty (&qh->qtd_list)
......@@ -817,7 +810,6 @@ static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
}
return status;
}
spin_unlock_irqrestore (&ehci->lock, flags);
break;
case PIPE_ISOCHRONOUS:
......@@ -828,6 +820,7 @@ static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
urb->transfer_flags |= EHCI_STATE_UNLINK;
break;
}
spin_unlock_irqrestore (&ehci->lock, flags);
return 0;
}
......
......@@ -315,7 +315,7 @@ static int ehci_hub_control (
wIndex + 1);
temp |= PORT_OWNER;
} else {
ehci_vdbg (ehci, "port %d reset", wIndex + 1);
ehci_vdbg (ehci, "port %d reset\n", wIndex + 1);
temp |= PORT_RESET;
temp &= ~PORT_PE;
......
......@@ -73,8 +73,11 @@ static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, int flags)
dma_addr_t dma;
qtd = pci_pool_alloc (ehci->qtd_pool, flags, &dma);
if (qtd != 0)
if (qtd != 0) {
ehci_qtd_init (qtd, dma);
if (ehci->async)
qtd->hw_alt_next = ehci->async->hw_alt_next;
}
return qtd;
}
......
......@@ -85,11 +85,12 @@ qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len, int token)
/* update halted (but potentially linked) qh */
static void qh_update (struct ehci_qh *qh, struct ehci_qtd *qtd)
static void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
qh->hw_current = 0;
qh->hw_qtd_next = QTD_NEXT (qtd->qtd_dma);
qh->hw_alt_next = EHCI_LIST_END;
qh->hw_alt_next = ehci->async->hw_alt_next;
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb ();
......@@ -98,7 +99,12 @@ static void qh_update (struct ehci_qh *qh, struct ehci_qtd *qtd)
/*-------------------------------------------------------------------------*/
static inline void qtd_copy_status (struct urb *urb, size_t length, u32 token)
static inline void qtd_copy_status (
struct ehci_hcd *ehci,
struct urb *urb,
size_t length,
u32 token
)
{
/* count IN/OUT bytes, not SETUP (even short packets) */
if (likely (QTD_PID (token) != 2))
......@@ -132,7 +138,7 @@ static inline void qtd_copy_status (struct urb *urb, size_t length, u32 token)
ehci_vdbg (ehci,
"dev%d ep%d%s qtd token %08x --> status %d\n",
usb_pipedev (urb->pipe),
usb_pipedevice (urb->pipe),
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
token, urb->status);
......@@ -157,9 +163,17 @@ static inline void qtd_copy_status (struct urb *urb, size_t length, u32 token)
}
}
}
/* force cleanup after short read; not necessarily an error */
if (unlikely (urb->status == -EINPROGRESS
&& QTD_LENGTH (token) != 0
&& QTD_PID (token) == 1)) {
urb->status = -EREMOTEIO;
}
}
static void ehci_urb_done (struct ehci_hcd *ehci, struct urb *urb)
static void
ehci_urb_done (struct ehci_hcd *ehci, struct urb *urb, struct pt_regs *regs)
{
if (likely (urb->hcpriv != 0)) {
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
......@@ -174,24 +188,26 @@ static void ehci_urb_done (struct ehci_hcd *ehci, struct urb *urb)
urb->hcpriv = 0;
}
if (likely (urb->status == -EINPROGRESS)) {
if (urb->actual_length != urb->transfer_buffer_length
&& (urb->transfer_flags & URB_SHORT_NOT_OK))
urb->status = -EREMOTEIO;
else
switch (urb->status) {
case -EINPROGRESS: /* success */
urb->status = 0;
}
if (likely (urb->status == 0))
default: /* fault */
COUNT (ehci->stats.complete);
else if (urb->status == -ECONNRESET || urb->status == -ENOENT)
break;
case -EREMOTEIO: /* fault or normal */
if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
urb->status = 0;
COUNT (ehci->stats.complete);
break;
case -ECONNRESET: /* canceled */
case -ENOENT:
COUNT (ehci->stats.unlink);
else
COUNT (ehci->stats.error);
break;
}
/* complete() can reenter this HCD */
spin_unlock (&ehci->lock);
usb_hcd_giveback_urb (&ehci->hcd, urb, NULL);
usb_hcd_giveback_urb (&ehci->hcd, urb, regs);
spin_lock (&ehci->lock);
}
......@@ -203,14 +219,14 @@ static void ehci_urb_done (struct ehci_hcd *ehci, struct urb *urb)
* indicating how much "real" work we did.
*/
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh, struct pt_regs *regs)
{
struct ehci_qtd *qtd, *last;
struct list_head *next, *qtd_list = &qh->qtd_list;
int unlink = 0, halted = 0;
int unlink = 0, stopped = 0;
unsigned count = 0;
if (unlikely (list_empty (qtd_list)))
if (unlikely (list_empty (&qh->qtd_list)))
return count;
/* scan QTDs till end of list, or we reach an active one */
......@@ -225,7 +241,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* clean up any state from previous QTD ...*/
if (last) {
if (likely (last->urb != urb)) {
ehci_urb_done (ehci, last->urb);
ehci_urb_done (ehci, last->urb, regs);
count++;
}
ehci_qtd_free (ehci, last);
......@@ -236,12 +252,14 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* QTDs at tail may be active if QH+HC are running,
* or when unlinking some urbs queued to this QH
*/
rmb ();
token = le32_to_cpu (qtd->hw_token);
halted = halted
stopped = stopped
|| (qh->qh_state == QH_STATE_IDLE)
|| (__constant_cpu_to_le32 (QTD_STS_HALT)
& qh->hw_token) != 0
|| (ehci->hcd.state == USB_STATE_HALT)
|| (qh->qh_state == QH_STATE_IDLE);
|| (qh->hw_current == ehci->async->hw_alt_next);
// FIXME Remove the automagic unlink mode.
// Drivers can now clean up safely; it's their job.
......@@ -257,7 +275,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* status copied below */
/* QH halts only because of fault (above) or unlink (here). */
} else if (unlikely (halted != 0)) {
} else if (unlikely (stopped != 0)) {
/* unlinking everything because of HC shutdown? */
if (ehci->hcd.state == USB_STATE_HALT) {
......@@ -293,7 +311,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
}
spin_lock (&urb->lock);
qtd_copy_status (urb, qtd->length, token);
qtd_copy_status (ehci, urb, qtd->length, token);
spin_unlock (&urb->lock);
list_del (&qtd->qtd_list);
......@@ -311,14 +329,14 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* last urb's completion might still need calling */
if (likely (last != 0)) {
ehci_urb_done (ehci, last->urb);
ehci_urb_done (ehci, last->urb, regs);
count++;
ehci_qtd_free (ehci, last);
}
/* reactivate queue after error and driver's cleanup */
if (unlikely (halted && !list_empty (qtd_list))) {
qh_update (qh, list_entry (qtd_list->next,
if (unlikely (stopped && !list_empty (&qh->qtd_list))) {
qh_update (ehci, qh, list_entry (qh->qtd_list.next,
struct ehci_qtd, qtd_list));
}
......@@ -420,7 +438,6 @@ qh_urb_transaction (
for (;;) {
int this_qtd_len;
qtd->urb = urb;
this_qtd_len = qtd_fill (qtd, buf, len, token);
len -= this_qtd_len;
buf += this_qtd_len;
......@@ -658,7 +675,7 @@ ehci_qh_make (
list_splice (qtd_list, &qh->qtd_list);
qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
qh_update (qh, qtd);
qh_update (ehci, qh, qtd);
} else {
qh->hw_qtd_next = qh->hw_alt_next = EHCI_LIST_END;
}
......@@ -784,6 +801,7 @@ static struct ehci_qh *qh_append_tds (
list_add (&dummy->qtd_list, qtd_list);
ehci_qtd_init (qtd, qtd->qtd_dma);
qtd->hw_alt_next = ehci->async->hw_alt_next;
qh->dummy = qtd;
/* hc must see the new dummy at list end */
......@@ -797,15 +815,6 @@ static struct ehci_qh *qh_append_tds (
/* no URB queued */
} else {
struct ehci_qtd *last_qtd;
/* make sure hc sees current dummy at the end */
last_qtd = list_entry (qtd_list->prev,
struct ehci_qtd, qtd_list);
last_qtd->hw_next = QTD_NEXT (qh->dummy->qtd_dma);
// dbg_qh ("empty qh", ehci, qh);
/* usb_clear_halt() means qh data toggle gets reset */
if (unlikely (!usb_gettoggle (urb->dev,
(epnum & 0x0f),
......@@ -813,8 +822,17 @@ static struct ehci_qh *qh_append_tds (
clear_toggle (urb->dev,
epnum & 0x0f, !(epnum & 0x10), qh);
}
if (qtd)
qh_update (qh, qtd);
/* make sure hc sees current dummy at the end */
if (qtd) {
struct ehci_qtd *last_qtd;
last_qtd = list_entry (qtd_list->prev,
struct ehci_qtd, qtd_list);
last_qtd->hw_next = QTD_NEXT (
qh->dummy->qtd_dma);
qh_update (ehci, qh, qtd);
}
}
list_splice (qtd_list, qh->qtd_list.prev);
......@@ -877,34 +895,38 @@ submit_async (
/*-------------------------------------------------------------------------*/
/* the async qh for the qtds being reclaimed are now unlinked from the HC */
/* caller must not own ehci->lock */
static void end_unlink_async (struct ehci_hcd *ehci)
static void end_unlink_async (struct ehci_hcd *ehci, struct pt_regs *regs)
{
struct ehci_qh *qh = ehci->reclaim;
del_timer (&ehci->watchdog);
qh->hw_next = cpu_to_le32 (qh->qh_dma);
qh->qh_state = QH_STATE_IDLE;
qh->qh_next.qh = 0;
qh_put (ehci, qh); // refcount from reclaim
ehci->reclaim = 0;
ehci->reclaim_ready = 0;
qh_completions (ehci, qh);
qh_completions (ehci, qh, regs);
if (!list_empty (&qh->qtd_list)
&& HCD_IS_RUNNING (ehci->hcd.state))
qh_link_async (ehci, qh);
else
else {
qh_put (ehci, qh); // refcount from async list
/* it's not free to turn the async schedule on/off, so we leave it
/* it's not free to turn the async schedule on/off; leave it
* active but idle for a while once it empties.
*/
if (!ehci->async->qh_next.qh && !timer_pending (&ehci->watchdog)) {
if (HCD_IS_RUNNING (ehci->hcd.state)
&& ehci->async->qh_next.qh == 0
&& !timer_pending (&ehci->watchdog)) {
ehci->async_idle = 1;
mod_timer (&ehci->watchdog, jiffies + EHCI_ASYNC_JIFFIES);
mod_timer (&ehci->watchdog,
jiffies + EHCI_ASYNC_JIFFIES);
}
}
}
......@@ -941,12 +963,6 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
qh->qh_state = QH_STATE_UNLINK;
ehci->reclaim = qh = qh_get (qh);
if (unlikely (ehci->hcd.state == USB_STATE_HALT)) {
ehci->reclaim_ready = 1;
tasklet_schedule (&ehci->tasklet);
return;
}
prev = ehci->async;
while (prev->qh_next.qh != qh)
prev = prev->qh_next.qh;
......@@ -955,6 +971,11 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
prev->qh_next = qh->qh_next;
wmb ();
if (unlikely (ehci->hcd.state == USB_STATE_HALT)) {
end_unlink_async (ehci, NULL);
return;
}
ehci->reclaim_ready = 0;
cmd |= CMD_IAAD;
writel (cmd, &ehci->regs->command);
......@@ -966,7 +987,7 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
/*-------------------------------------------------------------------------*/
static void
scan_async (struct ehci_hcd *ehci)
scan_async (struct ehci_hcd *ehci, struct pt_regs *regs)
{
struct ehci_qh *qh;
unsigned count;
......@@ -984,7 +1005,7 @@ scan_async (struct ehci_hcd *ehci)
* reporting drops the lock.
*/
qh = qh_get (qh);
temp = qh_completions (ehci, qh);
temp = qh_completions (ehci, qh, regs);
qh_put (ehci, qh);
if (temp != 0) {
count += temp;
......@@ -1002,14 +1023,6 @@ scan_async (struct ehci_hcd *ehci)
start_unlink_async (ehci, qh);
}
/* keep latencies down: let any irqs in */
if (count > max_completions) {
spin_unlock_irq (&ehci->lock);
cpu_relax ();
spin_lock_irq (&ehci->lock);
goto rescan;
}
qh = qh->qh_next.qh;
} while (qh);
}
......
......@@ -495,7 +495,8 @@ static unsigned
intr_complete (
struct ehci_hcd *ehci,
unsigned frame,
struct ehci_qh *qh
struct ehci_qh *qh,
struct pt_regs *regs
) {
unsigned count;
......@@ -509,7 +510,7 @@ intr_complete (
}
/* handle any completions */
count = qh_completions (ehci, qh);
count = qh_completions (ehci, qh, regs);
if (unlikely (list_empty (&qh->qtd_list)))
intr_deschedule (ehci, qh, 0);
......@@ -867,7 +868,8 @@ static unsigned
itd_complete (
struct ehci_hcd *ehci,
struct ehci_itd *itd,
unsigned uframe
unsigned uframe,
struct pt_regs *regs
) {
struct urb *urb = itd->urb;
struct usb_iso_packet_descriptor *desc;
......@@ -922,7 +924,7 @@ itd_complete (
/* complete() can reenter this HCD */
spin_unlock (&ehci->lock);
usb_hcd_giveback_urb (&ehci->hcd, urb, NULL);
usb_hcd_giveback_urb (&ehci->hcd, urb, regs);
spin_lock (&ehci->lock);
/* defer stopping schedule; completion can submit */
......@@ -973,7 +975,7 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
/*-------------------------------------------------------------------------*/
static void
scan_periodic (struct ehci_hcd *ehci)
scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs)
{
unsigned frame, clock, now_uframe, mod;
unsigned count = 0;
......@@ -999,14 +1001,6 @@ scan_periodic (struct ehci_hcd *ehci)
u32 type, *hw_p;
unsigned uframes;
/* keep latencies down: let any irqs in */
if (count > max_completions) {
spin_unlock_irq (&ehci->lock);
cpu_relax ();
count = 0;
spin_lock_irq (&ehci->lock);
}
restart:
/* scan schedule to _before_ current frame index */
if (frame == clock)
......@@ -1031,7 +1025,7 @@ scan_periodic (struct ehci_hcd *ehci)
temp = q.qh->qh_next;
type = Q_NEXT_TYPE (q.qh->hw_next);
count += intr_complete (ehci, frame,
qh_get (q.qh));
qh_get (q.qh), regs);
qh_put (ehci, q.qh);
q = temp;
break;
......@@ -1064,7 +1058,7 @@ scan_periodic (struct ehci_hcd *ehci)
/* might free q.itd ... */
count += itd_complete (ehci,
temp.itd, uf);
temp.itd, uf, regs);
break;
}
}
......