Commit 3c273a05 authored by Alan Stern, committed by Greg Kroah-Hartman

USB: EHCI: unlink multiple async QHs together

This patch (as1582) changes ehci-hcd's strategy for unlinking async
QHs.  Currently the driver never unlinks more than one QH at a time.
This can be inefficient and cause unnecessary delays, since a QH
cannot be reused while it is waiting to be unlinked.

The new strategy unlinks all the waiting QHs at once.  In practice the
improvement won't be very big, because it's somewhat uncommon to have
two or more QHs waiting to be unlinked at any time.  But it does
happen, and in any case, doing things this way makes more sense IMO.

The change requires the async unlinking code to be refactored
slightly.  Now in addition to the routines for starting and ending an
unlink, there are new routines for unlinking a single QH and starting
an IAA cycle.  This approach is needed because there are two separate
paths for unlinking async QHs:

	When a transfer error occurs or an URB is cancelled, the QH
	must be unlinked right away;

	When a QH has been idle sufficiently long, it is unlinked
	to avoid consuming DMA bandwidth uselessly.

In the first case we want the unlink to proceed as quickly as
possible, whereas in the second case we can afford to batch several
QHs together and unlink them all at once.  Hence the division of
labor.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 9d938747
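
As a rough illustration of the division of labor described above, here is a minimal userspace model of the batching scheme, not driver code: the QH is reduced to a name plus a list pointer, locking is omitted, and the IAA handshake with the controller is collapsed into direct function calls. The names single_unlink, start_iaa_cycle and end_iaa_cycle loosely mirror single_unlink_async(), start_iaa_cycle() and end_unlink_async() in the patch below.

/*
 * Toy model only: plain C, no EHCI registers, no spinlocks.
 * async_unlink holds QHs waiting for the *next* IAA cycle;
 * async_iaa holds the batch covered by the cycle in flight.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct qh {
        const char *name;
        struct qh *unlink_next;         /* singly linked wait list */
};

static struct qh *async_unlink;         /* waiting for the next IAA cycle */
static struct qh *async_unlink_last;
static struct qh *async_iaa;            /* covered by the current IAA cycle */
static bool async_unlinking;

/* Queue one QH on the wait list (cf. single_unlink_async) */
static void single_unlink(struct qh *qh)
{
        qh->unlink_next = NULL;
        if (async_unlink)
                async_unlink_last->unlink_next = qh;
        else
                async_unlink = qh;
        async_unlink_last = qh;
}

/* Claim the whole wait list for one IAA cycle (cf. start_iaa_cycle) */
static void start_iaa_cycle(void)
{
        if (async_iaa || async_unlinking)
                return;                 /* a cycle is already in progress */
        async_iaa = async_unlink;
        async_unlink = NULL;
        /* the real driver writes CMD_IAAD here and waits for the IAA IRQ */
}

/* Retire every QH in the batch (cf. end_unlink_async) */
static void end_iaa_cycle(void)
{
        async_unlinking = true;
        while (async_iaa) {
                struct qh *qh = async_iaa;

                async_iaa = qh->unlink_next;
                qh->unlink_next = NULL;
                printf("retired %s\n", qh->name);
        }
        async_unlinking = false;

        /* more QHs may have queued up while we were busy */
        if (async_unlink)
                start_iaa_cycle();
}

int main(void)
{
        struct qh a = { .name = "qh-a" };
        struct qh b = { .name = "qh-b" };

        single_unlink(&a);
        single_unlink(&b);
        start_iaa_cycle();              /* both QHs ride the same cycle */
        end_iaa_cycle();
        return 0;
}

The key property is that a QH queued while a cycle is already pending simply joins the wait list and is retired by that cycle or the next one, instead of forcing an IAA handshake of its own.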
@@ -795,7 +795,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
                 /* guard against (alleged) silicon errata */
                 if (cmd & CMD_IAAD)
                         ehci_dbg(ehci, "IAA with IAAD still set?\n");
-                if (ehci->async_unlink) {
+                if (ehci->async_iaa) {
                         COUNT(ehci->stats.iaa);
                         end_unlink_async(ehci);
                 } else
@@ -926,33 +926,6 @@ static int ehci_urb_enqueue (
         }
 }

-static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
-{
-        /* failfast */
-        if (ehci->rh_state < EHCI_RH_RUNNING && ehci->async_unlink)
-                end_unlink_async(ehci);
-
-        /* If the QH isn't linked then there's nothing we can do
-         * unless we were called during a giveback, in which case
-         * qh_completions() has to deal with it.
-         */
-        if (qh->qh_state != QH_STATE_LINKED) {
-                if (qh->qh_state == QH_STATE_COMPLETING)
-                        qh->needs_rescan = 1;
-                return;
-        }
-
-        /* defer till later if busy */
-        if (ehci->async_unlink) {
-                qh->qh_state = QH_STATE_UNLINK_WAIT;
-                ehci->async_unlink_last->unlink_next = qh;
-                ehci->async_unlink_last = qh;
-
-        /* start IAA cycle */
-        } else
-                start_unlink_async (ehci, qh);
-}
-
 /* remove from hardware lists
  * completions normally happen asynchronously
  */
@@ -979,7 +952,7 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                 switch (qh->qh_state) {
                 case QH_STATE_LINKED:
                 case QH_STATE_COMPLETING:
-                        unlink_async(ehci, qh);
+                        start_unlink_async(ehci, qh);
                         break;
                 case QH_STATE_UNLINK:
                 case QH_STATE_UNLINK_WAIT:
@@ -1070,7 +1043,7 @@ ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
                 * may already be unlinked.
                 */
                if (tmp)
-                        unlink_async(ehci, qh);
+                        start_unlink_async(ehci, qh);
                /* FALL THROUGH */
        case QH_STATE_UNLINK:           /* wait for hw to finish? */
        case QH_STATE_UNLINK_WAIT:
@@ -1133,7 +1106,7 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
                 * re-linking will call qh_refresh().
                 */
                if (eptype == USB_ENDPOINT_XFER_BULK)
-                        unlink_async(ehci, qh);
+                        start_unlink_async(ehci, qh);
                else
                        start_unlink_intr(ehci, qh);
        }
@@ -299,8 +299,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
        ehci_halt (ehci);

        ehci->rh_state = EHCI_RH_SUSPENDED;
-       if (ehci->async_unlink)
-               end_unlink_async(ehci);
+       end_unlink_async(ehci);
        ehci_handle_intr_unlinks(ehci);
        end_free_itds(ehci);
@@ -293,9 +293,6 @@ __acquires(ehci->lock)
        spin_lock (&ehci->lock);
 }

-static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
-static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
-
 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);

 /*
@@ -1166,82 +1163,114 @@ submit_async (

 /*-------------------------------------------------------------------------*/

-/* the async qh for the qtds being unlinked are now gone from the HC */
-
-static void end_unlink_async (struct ehci_hcd *ehci)
+static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
 {
-        struct ehci_qh          *qh = ehci->async_unlink;
-        struct ehci_qh          *next;
-
-        // qh->hw_next = cpu_to_hc32(qh->qh_dma);
-        qh->qh_state = QH_STATE_IDLE;
-        qh->qh_next.qh = NULL;
+        struct ehci_qh          *prev;
+
+        /* Add to the end of the list of QHs waiting for the next IAAD */
+        qh->qh_state = QH_STATE_UNLINK;
+        if (ehci->async_unlink)
+                ehci->async_unlink_last->unlink_next = qh;
+        else
+                ehci->async_unlink = qh;
+        ehci->async_unlink_last = qh;
+
+        /* Unlink it from the schedule */
+        prev = ehci->async;
+        while (prev->qh_next.qh != qh)
+                prev = prev->qh_next.qh;
+
+        prev->hw->hw_next = qh->hw->hw_next;
+        prev->qh_next = qh->qh_next;
+        if (ehci->qh_scan_next == qh)
+                ehci->qh_scan_next = qh->qh_next.qh;
+}

-        /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
-        next = qh->unlink_next;
-        ehci->async_unlink = next;
-        qh->unlink_next = NULL;
+static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
+{
+        /*
+         * Do nothing if an IAA cycle is already running or
+         * if one will be started shortly.
+         */
+        if (ehci->async_iaa || ehci->async_unlinking)
+                return;

-        qh_completions (ehci, qh);
+        /* Do all the waiting QHs at once */
+        ehci->async_iaa = ehci->async_unlink;
+        ehci->async_unlink = NULL;

-        if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING)
-                qh_link_async (ehci, qh);
+        /* If the controller isn't running, we don't have to wait for it */
+        if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
+                if (!nested)            /* Avoid recursion */
+                        end_unlink_async(ehci);

-        disable_async(ehci);
+        /* Otherwise start a new IAA cycle */
+        } else {
+                /* Make sure the unlinks are all visible to the hardware */
+                wmb();

-        if (next) {
-                ehci->async_unlink = NULL;
-                start_unlink_async (ehci, next);
+                ehci_writel(ehci, ehci->command | CMD_IAAD,
+                                &ehci->regs->command);
+                ehci_readl(ehci, &ehci->regs->command);
+                ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
         }
+}
+
+/* the async qh for the qtds being unlinked are now gone from the HC */
+
+static void end_unlink_async(struct ehci_hcd *ehci)
+{
+        struct ehci_qh          *qh;

         if (ehci->has_synopsys_hc_bug)
                 ehci_writel(ehci, (u32) ehci->async->qh_dma,
                             &ehci->regs->async_next);
+
+        /* Process the idle QHs */
+ restart:
+        ehci->async_unlinking = true;
+        while (ehci->async_iaa) {
+                qh = ehci->async_iaa;
+                ehci->async_iaa = qh->unlink_next;
+                qh->unlink_next = NULL;
+
+                qh->qh_state = QH_STATE_IDLE;
+                qh->qh_next.qh = NULL;
+
+                qh_completions(ehci, qh);
+                if (!list_empty(&qh->qtd_list) &&
+                                ehci->rh_state == EHCI_RH_RUNNING)
+                        qh_link_async(ehci, qh);
+                disable_async(ehci);
+        }
+        ehci->async_unlinking = false;
+
+        /* Start a new IAA cycle if any QHs are waiting for it */
+        if (ehci->async_unlink) {
+                start_iaa_cycle(ehci, true);
+                if (unlikely(ehci->rh_state < EHCI_RH_RUNNING))
+                        goto restart;
+        }
 }

 /* makes sure the async qh will become idle */
 /* caller must own ehci->lock */

-static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
+static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
 {
-        struct ehci_qh  *prev;
-
-#ifdef DEBUG
-        assert_spin_locked(&ehci->lock);
-        if (ehci->async_unlink
-                        || (qh->qh_state != QH_STATE_LINKED
-                                && qh->qh_state != QH_STATE_UNLINK_WAIT)
-                        )
-                BUG ();
-#endif
-
-        qh->qh_state = QH_STATE_UNLINK;
-        ehci->async_unlink = qh;
-        if (!qh->unlink_next)
-                ehci->async_unlink_last = qh;
-
-        prev = ehci->async;
-        while (prev->qh_next.qh != qh)
-                prev = prev->qh_next.qh;
-
-        prev->hw->hw_next = qh->hw->hw_next;
-        prev->qh_next = qh->qh_next;
-        if (ehci->qh_scan_next == qh)
-                ehci->qh_scan_next = qh->qh_next.qh;
-        wmb ();
-
-        /* If the controller isn't running, we don't have to wait for it */
-        if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
-                /* if (unlikely (qh->unlink_next != 0))
-                 *      this will recurse, probably not much
-                 */
-                end_unlink_async (ehci);
+        /*
+         * If the QH isn't linked then there's nothing we can do
+         * unless we were called during a giveback, in which case
+         * qh_completions() has to deal with it.
+         */
+        if (qh->qh_state != QH_STATE_LINKED) {
+                if (qh->qh_state == QH_STATE_COMPLETING)
+                        qh->needs_rescan = 1;
                 return;
         }

-        ehci_writel(ehci, ehci->command | CMD_IAAD, &ehci->regs->command);
-        (void)ehci_readl(ehci, &ehci->regs->command);
-        ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
+        single_unlink_async(ehci, qh);
+        start_iaa_cycle(ehci, false);
 }

 /*-------------------------------------------------------------------------*/
@@ -1269,11 +1298,11 @@ static void scan_async (struct ehci_hcd *ehci)
                         * drops the lock.  That's why ehci->qh_scan_next
                         * always holds the next qh to scan; if the next qh
                         * gets unlinked then ehci->qh_scan_next is adjusted
-                        * in start_unlink_async().
+                        * in single_unlink_async().
                         */
                        temp = qh_completions(ehci, qh);
                        if (qh->needs_rescan)
-                                unlink_async(ehci, qh);
+                                start_unlink_async(ehci, qh);
                        qh->unlink_time = jiffies + EHCI_SHRINK_JIFFIES;
                        if (temp != 0)
                                goto rescan;
@@ -211,6 +211,7 @@ static void ehci_handle_controller_death(struct ehci_hcd *ehci)
        ehci_writel(ehci, 0, &ehci->regs->configured_flag);
        ehci_writel(ehci, 0, &ehci->regs->intr_enable);
        ehci_work(ehci);
+       end_unlink_async(ehci);

        /* Not in process context, so don't try to reset the controller */
 }
@@ -304,7 +305,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
         * (a) SMP races against real IAA firing and retriggering, and
         * (b) clean HC shutdown, when IAA watchdog was pending.
         */
-       if (ehci->async_unlink) {
+       if (ehci->async_iaa) {
                u32 cmd, status;

                /* If we get here, IAA is *REALLY* late.  It's barely
@@ -114,12 +114,14 @@ struct ehci_hcd {                        /* one per controller */

        /* general schedule support */
        unsigned                scanning:1;
        bool                    intr_unlinking:1;
+       bool                    async_unlinking:1;

        /* async schedule support */
        struct ehci_qh          *async;
        struct ehci_qh          *dummy;         /* For AMD quirk use */
        struct ehci_qh          *async_unlink;
        struct ehci_qh          *async_unlink_last;
+       struct ehci_qh          *async_iaa;
        struct ehci_qh          *qh_scan_next;
        unsigned                async_count;    /* async activity count */