Commit 23d15e07 authored by Ajay Kumar Gupta, committed by Greg Kroah-Hartman

usb: musb: fix BULK request on different available endpoints

Fixes a co-working issue of a usb serial device with usb/net devices while
other endpoints are free and can be used. This patch implements the policy
that if endpoint resources are available, different BULK requests go to
different endpoints; otherwise they are multiplexed to the one reserved
endpoint, as is currently done.
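
In outline, the new policy looks like the sketch below. This is a simplified
sketch, not the literal driver code: find_free_ep() is a hypothetical
stand-in for the best-fit search inside musb_schedule(), and only the fields
relevant to the policy are shown.

	/* Sketch: give each BULK qh its own hw endpoint when one is free;
	 * fall back to the single reserved bulk endpoint (multiplexed)
	 * only when every other endpoint is already claimed.
	 */
	static int schedule_bulk_qh(struct musb *musb, struct musb_qh *qh, int is_in)
	{
		int best_end = find_free_ep(musb, qh);	/* hypothetical helper */

		if (best_end >= 0) {
			qh->mux = 0;		/* qh owns this endpoint */
			qh->hw_ep = musb->endpoints + best_end;
			return 0;
		}
		/* no free endpoint: share the reserved bulk endpoint */
		qh->mux = 1;
		qh->hw_ep = musb->bulk_ep;
		list_add_tail(&qh->ring, is_in ? &musb->in_bulk : &musb->out_bulk);
		return 0;
	}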

The switch statement cases in musb_giveback() are reordered to handle bulk
requests both in the multiplexed scenario and otherwise.
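
The reordering matters because the BULK case now relies on C fall-through: a
multiplexed qh (qh->mux == 1) is unlinked and the next qh on its ring takes
over, while a qh that owned its endpoint falls through into the
periodic-style teardown. Condensed from the musb_giveback() hunk below:

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		if (qh->mux == 1) {	/* shared endpoint: advance the ring */
			head = qh->ring.prev;
			list_del(&qh->ring);
			kfree(qh);
			qh = first_qh(head);
			break;
		}
		/* dedicated endpoint: fall through and free the qh */
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		musb->periodic[ep->epnum] = NULL;
		kfree(qh);
		qh = NULL;
		break;
	}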

A NAK limit scheme still has to be added for the multiplexed BULK request
scenario to avoid endpoint starvation by usb/net devices.
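
One plausible shape for such a scheme, sketched only to illustrate the
starvation problem (bulk_nak_timeout() is hypothetical; first_qh() and the
in_bulk/out_bulk rings exist in the driver): on a NAK timeout of the shared
endpoint, rotate the head qh to the tail of its ring so the other
multiplexed devices get serviced.

	/* Hypothetical NAK-limit handler for the shared bulk endpoint:
	 * rotate the current qh to the end of the ring for fairness;
	 * restarting the transfer for the new head qh is elided.
	 */
	static void bulk_nak_timeout(struct musb *musb, int is_in)
	{
		struct list_head *ring = is_in ? &musb->in_bulk : &musb->out_bulk;
		struct musb_qh *cur = first_qh(ring);

		if (cur && !list_is_singular(ring))
			list_move_tail(&cur->ring, ring);
	}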
Signed-off-by: Ajay Kumar Gupta <ajay.gupta@ti.com>
Signed-off-by: Felipe Balbi <felipe.balbi@nokia.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent b60c72ab
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -378,28 +378,30 @@ musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
 
 	switch (qh->type) {
 
-	case USB_ENDPOINT_XFER_ISOC:
-	case USB_ENDPOINT_XFER_INT:
-		/* this is where periodic bandwidth should be
-		 * de-allocated if it's tracked and allocated;
-		 * and where we'd update the schedule tree...
-		 */
-		musb->periodic[ep->epnum] = NULL;
-		kfree(qh);
-		qh = NULL;
-		break;
-
 	case USB_ENDPOINT_XFER_CONTROL:
 	case USB_ENDPOINT_XFER_BULK:
 		/* fifo policy for these lists, except that NAKing
 		 * should rotate a qh to the end (for fairness).
 		 */
-		head = qh->ring.prev;
-		list_del(&qh->ring);
-		kfree(qh);
-		qh = first_qh(head);
-		break;
+		if (qh->mux == 1) {
+			head = qh->ring.prev;
+			list_del(&qh->ring);
+			kfree(qh);
+			qh = first_qh(head);
+			break;
+		}
+
+	case USB_ENDPOINT_XFER_ISOC:
+	case USB_ENDPOINT_XFER_INT:
+		/* this is where periodic bandwidth should be
+		 * de-allocated if it's tracked and allocated;
+		 * and where we'd update the schedule tree...
+		 */
+		musb->periodic[ep->epnum] = NULL;
+		kfree(qh);
+		qh = NULL;
+		break;
 	}
 	return qh;
 }
@@ -1708,22 +1710,9 @@ static int musb_schedule(
 	struct list_head	*head = NULL;
 
 	/* use fixed hardware for control and bulk */
-	switch (qh->type) {
-	case USB_ENDPOINT_XFER_CONTROL:
+	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
 		head = &musb->control;
 		hw_ep = musb->control_ep;
-		break;
-	case USB_ENDPOINT_XFER_BULK:
-		hw_ep = musb->bulk_ep;
-		if (is_in)
-			head = &musb->in_bulk;
-		else
-			head = &musb->out_bulk;
-		break;
-	}
-	if (head) {
-		idle = list_empty(head);
-		list_add_tail(&qh->ring, head);
 		goto success;
 	}
@@ -1762,19 +1751,34 @@ static int musb_schedule(
 		else
 			diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
 
-		if (diff > 0 && best_diff > diff) {
+		if (diff >= 0 && best_diff > diff) {
 			best_diff = diff;
 			best_end = epnum;
 		}
 	}
-	if (best_end < 0)
+	/* use bulk reserved ep1 if no other ep is free */
+	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
+		hw_ep = musb->bulk_ep;
+		if (is_in)
+			head = &musb->in_bulk;
+		else
+			head = &musb->out_bulk;
+		goto success;
+	} else if (best_end < 0) {
 		return -ENOSPC;
+	}
 
 	idle = 1;
+	qh->mux = 0;
 	hw_ep = musb->endpoints + best_end;
 	musb->periodic[best_end] = qh;
 	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
 success:
+	if (head) {
+		idle = list_empty(head);
+		list_add_tail(&qh->ring, head);
+		qh->mux = 1;
+	}
 	qh->hw_ep = hw_ep;
 	qh->hep->hcpriv = qh;
 	if (idle)
@@ -2052,11 +2056,13 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		sched = &musb->control;
 		break;
 	case USB_ENDPOINT_XFER_BULK:
-		if (usb_pipein(urb->pipe))
-			sched = &musb->in_bulk;
-		else
-			sched = &musb->out_bulk;
-		break;
+		if (qh->mux == 1) {
+			if (usb_pipein(urb->pipe))
+				sched = &musb->in_bulk;
+			else
+				sched = &musb->out_bulk;
+			break;
+		}
 	default:
 		/* REVISIT when we get a schedule tree, periodic
 		 * transfers won't always be at the head of a
@@ -2104,11 +2110,13 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 		sched = &musb->control;
 		break;
 	case USB_ENDPOINT_XFER_BULK:
-		if (is_in)
-			sched = &musb->in_bulk;
-		else
-			sched = &musb->out_bulk;
-		break;
+		if (qh->mux == 1) {
+			if (is_in)
+				sched = &musb->in_bulk;
+			else
+				sched = &musb->out_bulk;
+			break;
+		}
 	default:
 		/* REVISIT when we get a schedule tree, periodic transfers
 		 * won't always be at the head of a singleton queue...
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -53,6 +53,7 @@ struct musb_qh {
 	struct list_head	ring;		/* of musb_qh */
 	/* struct musb_qh	*next; */	/* for periodic tree */
+	u8			mux;		/* qh multiplexed to hw_ep */
 
 	unsigned		offset;		/* in urb->transfer_buffer */
 	unsigned		segsize;	/* current xfer fragment */