Commit d0ce5c6b authored by Alan Stern, committed by Greg Kroah-Hartman

USB: EHCI: use a bandwidth-allocation table

This patch significantly changes the scheduling code in ehci-hcd.
Instead of calculating the current bandwidth utilization by trudging
through the schedule and adding up the times used by the existing
transfers, we will now maintain a table holding the time used for each
of 64 microframes.  This will drastically speed up the bandwidth
computations.
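
(Illustration, not part of the patch: since the allocation periods are powers of two capped at 64 microframes, the usage pattern repeats every 64 microframes, which is why a 64-entry table is enough. Below is a minimal sketch of how an admission check can be done straight from such a table; the names are assumptions for illustration, not the driver's actual code.)

/*
 * Sketch only: per-microframe usage is kept up to date in a 64-entry
 * table, so a check reads table entries instead of summing the times
 * of every qh/itd/sitd linked into the periodic schedule.
 */
static unsigned char hs_bw[64];         /* usecs allocated per microframe */

static int check_bandwidth(unsigned uphase, unsigned uperiod,
                unsigned usecs, unsigned limit_usecs)
{
        unsigned uf;

        /* uperiod is a power of two <= 64, uphase < uperiod */
        for (uf = uphase; uf < 64; uf += uperiod) {
                if (hs_bw[uf] + usecs > limit_usecs)
                        return -1;      /* would overcommit this uframe */
        }
        return 0;                       /* fits in every affected uframe */
}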

In addition, it eliminates a theoretical bug.  An isochronous endpoint
may have bandwidth reserved even at times when it has no transfers
listed in the schedule.  The table will keep track of the reserved
bandwidth, whereas adding up entries in the schedule would miss it.

As a corollary, we can keep bandwidth reserved for endpoints even
when they aren't in active use.  Eventually the bandwidth will be
reserved when a new alternate setting is installed; for now the
endpoint's reservation takes place when its first URB is submitted.

A drawback of this approach is that transfers with an interval larger
than 64 microframes will have to be charged for bandwidth as though
the interval was 64.  In practice this shouldn't matter much;
transfers with longer intervals tend to be rather short anyway (things
like hubs or HID devices).
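
(Worked example, not part of the patch: a high-speed interrupt endpoint with bInterval 11 has a real period of 2^10 = 1024 microframes, but for bandwidth accounting the qh_make() hunks below clamp that to 64, and also let a smaller urb->interval override it. A self-contained sketch of the clamping, with an assumed helper name:)

/* Sketch only -- the real logic is the min_t() clamp in qh_make() below. */
static unsigned hs_bw_uperiod(unsigned bInterval)
{
        unsigned uperiod = 1U << (bInterval - 1);  /* 2^(bInterval-1) uframes */

        if (uperiod > 64)       /* charge long intervals as if they were 64 */
                uperiod = 64;
        return uperiod;         /* e.g. hs_bw_uperiod(11) == 64, not 1024 */
}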

Another minor drawback is that we will keep track of two different
period and phase values: the actual ones and the ones used for
bandwidth allocation (which are limited to 64).  This adds only a
small amount of overhead: 3 bytes for each endpoint.

The patch also adds a new debugfs file named "bandwidth" to display
the information stored in the new table.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent ffa0248e
@@ -334,6 +334,7 @@ static inline void remove_debug_files (struct ehci_hcd *bus) { }
 /* troubleshooting help: expose state in debugfs */
 
 static int debug_async_open(struct inode *, struct file *);
+static int debug_bandwidth_open(struct inode *, struct file *);
 static int debug_periodic_open(struct inode *, struct file *);
 static int debug_registers_open(struct inode *, struct file *);
@@ -347,6 +348,13 @@ static const struct file_operations debug_async_fops = {
         .release        = debug_close,
         .llseek         = default_llseek,
 };
+static const struct file_operations debug_bandwidth_fops = {
+        .owner          = THIS_MODULE,
+        .open           = debug_bandwidth_open,
+        .read           = debug_output,
+        .release        = debug_close,
+        .llseek         = default_llseek,
+};
 static const struct file_operations debug_periodic_fops = {
         .owner          = THIS_MODULE,
         .open           = debug_periodic_open,
@@ -525,6 +533,41 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
         return strlen(buf->output_buf);
 }
+static ssize_t fill_bandwidth_buffer(struct debug_buffer *buf)
+{
+        struct ehci_hcd         *ehci;
+        unsigned                temp, size;
+        char                    *next;
+        unsigned                i;
+        u8                      *bw;
+
+        ehci = hcd_to_ehci(bus_to_hcd(buf->bus));
+        next = buf->output_buf;
+        size = buf->alloc_size;
+
+        *next = 0;
+
+        spin_lock_irq(&ehci->lock);
+
+        /* Dump the HS bandwidth table */
+        temp = scnprintf(next, size,
+                        "HS bandwidth allocation (us per microframe)\n");
+        size -= temp;
+        next += temp;
+        for (i = 0; i < EHCI_BANDWIDTH_SIZE; i += 8) {
+                bw = &ehci->bandwidth[i];
+                temp = scnprintf(next, size,
+                        "%2u: %4u%4u%4u%4u%4u%4u%4u%4u\n",
+                        i, bw[0], bw[1], bw[2], bw[3],
+                                bw[4], bw[5], bw[6], bw[7]);
+                size -= temp;
+                next += temp;
+        }
+
+        spin_unlock_irq(&ehci->lock);
+
+        return next - buf->output_buf;
+}
 
 #define DBG_SCHED_LIMIT 64
 static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
 {
@@ -919,6 +962,7 @@ static int debug_close(struct inode *inode, struct file *file)
         return 0;
 }
+
 static int debug_async_open(struct inode *inode, struct file *file)
 {
         file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
@@ -926,6 +970,14 @@ static int debug_async_open(struct inode *inode, struct file *file)
         return file->private_data ? 0 : -ENOMEM;
 }
 
+static int debug_bandwidth_open(struct inode *inode, struct file *file)
+{
+        file->private_data = alloc_buffer(inode->i_private,
+                                          fill_bandwidth_buffer);
+        return file->private_data ? 0 : -ENOMEM;
+}
+
 static int debug_periodic_open(struct inode *inode, struct file *file)
 {
         struct debug_buffer *buf;
@@ -958,6 +1010,10 @@ static inline void create_debug_files (struct ehci_hcd *ehci)
                         &debug_async_fops))
                 goto file_error;
 
+        if (!debugfs_create_file("bandwidth", S_IRUGO, ehci->debug_dir, bus,
+                                                &debug_bandwidth_fops))
+                goto file_error;
+
         if (!debugfs_create_file("periodic", S_IRUGO, ehci->debug_dir, bus,
                                                 &debug_periodic_fops))
                 goto file_error;
...
@@ -956,6 +956,7 @@ ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
                         goto idle_timeout;
 
                 /* BUG_ON(!list_empty(&stream->free_list)); */
+                reserve_release_iso_bandwidth(ehci, stream, -1);
                 kfree(stream);
                 goto done;
         }
@@ -982,6 +983,8 @@ ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
                 if (qh->clearing_tt)
                         goto idle_timeout;
                 if (list_empty (&qh->qtd_list)) {
+                        if (qh->ps.bw_uperiod)
+                                reserve_release_intr_bandwidth(ehci, qh, -1);
                         qh_destroy(ehci, qh);
                         break;
                 }
...
@@ -797,6 +797,8 @@ qh_make (
          * For control/bulk requests, the HC or TT handles these.
          */
         if (type == PIPE_INTERRUPT) {
+                unsigned        tmp;
+
                 qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
                                 is_input, 0,
                                 hb_mult(maxp) * max_packet(maxp)));
@@ -816,6 +818,14 @@ qh_make (
                                 urb->interval = ehci->periodic_size << 3;
                         }
                         qh->ps.period = urb->interval >> 3;
+
+                        /* period for bandwidth allocation */
+                        tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE,
+                                        1 << (urb->ep->desc.bInterval - 1));
+
+                        /* Allow urb->interval to override */
+                        qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval);
+                        qh->ps.bw_period = qh->ps.bw_uperiod >> 3;
                 } else {
                         int             think_time;
@@ -839,6 +849,15 @@ qh_make (
                         if (urb->interval > ehci->periodic_size)
                                 urb->interval = ehci->periodic_size;
                         qh->ps.period = urb->interval;
+
+                        /* period for bandwidth allocation */
+                        tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES,
+                                        urb->ep->desc.bInterval);
+                        tmp = rounddown_pow_of_two(tmp);
+
+                        /* Allow urb->interval to override */
+                        qh->ps.bw_period = min_t(unsigned, tmp, urb->interval);
+                        qh->ps.bw_uperiod = qh->ps.bw_period << 3;
                 }
         }
...
[The diff for one file is collapsed and not shown here.]
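
(The collapsed file presumably contains the new scheduling and table-maintenance code, including the reserve_release_intr_bandwidth() and reserve_release_iso_bandwidth() helpers called by the hunks above; it is not shown on this page. What follows is only a rough sketch of what a table-based reserve/release routine of this kind looks like; the name, signature, and details are assumptions, not the patch's actual code.)

/*
 * Rough sketch only -- not the collapsed driver code.  Walk the
 * 64-entry table at the endpoint's bandwidth period and either
 * charge (sign = +1) or refund (sign = -1) its per-microframe time.
 */
static void sketch_reserve_release(unsigned char *bw_table,
                unsigned bw_phase, unsigned bw_uperiod,
                unsigned usecs, int sign)
{
        unsigned uf;

        for (uf = bw_phase; uf < 64; uf += bw_uperiod) {
                if (sign > 0)
                        bw_table[uf] += usecs;          /* reserve */
                else
                        bw_table[uf] -= usecs;          /* release */
        }
        /* split transactions would also charge c_usecs at the
         * microframes named by the C-mask; omitted in this sketch */
}

Because reservations are recorded in the table rather than implied by whatever happens to be queued, explicitly releasing them (as the ehci_endpoint_disable() hunks above do with a sign of -1) is what keeps the table accurate when an endpoint goes idle.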
@@ -97,8 +97,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
 {
         struct ehci_hcd         *ehci;
         unsigned                uframe_periodic_max;
-        unsigned                frame, uframe;
-        unsigned short          allocated_max;
+        unsigned                uframe;
         unsigned long           flags;
         ssize_t                 ret;
@@ -122,16 +121,14 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
         /*
          * for request to decrease max periodic bandwidth, we have to check
-         * every microframe in the schedule to see whether the decrease is
-         * possible.
+         * to see whether the decrease is possible.
          */
         if (uframe_periodic_max < ehci->uframe_periodic_max) {
-                allocated_max = 0;
+                u8              allocated_max = 0;
 
-                for (frame = 0; frame < ehci->periodic_size; ++frame)
-                        for (uframe = 0; uframe < 7; ++uframe)
-                                allocated_max = max(allocated_max,
-                                        periodic_usecs (ehci, frame, uframe));
+                for (uframe = 0; uframe < EHCI_BANDWIDTH_SIZE; ++uframe)
+                        allocated_max = max(allocated_max,
+                                        ehci->bandwidth[uframe]);
 
                 if (allocated_max > uframe_periodic_max) {
                         ehci_info(ehci,
...
@@ -62,10 +62,16 @@ struct ehci_per_sched {
         struct usb_device       *udev;          /* access to the TT */
         struct usb_host_endpoint *ep;
         u16                     tt_usecs;       /* time on the FS/LS bus */
+        u16                     cs_mask;        /* C-mask and S-mask bytes */
         u16                     period;         /* actual period in frames */
         u16                     phase;          /* actual phase, frame part */
+        u8                      bw_phase;       /* same, for bandwidth
+                                                   reservation */
         u8                      phase_uf;       /* uframe part of the phase */
         u8                      usecs, c_usecs; /* times on the HS bus */
+        u8                      bw_uperiod;     /* period in microframes, for
+                                                   bandwidth reservation */
+        u8                      bw_period;      /* same, in frames */
 };
 
 #define NO_FRAME        29999                   /* frame not assigned yet */
@@ -245,6 +251,12 @@ struct ehci_hcd {                       /* one per controller */
         struct dentry           *debug_dir;
 #endif
 
+        /* bandwidth usage */
+#define EHCI_BANDWIDTH_SIZE     64
+#define EHCI_BANDWIDTH_FRAMES   (EHCI_BANDWIDTH_SIZE >> 3)
+        u8                      bandwidth[EHCI_BANDWIDTH_SIZE];
+                                                /* us allocated per uframe */
+
         /* platform-specific data -- must come last */
         unsigned long           priv[0] __aligned(sizeof(s64));
 };
@@ -469,7 +481,6 @@ struct ehci_iso_stream {
          */
         u16                     uperiod;        /* period in uframes */
         u16                     maxp;
-        u16                     raw_mask;
         unsigned                bandwidth;
 
         /* This is used to initialize iTD's hw_bufp fields */
...