Commit 7ed10dbc authored by Johannes Erdfelt, committed by Greg Kroah-Hartman

[PATCH] uhci-hcd for 2.5.15

So I finally hunkered down and got uhci-hcd working. I was surprised how
easy it was to make the modifications for hcd.c.

This patch has received a moderate amount of testing so far. I've played
with various devices and haven't had any problems. I won't claim it's
bug-free yet, and I'll continue doing more testing.

The patch includes a small change to hcd.c to not call free_config if
the HCD doesn't implement it.
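
A minimal sketch of what that guard looks like in hcd_free_dev() (paraphrasing
the hcd.c hunk further below):

    if (hcd->driver->free_config)
        hcd->driver->free_config (hcd, udev);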

There are some other messages that get printed, such as:

hcd.c: usb_hcd_pci_remove 00:10.0, count != 1

but from the code it appears this is just a soft warning that not all
of the references to the bus have been freed yet. This isn't a bug.

Differences from usb-uhci/usb-uhci-hcd:
- Cleaner code. uhci-hcd is based on uhci, so it maintains the same
  look and feel, as well as readability.
- Faster. The tests I've run so far show that uhci-hcd is faster than
  usb-uhci-hcd in every case.
- It does not have the watchdog-type feature for VIA chipsets. It's
  definitely possible to implement, but I'd like to find out what
  Windows does first.

Differences from uhci:
- Modified to use the hcd.c framework. This removed a significant amount
  of code and necessitated lots of little changes.
- Big endian support. I haven't been able to test it on a big endian
  machine yet, but at least 90+% of the work should be done. Once I get
  my PowerPC working again, I'll test this and submit any appropriate
  patches. This was the biggest functional change from uhci.c (see the
  short sketch after this list).
- No more urb->next processing. Completely ripped out.
- urb->interval support for Isochronous pipes.
- A couple of bug fixes for some problems I noticed while working on the
  code. These will be submitted for uhci.c in a separate email.
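
To illustrate the big endian item above: hardware-visible TD and QH fields are
kept little endian and only converted at the accessors, as uhci_fill_td() in
the new driver does. A short sketch of the idiom (taken from the code below,
not new functionality):

    static inline void uhci_fill_td(struct uhci_td *td, __u32 status,
            __u32 token, __u32 buffer)
    {
        /* swap CPU-order values into the little endian layout the HC reads */
        td->status = cpu_to_le32(status);
        td->token = cpu_to_le32(token);
        td->buffer = cpu_to_le32(buffer);
    }
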
parent a8025e16
@@ -1732,6 +1732,7 @@ static int hcd_free_dev (struct usb_device *udev)
 return -EINVAL;
 }
+if (hcd->driver->free_config)
 hcd->driver->free_config (hcd, udev);
 spin_lock_irqsave (&hcd_data_lock, flags);
...
@@ -4,7 +4,7 @@
 comment 'USB Host Controller Drivers'
 dep_tristate ' EHCI HCD (USB 2.0) support (EXPERIMENTAL)' CONFIG_USB_EHCI_HCD $CONFIG_USB $CONFIG_EXPERIMENTAL
 dep_tristate ' OHCI HCD support (EXPERIMENTAL)' CONFIG_USB_OHCI_HCD $CONFIG_USB $CONFIG_EXPERIMENTAL
-# dep_tristate ' UHCI HCD (most Intel and VIA) support (EXPERIMENTAL)' CONFIG_USB_UHCI_HCD $CONFIG_USB $CONFIG_EXPERIMENTAL
+dep_tristate ' UHCI HCD (most Intel and VIA) support (EXPERIMENTAL)' CONFIG_USB_UHCI_HCD $CONFIG_USB $CONFIG_EXPERIMENTAL
 if [ "$CONFIG_USB_UHCI_ALT" != "y" ]; then
 dep_tristate ' UHCI (Intel PIIX4, VIA, ...) support' CONFIG_USB_UHCI $CONFIG_USB
 fi
...
@@ -7,7 +7,7 @@ O_TARGET := host.o
 obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
 obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
-# obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
+obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
 obj-$(CONFIG_USB_UHCI) += usb-uhci.o
 obj-$(CONFIG_USB_UHCI_ALT) += uhci.o
...
/*
* UHCI-specific debugging code. Invaluable when something
* goes wrong, but don't get in my face.
*
* Kernel visible pointers are surrounded in []'s and bus
* visible pointers are surrounded in ()'s
*
* (C) Copyright 1999 Linus Torvalds
* (C) Copyright 1999-2001 Johannes Erdfelt
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/smp_lock.h>
#include <asm/io.h>
#include "uhci-hcd.h"
/* Handle REALLY large printk's so we don't overflow buffers */
static void inline lprintk(char *buf)
{
char *p;
/* Just write one line at a time */
while (buf) {
p = strchr(buf, '\n');
if (p)
*p = 0;
printk("%s\n", buf);
buf = p;
if (buf)
buf++;
}
}
static int inline uhci_is_skeleton_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
int i;
for (i = 0; i < UHCI_NUM_SKELTD; i++)
if (td == uhci->skeltd[i])
return 1;
return 0;
}
static int inline uhci_is_skeleton_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
int i;
for (i = 0; i < UHCI_NUM_SKELQH; i++)
if (qh == uhci->skelqh[i])
return 1;
return 0;
}
static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space)
{
char *out = buf;
char *spid;
u32 status, token;
/* Try to make sure there's enough memory */
if (len < 160)
return 0;
status = td_status(td);
out += sprintf(out, "%*s[%p] link (%08x) ", space, "", td, le32_to_cpu(td->link));
out += sprintf(out, "e%d %s%s%s%s%s%s%s%s%s%sLength=%x ",
((status >> 27) & 3),
(status & TD_CTRL_SPD) ? "SPD " : "",
(status & TD_CTRL_LS) ? "LS " : "",
(status & TD_CTRL_IOC) ? "IOC " : "",
(status & TD_CTRL_ACTIVE) ? "Active " : "",
(status & TD_CTRL_STALLED) ? "Stalled " : "",
(status & TD_CTRL_DBUFERR) ? "DataBufErr " : "",
(status & TD_CTRL_BABBLE) ? "Babble " : "",
(status & TD_CTRL_NAK) ? "NAK " : "",
(status & TD_CTRL_CRCTIMEO) ? "CRC/Timeo " : "",
(status & TD_CTRL_BITSTUFF) ? "BitStuff " : "",
status & 0x7ff);
token = td_token(td);
switch (uhci_packetid(token)) {
case USB_PID_SETUP:
spid = "SETUP";
break;
case USB_PID_OUT:
spid = "OUT";
break;
case USB_PID_IN:
spid = "IN";
break;
default:
spid = "?";
break;
}
out += sprintf(out, "MaxLen=%x DT%d EndPt=%x Dev=%x, PID=%x(%s) ",
token >> 21,
((token >> 19) & 1),
(token >> 15) & 15,
(token >> 8) & 127,
(token & 0xff),
spid);
out += sprintf(out, "(buf=%08x)\n", le32_to_cpu(td->buffer));
return out - buf;
}
static int uhci_show_sc(int port, unsigned short status, char *buf, int len)
{
char *out = buf;
/* Try to make sure there's enough memory */
if (len < 80)
return 0;
out += sprintf(out, " stat%d = %04x %s%s%s%s%s%s%s%s\n",
port,
status,
(status & USBPORTSC_SUSP) ? "PortSuspend " : "",
(status & USBPORTSC_PR) ? "PortReset " : "",
(status & USBPORTSC_LSDA) ? "LowSpeed " : "",
(status & USBPORTSC_RD) ? "ResumeDetect " : "",
(status & USBPORTSC_PEC) ? "EnableChange " : "",
(status & USBPORTSC_PE) ? "PortEnabled " : "",
(status & USBPORTSC_CSC) ? "ConnectChange " : "",
(status & USBPORTSC_CCS) ? "PortConnected " : "");
return out - buf;
}
static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
{
char *out = buf;
unsigned int io_addr = uhci->io_addr;
unsigned short usbcmd, usbstat, usbint, usbfrnum;
unsigned int flbaseadd;
unsigned char sof;
unsigned short portsc1, portsc2;
/* Try to make sure there's enough memory */
if (len < 80 * 6)
return 0;
usbcmd = inw(io_addr + 0);
usbstat = inw(io_addr + 2);
usbint = inw(io_addr + 4);
usbfrnum = inw(io_addr + 6);
flbaseadd = inl(io_addr + 8);
sof = inb(io_addr + 12);
portsc1 = inw(io_addr + 16);
portsc2 = inw(io_addr + 18);
out += sprintf(out, " usbcmd = %04x %s%s%s%s%s%s%s%s\n",
usbcmd,
(usbcmd & USBCMD_MAXP) ? "Maxp64 " : "Maxp32 ",
(usbcmd & USBCMD_CF) ? "CF " : "",
(usbcmd & USBCMD_SWDBG) ? "SWDBG " : "",
(usbcmd & USBCMD_FGR) ? "FGR " : "",
(usbcmd & USBCMD_EGSM) ? "EGSM " : "",
(usbcmd & USBCMD_GRESET) ? "GRESET " : "",
(usbcmd & USBCMD_HCRESET) ? "HCRESET " : "",
(usbcmd & USBCMD_RS) ? "RS " : "");
out += sprintf(out, " usbstat = %04x %s%s%s%s%s%s\n",
usbstat,
(usbstat & USBSTS_HCH) ? "HCHalted " : "",
(usbstat & USBSTS_HCPE) ? "HostControllerProcessError " : "",
(usbstat & USBSTS_HSE) ? "HostSystemError " : "",
(usbstat & USBSTS_RD) ? "ResumeDetect " : "",
(usbstat & USBSTS_ERROR) ? "USBError " : "",
(usbstat & USBSTS_USBINT) ? "USBINT " : "");
out += sprintf(out, " usbint = %04x\n", usbint);
out += sprintf(out, " usbfrnum = (%d)%03x\n", (usbfrnum >> 10) & 1,
0xfff & (4*(unsigned int)usbfrnum));
out += sprintf(out, " flbaseadd = %08x\n", flbaseadd);
out += sprintf(out, " sof = %02x\n", sof);
out += uhci_show_sc(1, portsc1, out, len - (out - buf));
out += uhci_show_sc(2, portsc2, out, len - (out - buf));
return out - buf;
}
static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
{
char *out = buf;
struct urb_priv *urbp;
struct list_head *head, *tmp;
struct uhci_td *td;
int i = 0, checked = 0, prevactive = 0;
/* Try to make sure there's enough memory */
if (len < 80 * 6)
return 0;
out += sprintf(out, "%*s[%p] link (%08x) element (%08x)\n", space, "",
qh, le32_to_cpu(qh->link), le32_to_cpu(qh->element));
if (qh->element & UHCI_PTR_QH)
out += sprintf(out, "%*s Element points to QH (bug?)\n", space, "");
if (qh->element & UHCI_PTR_DEPTH)
out += sprintf(out, "%*s Depth traverse\n", space, "");
if (qh->element & cpu_to_le32(8))
out += sprintf(out, "%*s Bit 3 set (bug?)\n", space, "");
if (!(qh->element & ~(UHCI_PTR_QH | UHCI_PTR_DEPTH)))
out += sprintf(out, "%*s Element is NULL (bug?)\n", space, "");
if (!qh->urbp) {
out += sprintf(out, "%*s urbp == NULL\n", space, "");
goto out;
}
urbp = qh->urbp;
head = &urbp->td_list;
tmp = head->next;
td = list_entry(tmp, struct uhci_td, list);
if (cpu_to_le32(td->dma_handle) != (qh->element & ~UHCI_PTR_BITS))
out += sprintf(out, "%*s Element != First TD\n", space, "");
while (tmp != head) {
struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->next;
out += sprintf(out, "%*s%d: ", space + 2, "", i++);
out += uhci_show_td(td, out, len - (out - buf), 0);
if (i > 10 && !checked && prevactive && tmp != head &&
debug <= 2) {
struct list_head *ntmp = tmp;
struct uhci_td *ntd = td;
int active = 1, ni = i;
checked = 1;
while (ntmp != head && ntmp->next != head && active) {
ntd = list_entry(ntmp, struct uhci_td, list);
ntmp = ntmp->next;
active = td_status(ntd) & TD_CTRL_ACTIVE;
ni++;
}
if (active && ni > i) {
out += sprintf(out, "%*s[skipped %d active TD's]\n", space, "", ni - i);
tmp = ntmp;
td = ntd;
i = ni;
}
}
prevactive = td_status(td) & TD_CTRL_ACTIVE;
}
if (list_empty(&urbp->queue_list) || urbp->queued)
goto out;
out += sprintf(out, "%*sQueued QH's:\n", -space, "--");
head = &urbp->queue_list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *nurbp = list_entry(tmp, struct urb_priv,
queue_list);
tmp = tmp->next;
out += uhci_show_qh(nurbp->qh, out, len - (out - buf), space);
}
out:
return out - buf;
}
static const char *td_names[] = {"skel_int1_td", "skel_int2_td",
"skel_int4_td", "skel_int8_td",
"skel_int16_td", "skel_int32_td",
"skel_int64_td", "skel_int128_td",
"skel_int256_td", "skel_term_td" };
static const char *qh_names[] = { "skel_ls_control_qh", "skel_hs_control_qh",
"skel_bulk_qh", "skel_term_qh" };
#define show_frame_num() \
if (!shown) { \
shown = 1; \
out += sprintf(out, "- Frame %d\n", i); \
}
#define show_td_name() \
if (!shown) { \
shown = 1; \
out += sprintf(out, "- %s\n", td_names[i]); \
}
#define show_qh_name() \
if (!shown) { \
shown = 1; \
out += sprintf(out, "- %s\n", qh_names[i]); \
}
static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
{
char *out = buf;
int i;
struct uhci_qh *qh;
struct uhci_td *td;
struct list_head *tmp, *head;
out += sprintf(out, "HC status\n");
out += uhci_show_status(uhci, out, len - (out - buf));
out += sprintf(out, "Frame List\n");
for (i = 0; i < UHCI_NUMFRAMES; ++i) {
int shown = 0;
td = uhci->fl->frame_cpu[i];
if (!td)
continue;
if (td->dma_handle != (dma_addr_t)uhci->fl->frame[i]) {
show_frame_num();
out += sprintf(out, " frame list does not match td->dma_handle!\n");
}
if (uhci_is_skeleton_td(uhci, td))
continue;
show_frame_num();
head = &td->fl_list;
tmp = head;
do {
td = list_entry(tmp, struct uhci_td, fl_list);
tmp = tmp->next;
out += uhci_show_td(td, out, len - (out - buf), 4);
} while (tmp != head);
}
out += sprintf(out, "Skeleton TD's\n");
for (i = UHCI_NUM_SKELTD - 1; i >= 0; i--) {
int shown = 0;
td = uhci->skeltd[i];
if (debug > 1) {
show_td_name();
out += uhci_show_td(td, out, len - (out - buf), 4);
}
if (list_empty(&td->fl_list)) {
/* TD 0 is the int1 TD and links to control_ls_qh */
if (!i) {
if (td->link !=
(cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH)) {
show_td_name();
out += sprintf(out, " skeleton TD not linked to ls_control QH!\n");
}
} else if (i < 9) {
if (td->link != cpu_to_le32(uhci->skeltd[i - 1]->dma_handle)) {
show_td_name();
out += sprintf(out, " skeleton TD not linked to next skeleton TD!\n");
}
} else {
show_td_name();
if (td->link != cpu_to_le32(td->dma_handle))
out += sprintf(out, " skel_term_td does not link to self\n");
/* Don't show it twice */
if (debug <= 1)
out += uhci_show_td(td, out, len - (out - buf), 4);
}
continue;
}
show_td_name();
head = &td->fl_list;
tmp = head->next;
while (tmp != head) {
td = list_entry(tmp, struct uhci_td, fl_list);
tmp = tmp->next;
out += uhci_show_td(td, out, len - (out - buf), 4);
}
if (!i) {
if (td->link !=
(cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH))
out += sprintf(out, " last TD not linked to ls_control QH!\n");
} else if (i < 9) {
if (td->link != cpu_to_le32(uhci->skeltd[i - 1]->dma_handle))
out += sprintf(out, " last TD not linked to next skeleton!\n");
}
}
out += sprintf(out, "Skeleton QH's\n");
for (i = 0; i < UHCI_NUM_SKELQH; ++i) {
int shown = 0;
qh = uhci->skelqh[i];
if (debug > 1) {
show_qh_name();
out += uhci_show_qh(qh, out, len - (out - buf), 4);
}
/* QH 3 is the Terminating QH, it's different */
if (i == 3) {
if (qh->link != UHCI_PTR_TERM) {
show_qh_name();
out += sprintf(out, " bandwidth reclamation on!\n");
}
if (qh->element != cpu_to_le32(uhci->skel_term_td->dma_handle)) {
show_qh_name();
out += sprintf(out, " skel_term_qh element is not set to skel_term_td\n");
}
}
if (list_empty(&qh->list)) {
if (i < 3) {
if (qh->link !=
(cpu_to_le32(uhci->skelqh[i + 1]->dma_handle) | UHCI_PTR_QH)) {
show_qh_name();
out += sprintf(out, " skeleton QH not linked to next skeleton QH!\n");
}
}
continue;
}
show_qh_name();
head = &qh->list;
tmp = head->next;
while (tmp != head) {
qh = list_entry(tmp, struct uhci_qh, list);
tmp = tmp->next;
out += uhci_show_qh(qh, out, len - (out - buf), 4);
}
if (i < 3) {
if (qh->link !=
(cpu_to_le32(uhci->skelqh[i + 1]->dma_handle) | UHCI_PTR_QH))
out += sprintf(out, " last QH not linked to next skeleton!\n");
}
}
return out - buf;
}
#ifdef CONFIG_PROC_FS
#define MAX_OUTPUT (PAGE_SIZE * 8)
static struct proc_dir_entry *uhci_proc_root = NULL;
struct uhci_proc {
int size;
char *data;
struct uhci_hcd *uhci;
};
static int uhci_proc_open(struct inode *inode, struct file *file)
{
const struct proc_dir_entry *dp = PDE(inode);
struct uhci_hcd *uhci = dp->data;
struct uhci_proc *up;
unsigned long flags;
int ret = -ENOMEM;
lock_kernel();
up = kmalloc(sizeof(*up), GFP_KERNEL);
if (!up)
goto out;
up->data = kmalloc(MAX_OUTPUT, GFP_KERNEL);
if (!up->data) {
kfree(up);
goto out;
}
spin_lock_irqsave(&uhci->frame_list_lock, flags);
up->size = uhci_sprint_schedule(uhci, up->data, MAX_OUTPUT);
spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
file->private_data = up;
ret = 0;
out:
unlock_kernel();
return ret;
}
static loff_t uhci_proc_lseek(struct file *file, loff_t off, int whence)
{
struct uhci_proc *up;
loff_t new = -1;
lock_kernel();
up = file->private_data;
switch (whence) {
case 0:
new = off;
break;
case 1:
new = file->f_pos + off;
break;
}
if (new < 0 || new > up->size) {
unlock_kernel();
return -EINVAL;
}
unlock_kernel();
return (file->f_pos = new);
}
static ssize_t uhci_proc_read(struct file *file, char *buf, size_t nbytes,
loff_t *ppos)
{
struct uhci_proc *up = file->private_data;
unsigned int pos;
unsigned int size;
pos = *ppos;
size = up->size;
if (pos >= size)
return 0;
if (nbytes >= size)
nbytes = size;
if (pos + nbytes > size)
nbytes = size - pos;
if (!access_ok(VERIFY_WRITE, buf, nbytes))
return -EINVAL;
copy_to_user(buf, up->data + pos, nbytes);
*ppos += nbytes;
return nbytes;
}
static int uhci_proc_release(struct inode *inode, struct file *file)
{
struct uhci_proc *up = file->private_data;
kfree(up->data);
kfree(up);
return 0;
}
static struct file_operations uhci_proc_operations = {
open: uhci_proc_open,
llseek: uhci_proc_lseek,
read: uhci_proc_read,
// write: uhci_proc_write,
release: uhci_proc_release,
};
#endif
/*
* Universal Host Controller Interface driver for USB.
*
* Maintainer: Johannes Erdfelt <johannes@erdfelt.com>
*
* (C) Copyright 1999 Linus Torvalds
* (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
* (C) Copyright 1999 Randy Dunlap
* (C) Copyright 1999 Georg Acher, acher@in.tum.de
* (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
* (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
* (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
* (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
* support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
* (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
*
* Intel documents this fairly well, and as far as I know there
* are no royalties or anything like that, but even so there are
* people who decided that they want to do the same thing in a
* completely different way.
*
* WARNING! The USB documentation is downright evil. Most of it
* is just crap, written by a committee. You're better off ignoring
* most of it, the important stuff is:
* - the low-level protocol (fairly simple but lots of small details)
* - working around the horridness of the rest
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_USB_DEBUG
#define DEBUG
#else
#undef DEBUG
#endif
#include <linux/usb.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include "../core/hcd.h"
#include "uhci-hcd.h"
#include <linux/pm.h>
/*
* Version Information
*/
#define DRIVER_VERSION "v2.0"
#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber"
#define DRIVER_DESC "USB Universal Host Controller Interface driver"
/*
* debug = 0, no debugging messages
* debug = 1, dump failed URB's except for stalls
* debug = 2, dump all failed URB's (including stalls)
* show all queues in /proc/uhci/hc*
* debug = 3, show all TD's in URB's when dumping
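*
* For example (assuming the module is built as uhci-hcd.o, per the Makefile
* above): modprobe uhci-hcd debug=2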
*/
#ifdef DEBUG
static int debug = 1;
#else
static int debug = 0;
#endif
MODULE_PARM(debug, "i");
MODULE_PARM_DESC(debug, "Debug level");
static char *errbuf;
#define ERRBUF_LEN (PAGE_SIZE * 8)
#include "uhci-hub.c"
#include "uhci-debug.c"
static kmem_cache_t *uhci_up_cachep; /* urb_priv */
static int uhci_get_current_frame_number(struct uhci_hcd *uhci);
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
static void uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb);
static int ports_active(struct uhci_hcd *uhci);
static void suspend_hc(struct uhci_hcd *uhci);
static void wakeup_hc(struct uhci_hcd *uhci);
/* If a transfer is still active after this much time, turn off FSBR */
#define IDLE_TIMEOUT (HZ / 20) /* 50 ms */
#define FSBR_DELAY (HZ / 20) /* 50 ms */
/* When we timeout an idle transfer for FSBR, we'll switch it over to */
/* depth first traversal. We'll do it in groups of this number of TD's */
/* to make sure it doesn't hog all of the bandwidth */
#define DEPTH_INTERVAL 5
#define MAX_URB_LOOP 2048 /* Maximum number of linked URB's */
/*
* Technically, updating td->status here is a race, but it's not really a
* problem. The worst that can happen is that we set the IOC bit again
* generating a spurious interrupt. We could fix this by creating another
* QH and leaving the IOC bit always set, but then we would have to play
* games with the FSBR code to make sure we get the correct order in all
* the cases. I don't think it's worth the effort
*/
static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
unsigned long flags;
spin_lock_irqsave(&uhci->frame_list_lock, flags);
uhci->skel_term_td->status |= cpu_to_le32(TD_CTRL_IOC);
spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
unsigned long flags;
spin_lock_irqsave(&uhci->frame_list_lock, flags);
uhci->skel_term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
static inline void uhci_add_complete(struct uhci_hcd *uhci, struct urb *urb)
{
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
unsigned long flags;
spin_lock_irqsave(&uhci->complete_list_lock, flags);
list_add(&urbp->complete_list, &uhci->complete_list);
spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
}
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
{
dma_addr_t dma_handle;
struct uhci_td *td;
td = pci_pool_alloc(uhci->td_pool, GFP_DMA | GFP_ATOMIC, &dma_handle);
if (!td)
return NULL;
td->dma_handle = dma_handle;
td->link = UHCI_PTR_TERM;
td->buffer = 0;
td->frame = -1;
td->dev = dev;
INIT_LIST_HEAD(&td->list);
INIT_LIST_HEAD(&td->fl_list);
usb_get_dev(dev);
return td;
}
static void inline uhci_fill_td(struct uhci_td *td, __u32 status,
__u32 token, __u32 buffer)
{
td->status = cpu_to_le32(status);
td->token = cpu_to_le32(token);
td->buffer = cpu_to_le32(buffer);
}
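/*
* Append a TD to the end of a skeleton TD's frame list. The new TD takes
* over the old tail's link pointer before the tail is pointed at it; the
* mb() keeps the controller from seeing a half-built chain.
*/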
static void uhci_insert_td(struct uhci_hcd *uhci, struct uhci_td *skeltd, struct uhci_td *td)
{
unsigned long flags;
struct uhci_td *ltd;
spin_lock_irqsave(&uhci->frame_list_lock, flags);
ltd = list_entry(skeltd->fl_list.prev, struct uhci_td, fl_list);
td->link = ltd->link;
mb();
ltd->link = cpu_to_le32(td->dma_handle);
list_add_tail(&td->fl_list, &skeltd->fl_list);
spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
* We insert Isochronous transfers directly into the frame list at the
* beginning
* The layout looks as follows:
* frame list pointer -> iso td's (if any) ->
* periodic interrupt td (if frame 0) -> irq td's -> control qh -> bulk qh
*/
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
unsigned long flags;
framenum %= UHCI_NUMFRAMES;
spin_lock_irqsave(&uhci->frame_list_lock, flags);
td->frame = framenum;
/* Is there a TD already mapped there? */
if (uhci->fl->frame_cpu[framenum]) {
struct uhci_td *ftd, *ltd;
ftd = uhci->fl->frame_cpu[framenum];
ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
list_add_tail(&td->fl_list, &ftd->fl_list);
td->link = ltd->link;
mb();
ltd->link = cpu_to_le32(td->dma_handle);
} else {
td->link = uhci->fl->frame[framenum];
mb();
uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
uhci->fl->frame_cpu[framenum] = td;
}
spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
unsigned long flags;
/* If it's not inserted, don't remove it */
spin_lock_irqsave(&uhci->frame_list_lock, flags);
if (td->frame == -1 && list_empty(&td->fl_list))
goto out;
if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
if (list_empty(&td->fl_list)) {
uhci->fl->frame[td->frame] = td->link;
uhci->fl->frame_cpu[td->frame] = NULL;
} else {
struct uhci_td *ntd;
ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
uhci->fl->frame_cpu[td->frame] = ntd;
}
} else {
struct uhci_td *ptd;
ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
ptd->link = td->link;
}
mb();
td->link = UHCI_PTR_TERM;
list_del_init(&td->fl_list);
td->frame = -1;
out:
spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
* Inserts a td into qh list at the top.
*/
static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, int breadth)
{
struct list_head *tmp, *head;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
struct uhci_td *td, *ptd;
if (list_empty(&urbp->td_list))
return;
head = &urbp->td_list;
tmp = head->next;
/* Ordering isn't important here yet since the QH hasn't been */
/* inserted into the schedule yet */
td = list_entry(tmp, struct uhci_td, list);
/* Add the first TD to the QH element pointer */
qh->element = cpu_to_le32(td->dma_handle) | (breadth ? 0 : UHCI_PTR_DEPTH);
ptd = td;
/* Then link the rest of the TD's */
tmp = tmp->next;
while (tmp != head) {
td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->next;
ptd->link = cpu_to_le32(td->dma_handle) | (breadth ? 0 : UHCI_PTR_DEPTH);
ptd = td;
}
ptd->link = UHCI_PTR_TERM;
}
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
/*
if (!list_empty(&td->list) || !list_empty(&td->fl_list))
dbg("td %p is still in URB list!", td);
*/
if (!list_empty(&td->list))
dbg("td %p is still in list!", td);
if (!list_empty(&td->fl_list))
dbg("td %p is still in fl_list!", td);
if (td->dev)
usb_put_dev(td->dev);
pci_pool_free(uhci->td_pool, td, td->dma_handle);
}
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
{
dma_addr_t dma_handle;
struct uhci_qh *qh;
qh = pci_pool_alloc(uhci->qh_pool, GFP_DMA | GFP_ATOMIC, &dma_handle);
if (!qh)
return NULL;
qh->dma_handle = dma_handle;
qh->element = UHCI_PTR_TERM;
qh->link = UHCI_PTR_TERM;
qh->dev = dev;
qh->urbp = NULL;
INIT_LIST_HEAD(&qh->list);
INIT_LIST_HEAD(&qh->remove_list);
usb_get_dev(dev);
return qh;
}
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
if (!list_empty(&qh->list))
dbg("qh %p list not empty!", qh);
if (!list_empty(&qh->remove_list))
dbg("qh %p still in remove_list!", qh);
if (qh->dev)
usb_put_dev(qh->dev);
pci_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
/*
* MUST be called with uhci->frame_list_lock acquired
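*
* Appends urb's QH (and any URBs already queued behind it) after the last
* QH on skelqh's list.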
*/
static void _uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
struct list_head *head, *tmp;
struct uhci_qh *lqh;
/* Grab the last QH */
lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);
if (lqh->urbp) {
head = &lqh->urbp->queue_list;
tmp = head->next;
while (head != tmp) {
struct urb_priv *turbp =
list_entry(tmp, struct urb_priv, queue_list);
tmp = tmp->next;
turbp->qh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
}
}
head = &urbp->queue_list;
tmp = head->next;
while (head != tmp) {
struct urb_priv *turbp =
list_entry(tmp, struct urb_priv, queue_list);
tmp = tmp->next;
turbp->qh->link = lqh->link;
}
urbp->qh->link = lqh->link;
mb(); /* Ordering is important */
lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
list_add_tail(&urbp->qh->list, &skelqh->list);
}
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
unsigned long flags;
spin_lock_irqsave(&uhci->frame_list_lock, flags);
_uhci_insert_qh(uhci, skelqh, urb);
spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
unsigned long flags;
struct uhci_qh *pqh;
if (!qh)
return;
qh->urbp = NULL;
/* Only go through the hoops if it's actually linked in */
spin_lock_irqsave(&uhci->frame_list_lock, flags);
if (!list_empty(&qh->list)) {
pqh = list_entry(qh->list.prev, struct uhci_qh, list);
if (pqh->urbp) {
struct list_head *head, *tmp;
head = &pqh->urbp->queue_list;
tmp = head->next;
while (head != tmp) {
struct urb_priv *turbp =
list_entry(tmp, struct urb_priv, queue_list);
tmp = tmp->next;
turbp->qh->link = qh->link;
}
}
pqh->link = qh->link;
mb();
qh->element = qh->link = UHCI_PTR_TERM;
list_del_init(&qh->list);
}
spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);
/* Check to see if the remove list is empty. Set the IOC bit */
/* to force an interrupt so we can remove the QH */
if (list_empty(&uhci->qh_remove_list))
uhci_set_next_interrupt(uhci);
list_add(&qh->remove_list, &uhci->qh_remove_list);
spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
}
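/*
* Walk the URB's TDs, rewriting their data toggles starting from 'toggle'
* and alternating; returns the toggle the next transfer should start with.
*/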
static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
{
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
struct list_head *head, *tmp;
head = &urbp->td_list;
tmp = head->next;
while (head != tmp) {
struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->next;
if (toggle)
td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
else
td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
toggle ^= 1;
}
return toggle;
}
/* This function will append one URB's QH to another URB's QH. This is for */
/* USB_QUEUE_BULK support for bulk transfers and soon implicitly for */
/* control transfers */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
struct list_head *tmp;
struct uhci_td *lltd;
unsigned long flags;
eurbp = eurb->hcpriv;
urbp = urb->hcpriv;
spin_lock_irqsave(&uhci->frame_list_lock, flags);
/* Find the first URB in the queue */
if (eurbp->queued) {
struct list_head *head = &eurbp->queue_list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *turbp =
list_entry(tmp, struct urb_priv, queue_list);
if (!turbp->queued)
break;
tmp = tmp->next;
}
} else
tmp = &eurbp->queue_list;
furbp = list_entry(tmp, struct urb_priv, queue_list);
lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);
lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe),
uhci_fixup_toggle(urb, uhci_toggle(td_token(lltd)) ^ 1));
/* All qh's in the queue need to link to the next queue */
urbp->qh->link = eurbp->qh->link;
mb(); /* Make sure we flush everything */
/* Only support bulk right now, so no depth */
lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
list_add_tail(&urbp->queue_list, &furbp->queue_list);
urbp->queued = 1;
spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
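/*
* Remove an URB from a bulk queue: fix up the data toggles of the URBs
* that follow it and relink its neighbours around it.
*/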
static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
struct urb_priv *urbp, *nurbp;
struct list_head *head, *tmp;
struct urb_priv *purbp;
struct uhci_td *pltd;
unsigned int toggle;
unsigned long flags;
urbp = urb->hcpriv;
spin_lock_irqsave(&uhci->frame_list_lock, flags);
if (list_empty(&urbp->queue_list))
goto out;
nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);
/* Fix up the toggle for the next URB's */
if (!urbp->queued)
/* We set the toggle when we unlink */
toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
else {
/* If we're in the middle of the queue, grab the toggle */
/* from the TD previous to us */
purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
queue_list);
pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
toggle = uhci_toggle(td_token(pltd)) ^ 1;
}
head = &urbp->queue_list;
tmp = head->next;
while (head != tmp) {
struct urb_priv *turbp;
turbp = list_entry(tmp, struct urb_priv, queue_list);
tmp = tmp->next;
if (!turbp->queued)
break;
toggle = uhci_fixup_toggle(turbp->urb, toggle);
}
usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe), toggle);
if (!urbp->queued) {
nurbp->queued = 0;
_uhci_insert_qh(uhci, uhci->skel_bulk_qh, nurbp->urb);
} else {
/* We're somewhere in the middle (or end). A bit trickier */
/* than the head scenario */
purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
queue_list);
pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
if (nurbp->queued)
pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
else
/* The next URB happens to be the beginning, so */
/* we're the last, end the chain */
pltd->link = UHCI_PTR_TERM;
}
list_del_init(&urbp->queue_list);
out:
spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
struct urb_priv *urbp;
urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
if (!urbp) {
err("uhci_alloc_urb_priv: couldn't allocate memory for urb_priv\n");
return NULL;
}
memset((void *)urbp, 0, sizeof(*urbp));
urbp->inserttime = jiffies;
urbp->fsbrtime = jiffies;
urbp->urb = urb;
urbp->dev = urb->dev;
INIT_LIST_HEAD(&urbp->td_list);
INIT_LIST_HEAD(&urbp->queue_list);
INIT_LIST_HEAD(&urbp->complete_list);
INIT_LIST_HEAD(&urbp->urb_list);
list_add_tail(&urbp->urb_list, &uhci->urb_list);
urb->hcpriv = urbp;
if (urb->transfer_buffer_length) {
urbp->transfer_buffer_dma_handle = pci_map_single(uhci->dev,
urb->transfer_buffer, urb->transfer_buffer_length,
usb_pipein(urb->pipe) ? PCI_DMA_FROMDEVICE :
PCI_DMA_TODEVICE);
if (!urbp->transfer_buffer_dma_handle)
return NULL;
}
if (usb_pipetype(urb->pipe) == PIPE_CONTROL && urb->setup_packet) {
urbp->setup_packet_dma_handle = pci_map_single(uhci->dev,
urb->setup_packet, sizeof(struct usb_ctrlrequest),
PCI_DMA_TODEVICE);
if (!urbp->setup_packet_dma_handle)
return NULL;
}
return urbp;
}
/*
* MUST be called with urb->lock acquired
*/
static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
td->urb = urb;
list_add_tail(&td->list, &urbp->td_list);
}
/*
* MUST be called with urb->lock acquired
*/
static void uhci_remove_td_from_urb(struct uhci_td *td)
{
if (list_empty(&td->list))
return;
list_del_init(&td->list);
td->urb = NULL;
}
/*
* MUST be called with urb->lock acquired
*/
static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
struct list_head *head, *tmp;
struct urb_priv *urbp;
urbp = (struct urb_priv *)urb->hcpriv;
if (!urbp)
return;
if (!list_empty(&urbp->urb_list))
warn("uhci_destroy_urb_priv: urb %p still on uhci->urb_list or uhci->remove_list", urb);
if (!list_empty(&urbp->complete_list))
warn("uhci_destroy_urb_priv: urb %p still on uhci->complete_list", urb);
head = &urbp->td_list;
tmp = head->next;
while (tmp != head) {
struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->next;
uhci_remove_td_from_urb(td);
uhci_remove_td(uhci, td);
uhci_free_td(uhci, td);
}
if (urbp->setup_packet_dma_handle) {
pci_unmap_single(uhci->dev, urbp->setup_packet_dma_handle,
sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
urbp->setup_packet_dma_handle = 0;
}
if (urbp->transfer_buffer_dma_handle) {
pci_unmap_single(uhci->dev, urbp->transfer_buffer_dma_handle,
urb->transfer_buffer_length, usb_pipein(urb->pipe) ?
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
urbp->transfer_buffer_dma_handle = 0;
}
urb->hcpriv = NULL;
kmem_cache_free(uhci_up_cachep, urbp);
}
static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
unsigned long flags;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
spin_lock_irqsave(&uhci->frame_list_lock, flags);
if ((!(urb->transfer_flags & USB_NO_FSBR)) && !urbp->fsbr) {
urbp->fsbr = 1;
if (!uhci->fsbr++ && !uhci->fsbrtimeout)
uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_hs_control_qh->dma_handle) | UHCI_PTR_QH;
}
spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
unsigned long flags;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
spin_lock_irqsave(&uhci->frame_list_lock, flags);
if ((!(urb->transfer_flags & USB_NO_FSBR)) && urbp->fsbr) {
urbp->fsbr = 0;
if (!--uhci->fsbr)
uhci->fsbrtimeout = jiffies + FSBR_DELAY;
}
spin_unlock_irqrestore(&uhci->frame_list_lock, flags);
}
/*
* Map status to standard result codes
*
* <status> is (td->status & 0xFE0000) [a.k.a. uhci_status_bits(td->status)]
* <dir_out> is True for output TDs and False for input TDs.
*/
static int uhci_map_status(int status, int dir_out)
{
if (!status)
return 0;
if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */
return -EPROTO;
if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */
if (dir_out)
return -ETIMEDOUT;
else
return -EILSEQ;
}
if (status & TD_CTRL_NAK) /* NAK */
return -ETIMEDOUT;
if (status & TD_CTRL_BABBLE) /* Babble */
return -EOVERFLOW;
if (status & TD_CTRL_DBUFERR) /* Buffer error */
return -ENOSR;
if (status & TD_CTRL_STALLED) /* Stalled */
return -EPIPE;
if (status & TD_CTRL_ACTIVE) /* Active */
return 0;
return -EINVAL;
}
/*
* Control transfers
*/
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb)
{
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
struct uhci_td *td;
struct uhci_qh *qh;
unsigned long destination, status;
int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
int len = urb->transfer_buffer_length;
dma_addr_t data = urbp->transfer_buffer_dma_handle;
/* The "pipe" thing contains the destination in bits 8--18 */
destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
/* 3 errors */
status = TD_CTRL_ACTIVE | uhci_maxerr(3);
if (urb->dev->speed == USB_SPEED_LOW)
status |= TD_CTRL_LS;
/*
* Build the TD for the control request
*/
td = uhci_alloc_td(uhci, urb->dev);
if (!td)
return -ENOMEM;
uhci_add_td_to_urb(urb, td);
uhci_fill_td(td, status, destination | uhci_explen(7),
urbp->setup_packet_dma_handle);
/*
* If direction is "send", change the frame from SETUP (0x2D)
* to OUT (0xE1). Else change it from SETUP to IN (0x69).
*/
destination ^= (USB_PID_SETUP ^ usb_packetid(urb->pipe));
if (!(urb->transfer_flags & USB_DISABLE_SPD))
status |= TD_CTRL_SPD;
/*
* Build the DATA TD's
*/
while (len > 0) {
int pktsze = len;
if (pktsze > maxsze)
pktsze = maxsze;
td = uhci_alloc_td(uhci, urb->dev);
if (!td)
return -ENOMEM;
/* Alternate Data0/1 (start with Data1) */
destination ^= TD_TOKEN_TOGGLE;
uhci_add_td_to_urb(urb, td);
uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
data);
data += pktsze;
len -= pktsze;
}
/*
* Build the final TD for control status
*/
td = uhci_alloc_td(uhci, urb->dev);
if (!td)
return -ENOMEM;
/*
* It's IN if the pipe is an output pipe or we're not expecting
* data back.
*/
destination &= ~TD_TOKEN_PID_MASK;
if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
destination |= USB_PID_IN;
else
destination |= USB_PID_OUT;
destination |= TD_TOKEN_TOGGLE; /* End in Data1 */
status &= ~TD_CTRL_SPD;
uhci_add_td_to_urb(urb, td);
uhci_fill_td(td, status | TD_CTRL_IOC,
destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);
qh = uhci_alloc_qh(uhci, urb->dev);
if (!qh)
return -ENOMEM;
urbp->qh = qh;
qh->urbp = urbp;
/* Low speed or small transfers gets a different queue and treatment */
if (urb->dev->speed == USB_SPEED_LOW) {
uhci_insert_tds_in_qh(qh, urb, 0);
uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urb);
} else {
uhci_insert_tds_in_qh(qh, urb, 1);
uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urb);
uhci_inc_fsbr(uhci, urb);
}
return -EINPROGRESS;
}
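/*
* A control transfer returned a short data packet. Throw away the TDs
* that already completed and requeue just the status TD on a fresh QH so
* the status phase still runs.
*/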
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
struct list_head *tmp, *head;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
urbp->short_control_packet = 1;
/* Create a new QH to avoid pointer overwriting problems */
uhci_remove_qh(uhci, urbp->qh);
/* Delete all of the TD's except for the status TD at the end */
head = &urbp->td_list;
tmp = head->next;
while (tmp != head && tmp->next != head) {
struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->next;
uhci_remove_td_from_urb(td);
uhci_remove_td(uhci, td);
uhci_free_td(uhci, td);
}
urbp->qh = uhci_alloc_qh(uhci, urb->dev);
if (!urbp->qh) {
err("unable to allocate new QH for control retrigger");
return -ENOMEM;
}
urbp->qh->urbp = urbp;
/* One TD, who cares about Breadth first? */
uhci_insert_tds_in_qh(urbp->qh, urb, 0);
/* Low speed or small transfers gets a different queue and treatment */
if (urb->dev->speed == USB_SPEED_LOW)
uhci_insert_qh(uhci, uhci->skel_ls_control_qh, urb);
else
uhci_insert_qh(uhci, uhci->skel_hs_control_qh, urb);
return -EINPROGRESS;
}
static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
struct list_head *tmp, *head;
struct urb_priv *urbp = urb->hcpriv;
struct uhci_td *td;
unsigned int status;
int ret = 0;
if (list_empty(&urbp->td_list))
return -EINVAL;
head = &urbp->td_list;
if (urbp->short_control_packet) {
tmp = head->prev;
goto status_phase;
}
tmp = head->next;
td = list_entry(tmp, struct uhci_td, list);
/* The first TD is the SETUP phase, check the status, but skip */
/* the count */
status = uhci_status_bits(td_status(td));
if (status & TD_CTRL_ACTIVE)
return -EINPROGRESS;
if (status)
goto td_error;
urb->actual_length = 0;
/* The rest of the TD's (but the last) are data */
tmp = tmp->next;
while (tmp != head && tmp->next != head) {
td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->next;
status = uhci_status_bits(td_status(td));
if (status & TD_CTRL_ACTIVE)
return -EINPROGRESS;
urb->actual_length += uhci_actual_length(td_status(td));
if (status)
goto td_error;
/* Check to see if we received a short packet */
if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
if (urb->transfer_flags & USB_DISABLE_SPD) {
ret = -EREMOTEIO;
goto err;
}
if (uhci_packetid(td_token(td)) == USB_PID_IN)
return usb_control_retrigger_status(uhci, urb);
else
return 0;
}
}
status_phase:
td = list_entry(tmp, struct uhci_td, list);
/* Control status phase */
status = td_status(td);
#ifdef I_HAVE_BUGGY_APC_BACKUPS
/* APC BackUPS Pro kludge */
/* It tries to send all of the descriptor instead of the amount */
/* we requested */
if (status & TD_CTRL_IOC && /* IOC is masked out by uhci_status_bits */
status & TD_CTRL_ACTIVE &&
status & TD_CTRL_NAK)
return 0;
#endif
if (status & TD_CTRL_ACTIVE)
return -EINPROGRESS;
if (uhci_status_bits(status))
goto td_error;
return 0;
td_error:
ret = uhci_map_status(status, uhci_packetout(td_token(td)));
if (ret == -EPIPE)
/* endpoint has stalled - mark it halted */
usb_endpoint_halt(urb->dev, uhci_endpoint(td_token(td)),
uhci_packetout(td_token(td)));
err:
if ((debug == 1 && ret != -EPIPE) || debug > 1) {
/* Some debugging code */
dbg("uhci_result_control() failed with status %x", status);
if (errbuf) {
/* Print the chain for debugging purposes */
uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
lprintk(errbuf);
}
}
return ret;
}
/*
* Interrupt transfers
*/
static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb)
{
struct uhci_td *td;
unsigned long destination, status;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
if (urb->transfer_buffer_length > usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)))
return -EINVAL;
/* The "pipe" thing contains the destination in bits 8--18 */
destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
status = TD_CTRL_ACTIVE | TD_CTRL_IOC;
if (urb->dev->speed == USB_SPEED_LOW)
status |= TD_CTRL_LS;
td = uhci_alloc_td(uhci, urb->dev);
if (!td)
return -ENOMEM;
destination |= (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT);
destination |= uhci_explen(urb->transfer_buffer_length - 1);
usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
uhci_add_td_to_urb(urb, td);
uhci_fill_td(td, status, destination, urbp->transfer_buffer_dma_handle);
uhci_insert_td(uhci, uhci->skeltd[__interval_to_skel(urb->interval)], td);
return -EINPROGRESS;
}
static int uhci_result_interrupt(struct uhci_hcd *uhci, struct urb *urb)
{
struct list_head *tmp, *head;
struct urb_priv *urbp = urb->hcpriv;
struct uhci_td *td;
unsigned int status;
int ret = 0;
urb->actual_length = 0;
head = &urbp->td_list;
tmp = head->next;
while (tmp != head) {
td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->next;
status = uhci_status_bits(td_status(td));
if (status & TD_CTRL_ACTIVE)
return -EINPROGRESS;
urb->actual_length += uhci_actual_length(td_status(td));
if (status)
goto td_error;
if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
if (urb->transfer_flags & USB_DISABLE_SPD) {
ret = -EREMOTEIO;
goto err;
} else
return 0;
}
}
return 0;
td_error:
ret = uhci_map_status(status, uhci_packetout(td_token(td)));
if (ret == -EPIPE)
/* endpoint has stalled - mark it halted */
usb_endpoint_halt(urb->dev, uhci_endpoint(td_token(td)),
uhci_packetout(td_token(td)));
err:
if ((debug == 1 && ret != -EPIPE) || debug > 1) {
/* Some debugging code */
dbg("uhci_result_interrupt/bulk() failed with status %x",
status);
if (errbuf) {
/* Print the chain for debugging purposes */
if (urbp->qh)
uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
else
uhci_show_td(td, errbuf, ERRBUF_LEN, 0);
lprintk(errbuf);
}
}
return ret;
}
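/*
* Re-arm a completed interrupt URB: reactivate its single TD with the
* next data toggle and mark the URB in progress again.
*/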
static void uhci_reset_interrupt(struct uhci_hcd *uhci, struct urb *urb)
{
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
struct uhci_td *td;
unsigned long flags;
spin_lock_irqsave(&urb->lock, flags);
td = list_entry(urbp->td_list.next, struct uhci_td, list);
td->status = (td->status & cpu_to_le32(0x2F000000)) | cpu_to_le32(TD_CTRL_ACTIVE | TD_CTRL_IOC);
td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
td->token |= cpu_to_le32(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT);
usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe));
urb->status = -EINPROGRESS;
spin_unlock_irqrestore(&urb->lock, flags);
}
/*
* Bulk transfers
*/
static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
struct uhci_td *td;
struct uhci_qh *qh;
unsigned long destination, status;
int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
int len = urb->transfer_buffer_length;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
dma_addr_t data = urbp->transfer_buffer_dma_handle;
if (len < 0)
return -EINVAL;
/* Can't have low speed bulk transfers */
if (urb->dev->speed == USB_SPEED_LOW)
return -EINVAL;
/* The "pipe" thing contains the destination in bits 8--18 */
destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
/* 3 errors */
status = TD_CTRL_ACTIVE | uhci_maxerr(3);
if (!(urb->transfer_flags & USB_DISABLE_SPD))
status |= TD_CTRL_SPD;
/*
* Build the DATA TD's
*/
do { /* Allow zero length packets */
int pktsze = len;
if (pktsze > maxsze)
pktsze = maxsze;
td = uhci_alloc_td(uhci, urb->dev);
if (!td)
return -ENOMEM;
uhci_add_td_to_urb(urb, td);
uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
data);
data += pktsze;
len -= maxsze;
usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe));
} while (len > 0);
/*
* USB_ZERO_PACKET means adding a 0-length packet, if
* direction is OUT and the transfer_length was an
* exact multiple of maxsze, hence
* (len = transfer_length - N * maxsze) == 0
* however, if transfer_length == 0, the zero packet
* was already prepared above.
*/
if (usb_pipeout(urb->pipe) && (urb->transfer_flags & USB_ZERO_PACKET) &&
!len && urb->transfer_buffer_length) {
td = uhci_alloc_td(uhci, urb->dev);
if (!td)
return -ENOMEM;
uhci_add_td_to_urb(urb, td);
uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
data);
usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe));
}
/* Set the flag on the last packet */
td->status |= cpu_to_le32(TD_CTRL_IOC);
qh = uhci_alloc_qh(uhci, urb->dev);
if (!qh)
return -ENOMEM;
urbp->qh = qh;
qh->urbp = urbp;
/* Always assume breadth first */
uhci_insert_tds_in_qh(qh, urb, 1);
if (urb->transfer_flags & USB_QUEUE_BULK && eurb)
uhci_append_queued_urb(uhci, eurb, urb);
else
uhci_insert_qh(uhci, uhci->skel_bulk_qh, urb);
uhci_inc_fsbr(uhci, urb);
return -EINPROGRESS;
}
/* We can use the result interrupt since they're identical */
#define uhci_result_bulk uhci_result_interrupt
/*
* Isochronous transfers
*/
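/*
* Scan the pending URBs on the same pipe and device and report the frame
* range they already occupy; returns non-zero if no previous URB was found.
*/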
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
{
struct urb *last_urb = NULL;
struct list_head *tmp, *head;
int ret = 0;
head = &uhci->urb_list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
struct urb *u = up->urb;
tmp = tmp->next;
/* look for pending URB's with identical pipe handle */
if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
(u->status == -EINPROGRESS) && (u != urb)) {
if (!last_urb)
*start = u->start_frame;
last_urb = u;
}
}
if (last_urb) {
*end = (last_urb->start_frame + last_urb->number_of_packets) & 1023;
ret = 0;
} else
ret = -1; /* no previous urb found */
return ret;
}
static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
{
int limits;
unsigned int start = 0, end = 0;
if (urb->number_of_packets > 900) /* 900? Why? */
return -EFBIG;
limits = isochronous_find_limits(uhci, urb, &start, &end);
if (urb->transfer_flags & USB_ISO_ASAP) {
if (limits) {
int curframe;
curframe = uhci_get_current_frame_number(uhci) % UHCI_NUMFRAMES;
urb->start_frame = (curframe + 10) % UHCI_NUMFRAMES;
} else
urb->start_frame = end;
} else {
urb->start_frame %= UHCI_NUMFRAMES;
/* FIXME: Sanity check */
}
return 0;
}
/*
* Isochronous transfers
*/
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
struct uhci_td *td;
int i, ret, frame;
int status, destination;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
ret = isochronous_find_start(uhci, urb);
if (ret)
return ret;
frame = urb->start_frame;
for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
if (!urb->iso_frame_desc[i].length)
continue;
td = uhci_alloc_td(uhci, urb->dev);
if (!td)
return -ENOMEM;
uhci_add_td_to_urb(urb, td);
uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
urbp->transfer_buffer_dma_handle + urb->iso_frame_desc[i].offset);
if (i + 1 >= urb->number_of_packets)
td->status |= cpu_to_le32(TD_CTRL_IOC);
uhci_insert_td_frame_list(uhci, td, frame);
}
return -EINPROGRESS;
}
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
struct list_head *tmp, *head;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
int status;
int i, ret = 0;
urb->actual_length = 0;
i = 0;
head = &urbp->td_list;
tmp = head->next;
while (tmp != head) {
struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
int actlength;
tmp = tmp->next;
if (td_status(td) & TD_CTRL_ACTIVE)
return -EINPROGRESS;
actlength = uhci_actual_length(td_status(td));
urb->iso_frame_desc[i].actual_length = actlength;
urb->actual_length += actlength;
status = uhci_map_status(uhci_status_bits(td_status(td)), usb_pipeout(urb->pipe));
urb->iso_frame_desc[i].status = status;
if (status) {
urb->error_count++;
ret = status;
}
i++;
}
return ret;
}
/*
* MUST be called with uhci->urb_list_lock acquired
*/
static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
{
struct list_head *tmp, *head;
/* We don't match Isoc transfers since they are special */
if (usb_pipeisoc(urb->pipe))
return NULL;
head = &uhci->urb_list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
struct urb *u = up->urb;
tmp = tmp->next;
if (u->dev == urb->dev && u->status == -EINPROGRESS) {
/* For control, ignore the direction */
if (usb_pipecontrol(urb->pipe) &&
(u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
return u;
else if (u->pipe == urb->pipe)
return u;
}
}
return NULL;
}
static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags)
{
int ret = -EINVAL;
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned long flags;
struct urb *eurb;
int bustime;
spin_lock_irqsave(&uhci->urb_list_lock, flags);
eurb = uhci_find_urb_ep(uhci, urb);
if (eurb && !(urb->transfer_flags & USB_QUEUE_BULK)) {
spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
return -ENXIO;
}
if (!uhci_alloc_urb_priv(uhci, urb)) {
spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
return -ENOMEM;
}
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
ret = uhci_submit_control(uhci, urb);
break;
case PIPE_INTERRUPT:
if (urb->bandwidth == 0) { /* not yet checked/allocated */
bustime = usb_check_bandwidth(urb->dev, urb);
if (bustime < 0)
ret = bustime;
else {
ret = uhci_submit_interrupt(uhci, urb);
if (ret == -EINPROGRESS)
usb_claim_bandwidth(urb->dev, urb, bustime, 0);
}
} else /* bandwidth is already set */
ret = uhci_submit_interrupt(uhci, urb);
break;
case PIPE_BULK:
ret = uhci_submit_bulk(uhci, urb, eurb);
break;
case PIPE_ISOCHRONOUS:
if (urb->bandwidth == 0) { /* not yet checked/allocated */
if (urb->number_of_packets <= 0) {
ret = -EINVAL;
break;
}
bustime = usb_check_bandwidth(urb->dev, urb);
if (bustime < 0) {
ret = bustime;
break;
}
ret = uhci_submit_isochronous(uhci, urb);
if (ret == -EINPROGRESS)
usb_claim_bandwidth(urb->dev, urb, bustime, 1);
} else /* bandwidth is already set */
ret = uhci_submit_isochronous(uhci, urb);
break;
}
spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
return 0;
}
/*
* Return the result of a transfer
*
* MUST be called with urb_list_lock acquired
*/
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
int ret = -EINVAL;
unsigned long flags;
struct urb_priv *urbp;
spin_lock_irqsave(&urb->lock, flags);
urbp = (struct urb_priv *)urb->hcpriv;
if (urb->status != -EINPROGRESS) {
info("uhci_transfer_result: called for URB %p not in flight?", urb);
goto out;
}
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
ret = uhci_result_control(uhci, urb);
break;
case PIPE_INTERRUPT:
ret = uhci_result_interrupt(uhci, urb);
break;
case PIPE_BULK:
ret = uhci_result_bulk(uhci, urb);
break;
case PIPE_ISOCHRONOUS:
ret = uhci_result_isochronous(uhci, urb);
break;
}
urbp->status = ret;
if (ret == -EINPROGRESS)
goto out;
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
case PIPE_BULK:
case PIPE_ISOCHRONOUS:
/* Release bandwidth for Interrupt or Isoc. transfers */
/* Spinlock needed ? */
if (urb->bandwidth)
usb_release_bandwidth(urb->dev, urb, 1);
uhci_unlink_generic(uhci, urb);
break;
case PIPE_INTERRUPT:
/* Interrupts are an exception */
if (urb->interval)
goto out_complete;
/* Release bandwidth for Interrupt or Isoc. transfers */
/* Spinlock needed ? */
if (urb->bandwidth)
usb_release_bandwidth(urb->dev, urb, 0);
uhci_unlink_generic(uhci, urb);
break;
default:
info("uhci_transfer_result: unknown pipe type %d for urb %p\n",
usb_pipetype(urb->pipe), urb);
}
/* Remove it from uhci->urb_list */
list_del_init(&urbp->urb_list);
out_complete:
uhci_add_complete(uhci, urb);
out:
spin_unlock_irqrestore(&urb->lock, flags);
}
/*
* MUST be called with urb->lock acquired
*/
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
struct list_head *head, *tmp;
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
int prevactive = 1;
/* We can get called when urbp allocation fails, so check */
if (!urbp)
return;
uhci_dec_fsbr(uhci, urb); /* Safe since it checks */
/*
* Now we need to find out what the last successful toggle was
* so we can update the local data toggle for the next transfer
*
* There are 3 ways the last successfully completed TD is found:
*
* 1) The TD is NOT active and the actual length < expected length
* 2) The TD is NOT active and it's the last TD in the chain
* 3) The TD is active and the previous TD is NOT active
*
* Control and Isochronous ignore the toggle, so this is safe
* for all types
*/
head = &urbp->td_list;
tmp = head->next;
while (tmp != head) {
struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->next;
if (!(td_status(td) & TD_CTRL_ACTIVE) &&
(uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td)) ||
tmp == head))
usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
uhci_packetout(td_token(td)),
uhci_toggle(td_token(td)) ^ 1);
else if ((td_status(td) & TD_CTRL_ACTIVE) && !prevactive)
usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
uhci_packetout(td_token(td)),
uhci_toggle(td_token(td)));
prevactive = td_status(td) & TD_CTRL_ACTIVE;
}
uhci_delete_queued_urb(uhci, urb);
/* The interrupt loop will reclaim the QH's */
uhci_remove_qh(uhci, urbp->qh);
urbp->qh = NULL;
}
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned long flags;
struct urb_priv *urbp = urb->hcpriv;
spin_lock_irqsave(&uhci->urb_list_lock, flags);
list_del_init(&urbp->urb_list);
uhci_unlink_generic(uhci, urb);
if (urb->transfer_flags & USB_ASYNC_UNLINK) {
urbp->status = urb->status = -ECONNABORTED;
spin_lock(&uhci->urb_remove_list_lock);
/* If we're the first, set the next interrupt bit */
if (list_empty(&uhci->urb_remove_list))
uhci_set_next_interrupt(uhci);
list_add(&urbp->urb_list, &uhci->urb_remove_list);
spin_unlock(&uhci->urb_remove_list_lock);
spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
} else {
urb->status = -ENOENT;
spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
if (in_interrupt()) { /* wait at least 1 frame */
static int errorcount = 10;
if (errorcount--)
dbg("uhci_urb_dequeue called from interrupt for urb %p", urb);
udelay(1000);
} else
schedule_timeout(1+1*HZ/1000);
uhci_finish_urb(hcd, urb);
}
return 0;
}
static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
struct list_head *head, *tmp;
int count = 0;
uhci_dec_fsbr(uhci, urb);
urbp->fsbr_timeout = 1;
/*
* Ideally we would want to fix qh->element as well, but it's
* read/write by the HC, so that can introduce a race. It's not
* really worth the hassle
*/
head = &urbp->td_list;
tmp = head->next;
while (tmp != head) {
struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
tmp = tmp->next;
		/*
		 * Make sure we don't do the last one (since it'll have the
		 * TERM bit set), and only mark every DEPTH_INTERVAL'th TD
		 * so this queue doesn't hog the bandwidth
		 */
if (tmp != head && (count % DEPTH_INTERVAL) == (DEPTH_INTERVAL - 1))
td->link |= UHCI_PTR_DEPTH;
count++;
}
return 0;
}
/*
* uhci_get_current_frame_number()
*
* returns the current frame number for a USB bus/controller.
*/
static int uhci_get_current_frame_number(struct uhci_hcd *uhci)
{
return inw(uhci->io_addr + USBFRNUM);
}
static int init_stall_timer(struct usb_hcd *hcd);
static void stall_callback(unsigned long ptr)
{
struct usb_hcd *hcd = (struct usb_hcd *)ptr;
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
struct list_head list, *tmp, *head;
unsigned long flags;
INIT_LIST_HEAD(&list);
spin_lock_irqsave(&uhci->urb_list_lock, flags);
head = &uhci->urb_list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
struct urb *u = up->urb;
tmp = tmp->next;
spin_lock(&u->lock);
/* Check if the FSBR timed out */
if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
uhci_fsbr_timeout(uhci, u);
/* Check if the URB timed out */
if (u->timeout && time_after_eq(jiffies, up->inserttime + u->timeout)) {
list_del(&up->urb_list);
list_add_tail(&up->urb_list, &list);
}
spin_unlock(&u->lock);
}
spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
head = &list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
struct urb *u = up->urb;
tmp = tmp->next;
u->transfer_flags |= USB_ASYNC_UNLINK | USB_TIMEOUT_KILLED;
uhci_urb_dequeue(hcd, u);
}
/* Really disable FSBR */
if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
uhci->fsbrtimeout = 0;
uhci->skel_term_qh->link = UHCI_PTR_TERM;
}
/* enter global suspend if nothing connected */
if (!uhci->is_suspended && !ports_active(uhci))
suspend_hc(uhci);
init_stall_timer(hcd);
}
static int init_stall_timer(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
init_timer(&uhci->stall_timer);
uhci->stall_timer.function = stall_callback;
uhci->stall_timer.data = (unsigned long)hcd;
uhci->stall_timer.expires = jiffies + (HZ / 10);
add_timer(&uhci->stall_timer);
return 0;
}
static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
{
struct list_head *tmp, *head;
unsigned long flags;
spin_lock_irqsave(&uhci->qh_remove_list_lock, flags);
head = &uhci->qh_remove_list;
tmp = head->next;
while (tmp != head) {
struct uhci_qh *qh = list_entry(tmp, struct uhci_qh, remove_list);
tmp = tmp->next;
list_del_init(&qh->remove_list);
uhci_free_qh(uhci, qh);
}
spin_unlock_irqrestore(&uhci->qh_remove_list_lock, flags);
}
static void uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb)
{
struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
struct usb_device *dev = urb->dev;
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
int killed, resubmit_interrupt, status;
unsigned long flags;
spin_lock_irqsave(&urb->lock, flags);
killed = (urb->status == -ENOENT || urb->status == -ECONNABORTED ||
urb->status == -ECONNRESET);
resubmit_interrupt = (usb_pipetype(urb->pipe) == PIPE_INTERRUPT &&
urb->interval);
if (urbp->transfer_buffer_dma_handle)
pci_dma_sync_single(uhci->dev, urbp->transfer_buffer_dma_handle,
urb->transfer_buffer_length, usb_pipein(urb->pipe) ?
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
if (urbp->setup_packet_dma_handle)
pci_dma_sync_single(uhci->dev, urbp->setup_packet_dma_handle,
sizeof(struct usb_ctrlrequest), PCI_DMA_TODEVICE);
status = urbp->status;
if (!resubmit_interrupt || killed)
/* We don't need urb_priv anymore */
uhci_destroy_urb_priv(uhci, urb);
if (!killed)
urb->status = status;
spin_unlock_irqrestore(&urb->lock, flags);
if (resubmit_interrupt)
urb->complete(urb);
else
usb_hcd_giveback_urb(hcd, urb);
if (resubmit_interrupt)
/* Recheck the status. The completion handler may have */
/* unlinked the resubmitting interrupt URB */
killed = (urb->status == -ENOENT ||
urb->status == -ECONNABORTED ||
urb->status == -ECONNRESET);
if (resubmit_interrupt && !killed) {
urb->dev = dev;
uhci_reset_interrupt(uhci, urb);
}
}
static void uhci_finish_completion(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
struct list_head *tmp, *head;
unsigned long flags;
spin_lock_irqsave(&uhci->complete_list_lock, flags);
head = &uhci->complete_list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *urbp = list_entry(tmp, struct urb_priv, complete_list);
struct urb *urb = urbp->urb;
list_del_init(&urbp->complete_list);
spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
uhci_finish_urb(hcd, urb);
spin_lock_irqsave(&uhci->complete_list_lock, flags);
head = &uhci->complete_list;
tmp = head->next;
}
spin_unlock_irqrestore(&uhci->complete_list_lock, flags);
}
static void uhci_remove_pending_qhs(struct uhci_hcd *uhci)
{
struct list_head *tmp, *head;
unsigned long flags;
spin_lock_irqsave(&uhci->urb_remove_list_lock, flags);
head = &uhci->urb_remove_list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
struct urb *urb = urbp->urb;
tmp = tmp->next;
list_del_init(&urbp->urb_list);
urbp->status = urb->status = -ECONNRESET;
uhci_add_complete(uhci, urb);
}
spin_unlock_irqrestore(&uhci->urb_remove_list_lock, flags);
}
static void uhci_irq(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned int io_addr = uhci->io_addr;
unsigned short status;
struct list_head *tmp, *head;
/*
* Read the interrupt status, and write it back to clear the
* interrupt cause
*/
status = inw(io_addr + USBSTS);
if (!status) /* shared interrupt, not mine */
return;
outw(status, io_addr + USBSTS); /* Clear it */
if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
if (status & USBSTS_HSE)
err("%x: host system error, PCI problems?", io_addr);
if (status & USBSTS_HCPE)
err("%x: host controller process error. something bad happened", io_addr);
if ((status & USBSTS_HCH) && !uhci->is_suspended) {
err("%x: host controller halted. very bad", io_addr);
/* FIXME: Reset the controller, fix the offending TD */
}
}
if (status & USBSTS_RD)
wakeup_hc(uhci);
uhci_free_pending_qhs(uhci);
uhci_remove_pending_qhs(uhci);
uhci_clear_next_interrupt(uhci);
/* Walk the list of pending URB's to see which ones completed */
spin_lock(&uhci->urb_list_lock);
head = &uhci->urb_list;
tmp = head->next;
while (tmp != head) {
struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
struct urb *urb = urbp->urb;
tmp = tmp->next;
/* Checks the status and does all of the magic necessary */
uhci_transfer_result(uhci, urb);
}
spin_unlock(&uhci->urb_list_lock);
uhci_finish_completion(hcd);
}
static void reset_hc(struct uhci_hcd *uhci)
{
unsigned int io_addr = uhci->io_addr;
/* Global reset for 50ms */
outw(USBCMD_GRESET, io_addr + USBCMD);
wait_ms(50);
outw(0, io_addr + USBCMD);
wait_ms(10);
}
static void suspend_hc(struct uhci_hcd *uhci)
{
unsigned int io_addr = uhci->io_addr;
dbg("%x: suspend_hc", io_addr);
outw(USBCMD_EGSM, io_addr + USBCMD);
uhci->is_suspended = 1;
}
static void wakeup_hc(struct uhci_hcd *uhci)
{
unsigned int io_addr = uhci->io_addr;
unsigned int status;
dbg("%x: wakeup_hc", io_addr);
outw(0, io_addr + USBCMD);
/* wait for EOP to be sent */
status = inw(io_addr + USBCMD);
while (status & USBCMD_FGR)
status = inw(io_addr + USBCMD);
uhci->is_suspended = 0;
/* Run and mark it configured with a 64-byte max packet */
outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
}
static int ports_active(struct uhci_hcd *uhci)
{
unsigned int io_addr = uhci->io_addr;
int connection = 0;
int i;
for (i = 0; i < uhci->rh_numports; i++)
connection |= (inw(io_addr + USBPORTSC1 + i * 2) & 0x1);
return connection;
}
static void start_hc(struct uhci_hcd *uhci)
{
unsigned int io_addr = uhci->io_addr;
int timeout = 1000;
/*
* Reset the HC - this will force us to get a
* new notification of any already connected
* ports due to the virtual disconnect that it
* implies.
*/
outw(USBCMD_HCRESET, io_addr + USBCMD);
while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
if (!--timeout) {
printk(KERN_ERR "uhci: USBCMD_HCRESET timed out!\n");
break;
}
}
/* Turn on all interrupts */
outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
io_addr + USBINTR);
/* Start at frame 0 */
outw(0, io_addr + USBFRNUM);
outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);
/* Run and mark it configured with a 64-byte max packet */
outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
}
#ifdef CONFIG_PROC_FS
static int uhci_num = 0;
#endif
/*
* De-allocate all resources..
*/
static void release_uhci(struct uhci_hcd *uhci)
{
int i;
#ifdef CONFIG_PROC_FS
char buf[8];
#endif
for (i = 0; i < UHCI_NUM_SKELQH; i++)
if (uhci->skelqh[i]) {
uhci_free_qh(uhci, uhci->skelqh[i]);
uhci->skelqh[i] = NULL;
}
for (i = 0; i < UHCI_NUM_SKELTD; i++)
if (uhci->skeltd[i]) {
uhci_free_td(uhci, uhci->skeltd[i]);
uhci->skeltd[i] = NULL;
}
if (uhci->qh_pool) {
pci_pool_destroy(uhci->qh_pool);
uhci->qh_pool = NULL;
}
if (uhci->td_pool) {
pci_pool_destroy(uhci->td_pool);
uhci->td_pool = NULL;
}
if (uhci->fl) {
pci_free_consistent(uhci->dev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
uhci->fl = NULL;
}
#ifdef CONFIG_PROC_FS
if (uhci->proc_entry) {
sprintf(buf, "hc%d", uhci->num);
remove_proc_entry(buf, uhci_proc_root);
uhci->proc_entry = NULL;
}
#endif
}
/*
* Allocate a frame list, and then setup the skeleton
*
* The hardware doesn't really know any difference
* in the queues, but the order does matter for the
* protocols higher up. The order is:
*
 * - any isochronous events are handled before any
 *   of the queues. We don't do that here, because
 *   we'll create the actual TD entries on demand.
 * - The first queue is the interrupt queue.
 * - The second queue is the control queue, split into low and high speed
 * - The third queue is the bulk queue.
* - The fourth queue is the bandwidth reclamation queue, which loops back
* to the high speed control queue.
*/
static int __devinit uhci_start(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
struct pci_dev *dev = hcd->pdev;
int retval = -EBUSY;
int i, port;
dma_addr_t dma_handle;
#ifdef CONFIG_PROC_FS
char buf[8];
struct proc_dir_entry *ent;
#endif
uhci->dev = dev;
/* Should probably move to core/hcd.c */
if (pci_set_dma_mask(dev, 0xFFFFFFFF)) {
err("couldn't set PCI dma mask");
retval = -ENODEV;
goto err_pci_set_dma_mask;
}
uhci->io_addr = pci_resource_start(dev, hcd->region);
uhci->io_size = pci_resource_len(dev, hcd->region);
#ifdef CONFIG_PROC_FS
uhci->num = uhci_num++;
sprintf(buf, "hc%d", uhci->num);
ent = create_proc_entry(buf, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
if (!ent) {
err("couldn't create uhci proc entry");
retval = -ENOMEM;
goto err_create_proc_entry;
}
ent->data = uhci;
ent->proc_fops = &uhci_proc_operations;
ent->size = 0;
uhci->proc_entry = ent;
#endif
/* Reset here so we don't get any interrupts from an old setup */
/* or broken setup */
reset_hc(uhci);
uhci->fsbr = 0;
uhci->fsbrtimeout = 0;
uhci->is_suspended = 0;
spin_lock_init(&uhci->qh_remove_list_lock);
INIT_LIST_HEAD(&uhci->qh_remove_list);
spin_lock_init(&uhci->urb_remove_list_lock);
INIT_LIST_HEAD(&uhci->urb_remove_list);
spin_lock_init(&uhci->urb_list_lock);
INIT_LIST_HEAD(&uhci->urb_list);
spin_lock_init(&uhci->complete_list_lock);
INIT_LIST_HEAD(&uhci->complete_list);
spin_lock_init(&uhci->frame_list_lock);
uhci->fl = pci_alloc_consistent(dev, sizeof(*uhci->fl), &dma_handle);
if (!uhci->fl) {
err("unable to allocate consistent memory for frame list");
goto err_alloc_fl;
}
memset((void *)uhci->fl, 0, sizeof(*uhci->fl));
uhci->fl->dma_handle = dma_handle;
uhci->td_pool = pci_pool_create("uhci_td", dev,
sizeof(struct uhci_td), 16, 0, GFP_DMA | GFP_ATOMIC);
if (!uhci->td_pool) {
err("unable to create td pci_pool");
goto err_create_td_pool;
}
uhci->qh_pool = pci_pool_create("uhci_qh", dev,
sizeof(struct uhci_qh), 16, 0, GFP_DMA | GFP_ATOMIC);
if (!uhci->qh_pool) {
err("unable to create qh pci_pool");
goto err_create_qh_pool;
}
/* Initialize the root hub */
	/* The UHCI spec says devices must have 2 ports, but goes on to say */
	/* they may have more but gives no way to determine how many they */
	/* have. However, according to the UHCI spec, Bit 7 is always set */
	/* to 1. So we try to use this to our advantage */
for (port = 0; port < (uhci->io_size - 0x10) / 2; port++) {
unsigned int portstatus;
portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
if (!(portstatus & 0x0080))
break;
}
if (debug)
info("detected %d ports", port);
/* This is experimental so anything less than 2 or greater than 8 is */
/* something weird and we'll ignore it */
if (port < 2 || port > 8) {
info("port count misdetected? forcing to 2 ports");
port = 2;
}
uhci->rh_numports = port;
hcd->self.root_hub = uhci->rh_dev = usb_alloc_dev(NULL, &hcd->self);
if (!uhci->rh_dev) {
err("unable to allocate root hub");
goto err_alloc_root_hub;
}
uhci->skeltd[0] = uhci_alloc_td(uhci, uhci->rh_dev);
if (!uhci->skeltd[0]) {
err("unable to allocate TD 0");
goto err_alloc_skeltd;
}
/*
* 9 Interrupt queues; link int2 to int1, int4 to int2, etc
* then link int1 to control and control to bulk
*/
for (i = 1; i < 9; i++) {
struct uhci_td *td;
td = uhci->skeltd[i] = uhci_alloc_td(uhci, uhci->rh_dev);
if (!td) {
err("unable to allocate TD %d", i);
goto err_alloc_skeltd;
}
uhci_fill_td(td, 0, uhci_explen(UHCI_NULL_DATA_SIZE) |
(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
td->link = cpu_to_le32(uhci->skeltd[i - 1]->dma_handle);
}
uhci->skel_term_td = uhci_alloc_td(uhci, uhci->rh_dev);
if (!uhci->skel_term_td) {
err("unable to allocate skel TD term");
goto err_alloc_skeltd;
}
for (i = 0; i < UHCI_NUM_SKELQH; i++) {
uhci->skelqh[i] = uhci_alloc_qh(uhci, uhci->rh_dev);
if (!uhci->skelqh[i]) {
err("unable to allocate QH %d", i);
goto err_alloc_skelqh;
}
}
uhci_fill_td(uhci->skel_int1_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
uhci->skel_int1_td->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;
uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_hs_control_qh->dma_handle) | UHCI_PTR_QH;
uhci->skel_ls_control_qh->element = UHCI_PTR_TERM;
uhci->skel_hs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
uhci->skel_hs_control_qh->element = UHCI_PTR_TERM;
uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;
uhci->skel_bulk_qh->element = UHCI_PTR_TERM;
/* This dummy TD is to work around a bug in Intel PIIX controllers */
uhci_fill_td(uhci->skel_term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
uhci->skel_term_td->link = cpu_to_le32(uhci->skel_term_td->dma_handle);
uhci->skel_term_qh->link = UHCI_PTR_TERM;
uhci->skel_term_qh->element = cpu_to_le32(uhci->skel_term_td->dma_handle);
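	/*
	 * At this point the skeleton (summarizing the links set up above)
	 * looks like:
	 *
	 *   int1 TD -> ls_control QH -> hs_control QH -> bulk QH -> term QH
	 *
	 * The term QH points at the self-looping term TD (the PIIX
	 * workaround). Full speed bandwidth reclamation later points the
	 * term QH back at the high speed control QH, and the stall timer
	 * restores UHCI_PTR_TERM once FSBR is disabled again.
	 */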
/*
* Fill the frame list: make all entries point to
* the proper interrupt queue.
*
* This is probably silly, but it's a simple way to
* scatter the interrupt queues in a way that gives
* us a reasonable dynamic range for irq latencies.
*/
for (i = 0; i < UHCI_NUMFRAMES; i++) {
int irq = 0;
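		/*
		 * The nested tests below count how many consecutive
		 * low-order bits of the frame number are set, so frame i
		 * enters the skeleton at skeltd[irq]: even frames hit int1,
		 * frames ending in binary ...01 hit int2, ...011 hit int4,
		 * and so on up to int128.
		 */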
if (i & 1) {
irq++;
if (i & 2) {
irq++;
if (i & 4) {
irq++;
if (i & 8) {
irq++;
if (i & 16) {
irq++;
if (i & 32) {
irq++;
if (i & 64)
irq++;
}
}
}
}
}
}
/* Only place we don't use the frame list routines */
uhci->fl->frame[i] = cpu_to_le32(uhci->skeltd[irq]->dma_handle);
}
start_hc(uhci);
init_stall_timer(hcd);
/* disable legacy emulation */
pci_write_config_word(dev, USBLEGSUP, USBLEGSUP_DEFAULT);
hcd->state = USB_STATE_READY;
usb_connect(uhci->rh_dev);
uhci->rh_dev->speed = USB_SPEED_FULL;
if (usb_register_root_hub(uhci->rh_dev, &dev->dev) != 0) {
err("unable to start root hub");
retval = -ENOMEM;
goto err_start_root_hub;
}
return 0;
/*
* error exits:
*/
err_start_root_hub:
reset_hc(uhci);
del_timer(&uhci->stall_timer);
for (i = 0; i < UHCI_NUM_SKELQH; i++)
if (uhci->skelqh[i]) {
uhci_free_qh(uhci, uhci->skelqh[i]);
uhci->skelqh[i] = NULL;
}
err_alloc_skelqh:
for (i = 0; i < UHCI_NUM_SKELTD; i++)
if (uhci->skeltd[i]) {
uhci_free_td(uhci, uhci->skeltd[i]);
uhci->skeltd[i] = NULL;
}
err_alloc_skeltd:
usb_free_dev(uhci->rh_dev);
uhci->rh_dev = NULL;
err_alloc_root_hub:
pci_pool_destroy(uhci->qh_pool);
uhci->qh_pool = NULL;
err_create_qh_pool:
pci_pool_destroy(uhci->td_pool);
uhci->td_pool = NULL;
err_create_td_pool:
pci_free_consistent(dev, sizeof(*uhci->fl), uhci->fl, uhci->fl->dma_handle);
uhci->fl = NULL;
err_alloc_fl:
#ifdef CONFIG_PROC_FS
remove_proc_entry(buf, uhci_proc_root);
uhci->proc_entry = NULL;
err_create_proc_entry:
#endif
err_pci_set_dma_mask:
return retval;
}
static void __devexit uhci_stop(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
if (uhci->rh_dev)
usb_disconnect(&uhci->rh_dev);
del_timer(&uhci->stall_timer);
/*
* At this point, we're guaranteed that no new connects can be made
* to this bus since there are no more parents
*/
uhci_free_pending_qhs(uhci);
uhci_remove_pending_qhs(uhci);
reset_hc(uhci);
uhci_free_pending_qhs(uhci);
release_uhci(uhci);
}
#ifdef CONFIG_PM
static int uhci_suspend(struct usb_hcd *hcd, u32 state)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
suspend_hc(uhci);
return 0;
}
static int uhci_resume(struct usb_hcd *hcd)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
pci_set_master(uhci->dev);
reset_hc(uhci);
start_hc(uhci);
return 0;
}
#endif
static struct usb_hcd *uhci_hcd_alloc(void)
{
struct uhci_hcd *uhci;
uhci = (struct uhci_hcd *)kmalloc(sizeof(*uhci), GFP_KERNEL);
if (!uhci)
return NULL;
memset(uhci, 0, sizeof(*uhci));
return &uhci->hcd;
}
static void uhci_hcd_free(struct usb_hcd *hcd)
{
kfree(hcd_to_uhci(hcd));
}
static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
return uhci_get_current_frame_number(hcd_to_uhci(hcd));
}
static const char hcd_name[] = "uhci-hcd";
static const struct hc_driver uhci_driver = {
description: hcd_name,
/* Generic hardware linkage */
irq: uhci_irq,
flags: HCD_USB11,
/* Basic lifecycle operations */
start: uhci_start,
#ifdef CONFIG_PM
suspend: uhci_suspend,
resume: uhci_resume,
#endif
stop: uhci_stop,
hcd_alloc: uhci_hcd_alloc,
hcd_free: uhci_hcd_free,
urb_enqueue: uhci_urb_enqueue,
urb_dequeue: uhci_urb_dequeue,
free_config: NULL,
get_frame_number: uhci_hcd_get_frame_number,
hub_status_data: uhci_hub_status_data,
hub_control: uhci_hub_control,
};
static const struct pci_device_id __devinitdata uhci_pci_ids[] = { {
/* handle any USB UHCI controller */
class: ((PCI_CLASS_SERIAL_USB << 8) | 0x00),
class_mask: ~0,
driver_data: (unsigned long) &uhci_driver,
/* no matter who makes it */
vendor: PCI_ANY_ID,
device: PCI_ANY_ID,
subvendor: PCI_ANY_ID,
subdevice: PCI_ANY_ID,
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
static struct pci_driver uhci_pci_driver = {
name: (char *)hcd_name,
id_table: uhci_pci_ids,
probe: usb_hcd_pci_probe,
remove: usb_hcd_pci_remove,
#ifdef CONFIG_PM
suspend: usb_hcd_pci_suspend,
resume: usb_hcd_pci_resume,
#endif /* PM */
};
static int __init uhci_hcd_init(void)
{
int retval = -ENOMEM;
info(DRIVER_DESC " " DRIVER_VERSION);
if (debug) {
errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
if (!errbuf)
goto errbuf_failed;
}
#ifdef CONFIG_PROC_FS
uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, 0);
if (!uhci_proc_root)
goto proc_failed;
#endif
uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
sizeof(struct urb_priv), 0, 0, NULL, NULL);
if (!uhci_up_cachep)
goto up_failed;
retval = pci_module_init(&uhci_pci_driver);
if (retval)
goto init_failed;
return 0;
init_failed:
if (kmem_cache_destroy(uhci_up_cachep))
printk(KERN_INFO "uhci: not all urb_priv's were freed\n");
up_failed:
#ifdef CONFIG_PROC_FS
remove_proc_entry("uhci", 0);
proc_failed:
#endif
if (errbuf)
kfree(errbuf);
errbuf_failed:
return retval;
}
static void __exit uhci_hcd_cleanup(void)
{
pci_unregister_driver(&uhci_pci_driver);
if (kmem_cache_destroy(uhci_up_cachep))
printk(KERN_INFO "uhci: not all urb_priv's were freed\n");
#ifdef CONFIG_PROC_FS
remove_proc_entry("uhci", 0);
#endif
if (errbuf)
kfree(errbuf);
}
module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
#ifndef __LINUX_UHCI_HCD_H
#define __LINUX_UHCI_HCD_H
#include <linux/list.h>
#include <linux/usb.h>
/*
* Universal Host Controller Interface data structures and defines
*/
/* Command register */
#define USBCMD 0
#define USBCMD_RS 0x0001 /* Run/Stop */
#define USBCMD_HCRESET 0x0002 /* Host reset */
#define USBCMD_GRESET 0x0004 /* Global reset */
#define USBCMD_EGSM 0x0008 /* Global Suspend Mode */
#define USBCMD_FGR 0x0010 /* Force Global Resume */
#define USBCMD_SWDBG 0x0020 /* SW Debug mode */
#define USBCMD_CF 0x0040 /* Config Flag (sw only) */
#define USBCMD_MAXP 0x0080 /* Max Packet (0 = 32, 1 = 64) */
/* Status register */
#define USBSTS 2
#define USBSTS_USBINT 0x0001 /* Interrupt due to IOC */
#define USBSTS_ERROR 0x0002 /* Interrupt due to error */
#define USBSTS_RD 0x0004 /* Resume Detect */
#define USBSTS_HSE 0x0008 /* Host System Error - basically PCI problems */
#define USBSTS_HCPE 0x0010 /* Host Controller Process Error - the scripts were buggy */
#define USBSTS_HCH 0x0020 /* HC Halted */
/* Interrupt enable register */
#define USBINTR 4
#define USBINTR_TIMEOUT 0x0001 /* Timeout/CRC error enable */
#define USBINTR_RESUME 0x0002 /* Resume interrupt enable */
#define USBINTR_IOC 0x0004 /* Interrupt On Complete enable */
#define USBINTR_SP 0x0008 /* Short packet interrupt enable */
#define USBFRNUM 6
#define USBFLBASEADD 8
#define USBSOF 12
/* USB port status and control registers */
#define USBPORTSC1 16
#define USBPORTSC2 18
#define USBPORTSC_CCS 0x0001 /* Current Connect Status ("device present") */
#define USBPORTSC_CSC 0x0002 /* Connect Status Change */
#define USBPORTSC_PE 0x0004 /* Port Enable */
#define USBPORTSC_PEC 0x0008 /* Port Enable Change */
#define USBPORTSC_LS 0x0030 /* Line Status */
#define USBPORTSC_RD 0x0040 /* Resume Detect */
#define USBPORTSC_LSDA 0x0100 /* Low Speed Device Attached */
#define USBPORTSC_PR 0x0200 /* Port Reset */
#define USBPORTSC_SUSP 0x1000 /* Suspend */
/* Legacy support register */
#define USBLEGSUP 0xc0
#define USBLEGSUP_DEFAULT 0x2000 /* only PIRQ enable set */
#define UHCI_NULL_DATA_SIZE 0x7FF /* for UHCI controller TD */
#define UHCI_PTR_BITS cpu_to_le32(0x000F)
#define UHCI_PTR_TERM cpu_to_le32(0x0001)
#define UHCI_PTR_QH cpu_to_le32(0x0002)
#define UHCI_PTR_DEPTH cpu_to_le32(0x0004)
#define UHCI_NUMFRAMES 1024 /* in the frame list [array] */
#define UHCI_MAX_SOF_NUMBER 2047 /* in an SOF packet */
#define CAN_SCHEDULE_FRAMES	1000		/* how far in the future frames can be scheduled */
struct uhci_frame_list {
__u32 frame[UHCI_NUMFRAMES];
void *frame_cpu[UHCI_NUMFRAMES];
dma_addr_t dma_handle;
};
struct urb_priv;
struct uhci_qh {
/* Hardware fields */
__u32 link; /* Next queue */
__u32 element; /* Queue element pointer */
/* Software fields */
dma_addr_t dma_handle;
struct usb_device *dev;
struct urb_priv *urbp;
struct list_head list; /* P: uhci->frame_list_lock */
struct list_head remove_list; /* P: uhci->remove_list_lock */
} __attribute__((aligned(16)));
/*
* for TD <status>:
*/
#define td_status(td) le32_to_cpu((td)->status)
#define TD_CTRL_SPD (1 << 29) /* Short Packet Detect */
#define TD_CTRL_C_ERR_MASK (3 << 27) /* Error Counter bits */
#define TD_CTRL_C_ERR_SHIFT 27
#define TD_CTRL_LS (1 << 26) /* Low Speed Device */
#define TD_CTRL_IOS (1 << 25) /* Isochronous Select */
#define TD_CTRL_IOC (1 << 24) /* Interrupt on Complete */
#define TD_CTRL_ACTIVE (1 << 23) /* TD Active */
#define TD_CTRL_STALLED (1 << 22) /* TD Stalled */
#define TD_CTRL_DBUFERR (1 << 21) /* Data Buffer Error */
#define TD_CTRL_BABBLE (1 << 20) /* Babble Detected */
#define TD_CTRL_NAK (1 << 19) /* NAK Received */
#define TD_CTRL_CRCTIMEO (1 << 18) /* CRC/Time Out Error */
#define TD_CTRL_BITSTUFF (1 << 17) /* Bit Stuff Error */
#define TD_CTRL_ACTLEN_MASK 0x7FF /* actual length, encoded as n - 1 */
#define TD_CTRL_ANY_ERROR	(TD_CTRL_STALLED | TD_CTRL_DBUFERR | \
				TD_CTRL_BABBLE | TD_CTRL_CRCTIMEO | TD_CTRL_BITSTUFF)
#define uhci_maxerr(err) ((err) << TD_CTRL_C_ERR_SHIFT)
#define uhci_status_bits(ctrl_sts) ((ctrl_sts) & 0xFE0000)
#define uhci_actual_length(ctrl_sts) (((ctrl_sts) + 1) & TD_CTRL_ACTLEN_MASK) /* 1-based */
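/*
 * Example decodings of the "n - 1" length encoding: an ActLen field of
 * 0x7FF (the null data size) decodes to 0 bytes, 0x000 decodes to 1 byte,
 * and 0x03F decodes to 64 bytes.
 */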
/*
* for TD <info>: (a.k.a. Token)
*/
#define td_token(td) le32_to_cpu((td)->token)
#define TD_TOKEN_DEVADDR_SHIFT 8
#define TD_TOKEN_TOGGLE_SHIFT 19
#define TD_TOKEN_TOGGLE (1 << 19)
#define TD_TOKEN_EXPLEN_SHIFT 21
#define TD_TOKEN_EXPLEN_MASK 0x7FF /* expected length, encoded as n - 1 */
#define TD_TOKEN_PID_MASK 0xFF
#define uhci_explen(len) ((len) << TD_TOKEN_EXPLEN_SHIFT)
#define uhci_expected_length(token) ((((token) >> 21) + 1) & TD_TOKEN_EXPLEN_MASK)
#define uhci_toggle(token) (((token) >> TD_TOKEN_TOGGLE_SHIFT) & 1)
#define uhci_endpoint(token) (((token) >> 15) & 0xf)
#define uhci_devaddr(token) (((token) >> TD_TOKEN_DEVADDR_SHIFT) & 0x7f)
#define uhci_devep(token) (((token) >> TD_TOKEN_DEVADDR_SHIFT) & 0x7ff)
#define uhci_packetid(token) ((token) & TD_TOKEN_PID_MASK)
#define uhci_packetout(token) (uhci_packetid(token) != USB_PID_IN)
#define uhci_packetin(token) (uhci_packetid(token) == USB_PID_IN)
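/*
 * Note that uhci_packetout() is true for both OUT and SETUP packet IDs:
 * anything that isn't an IN token counts as host-to-device here.
 */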
/*
* The documentation says "4 words for hardware, 4 words for software".
*
* That's silly, the hardware doesn't care. The hardware only cares that
* the hardware words are 16-byte aligned, and we can have any amount of
* sw space after the TD entry as far as I can tell.
*
* But let's just go with the documentation, at least for 32-bit machines.
* On 64-bit machines we probably want to take advantage of the fact that
* hw doesn't really care about the size of the sw-only area.
*
* Alas, not anymore, we have more than 4 words for software, woops.
* Everything still works tho, surprise! -jerdfelt
*/
struct uhci_td {
/* Hardware fields */
__u32 link;
__u32 status;
__u32 token;
__u32 buffer;
/* Software fields */
dma_addr_t dma_handle;
struct usb_device *dev;
struct urb *urb;
struct list_head list; /* P: urb->lock */
int frame;
struct list_head fl_list; /* P: uhci->frame_list_lock */
} __attribute__((aligned(16)));
/*
* There are various standard queues. We set up several different
* queues for each of the three basic queue types: interrupt,
* control, and bulk.
*
* - There are various different interrupt latencies: ranging from
* every other USB frame (2 ms apart) to every 256 USB frames (ie
* 256 ms apart). Make your choice according to how obnoxious you
* want to be on the wire, vs how critical latency is for you.
* - The control list is done every frame.
* - There are 4 bulk lists, so that up to four devices can have a
* bulk list of their own and when run concurrently all four lists
 *   will be serviced.
*
 * This is a bit misleading: the interrupt latencies above are nominal.
 * interrupt2 isn't exactly 2ms, it can vary up to 4ms since the other queues
 * can "override" it; interrupt4 can vary up to 8ms, and so on. A minor
 * problem.
*
 * In the case of the root hub, these QH's are just heads of QH's. Don't
* be scared, it kinda makes sense. Look at this wonderful picture care of
* Linus:
*
* generic- -> dev1- -> generic- -> dev1- -> control- -> bulk- -> ...
* iso-QH iso-QH irq-QH irq-QH QH QH
* | | | | | |
* End dev1-iso-TD1 End dev1-irq-TD1 ... ...
* |
* dev1-iso-TD2
* |
* ....
*
 * This may vary a bit (the UHCI docs don't explicitly say you can put iso
 * transfers in QH's, and none of their pictures show that either), but other
 * than that, that is what we're doing now
*
* And now we don't put Iso transfers in QH's, so we don't waste one on it
* --jerdfelt
*
* To keep with Linus' nomenclature, this is called the QH skeleton. These
 * labels (below) are only significant to the root hub's QH's
*/
#define UHCI_NUM_SKELTD 10
#define skel_int1_td skeltd[0]
#define skel_int2_td skeltd[1]
#define skel_int4_td skeltd[2]
#define skel_int8_td skeltd[3]
#define skel_int16_td skeltd[4]
#define skel_int32_td skeltd[5]
#define skel_int64_td skeltd[6]
#define skel_int128_td skeltd[7]
#define skel_int256_td skeltd[8]
#define skel_term_td skeltd[9] /* To work around PIIX UHCI bug */
#define UHCI_NUM_SKELQH 4
#define skel_ls_control_qh skelqh[0]
#define skel_hs_control_qh skelqh[1]
#define skel_bulk_qh skelqh[2]
#define skel_term_qh skelqh[3]
/*
* Search tree for determining where <interval> fits in the
* skelqh[] skeleton.
*
* An interrupt request should be placed into the slowest skelqh[]
* which meets the interval/period/frequency requirement.
* An interrupt request is allowed to be faster than <interval> but not slower.
*
* For a given <interval>, this function returns the appropriate/matching
* skelqh[] index value.
*
* NOTE: For UHCI, we don't really need int256_qh since the maximum interval
* is 255 ms. However, we do need an int1_qh since 1 is a valid interval
* and we should meet that frequency when requested to do so.
* This will require some change(s) to the UHCI skeleton.
*/
static inline int __interval_to_skel(int interval)
{
if (interval < 16) {
if (interval < 4) {
if (interval < 2)
return 0; /* int1 for 0-1 ms */
return 1; /* int2 for 2-3 ms */
}
if (interval < 8)
return 2; /* int4 for 4-7 ms */
return 3; /* int8 for 8-15 ms */
}
if (interval < 64) {
if (interval < 32)
return 4; /* int16 for 16-31 ms */
return 5; /* int32 for 32-63 ms */
}
if (interval < 128)
return 6; /* int64 for 64-127 ms */
return 7; /* int128 for 128-255 ms (Max.) */
}
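/*
 * Example mappings: __interval_to_skel(1) == 0 (int1),
 * __interval_to_skel(10) == 3 (int8), __interval_to_skel(255) == 7 (int128).
 */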
#define hcd_to_uhci(hcd_ptr) list_entry(hcd_ptr, struct uhci_hcd, hcd)
/*
* This describes the full uhci information.
*
* Note how the "proper" USB information is just
* a subset of what the full implementation needs.
*/
struct uhci_hcd {
struct usb_hcd hcd;
struct pci_dev *dev;
#ifdef CONFIG_PROC_FS
/* procfs */
int num;
struct proc_dir_entry *proc_entry;
#endif
/* Grabbed from PCI */
int irq;
unsigned int io_addr;
unsigned int io_size;
struct pci_pool *qh_pool;
struct pci_pool *td_pool;
struct usb_bus *bus;
struct uhci_td *skeltd[UHCI_NUM_SKELTD]; /* Skeleton TD's */
struct uhci_qh *skelqh[UHCI_NUM_SKELQH]; /* Skeleton QH's */
spinlock_t frame_list_lock;
struct uhci_frame_list *fl; /* P: uhci->frame_list_lock */
int fsbr; /* Full speed bandwidth reclamation */
unsigned long fsbrtimeout; /* FSBR delay */
int is_suspended;
/* Main list of URB's currently controlled by this HC */
spinlock_t urb_list_lock;
struct list_head urb_list; /* P: uhci->urb_list_lock */
/* List of QH's that are done, but waiting to be unlinked (race) */
spinlock_t qh_remove_list_lock;
struct list_head qh_remove_list; /* P: uhci->qh_remove_list_lock */
/* List of asynchronously unlinked URB's */
spinlock_t urb_remove_list_lock;
struct list_head urb_remove_list; /* P: uhci->urb_remove_list_lock */
/* List of URB's awaiting completion callback */
spinlock_t complete_list_lock;
struct list_head complete_list; /* P: uhci->complete_list_lock */
struct usb_device *rh_dev; /* Root hub */
int rh_numports;
struct timer_list stall_timer;
};
struct urb_priv {
struct list_head urb_list;
struct urb *urb;
struct usb_device *dev;
dma_addr_t setup_packet_dma_handle;
dma_addr_t transfer_buffer_dma_handle;
struct uhci_qh *qh; /* QH for this URB */
struct list_head td_list; /* P: urb->lock */
int fsbr : 1; /* URB turned on FSBR */
int fsbr_timeout : 1; /* URB timed out on FSBR */
int queued : 1; /* QH was queued (not linked in) */
int short_control_packet : 1; /* If we get a short packet during */
/* a control transfer, retrigger */
/* the status phase */
int status; /* Final status */
unsigned long inserttime; /* In jiffies */
unsigned long fsbrtime; /* In jiffies */
struct list_head queue_list; /* P: uhci->frame_list_lock */
struct list_head complete_list; /* P: uhci->complete_list_lock */
};
/*
* Locking in uhci.c
*
* spinlocks are used extensively to protect the many lists and data
* structures we have. It's not that pretty, but it's necessary. We
* need to be done with all of the locks (except complete_list_lock) when
* we call urb->complete. I've tried to make it simple enough so I don't
* have to spend hours racking my brain trying to figure out if the
* locking is safe.
*
* Here's the safe locking order to prevent deadlocks:
*
* #1 uhci->urb_list_lock
* #2 urb->lock
* #3 uhci->urb_remove_list_lock, uhci->frame_list_lock,
* uhci->qh_remove_list_lock
* #4 uhci->complete_list_lock
*
* If you're going to grab 2 or more locks at once, ALWAYS grab the lock
* at the lowest level FIRST and NEVER grab locks at the same level at the
* same time.
*
* So, if you need uhci->urb_list_lock, grab it before you grab urb->lock
*/
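/*
 * A minimal sketch of the ordering rule above (a hypothetical caller, not a
 * real function in this driver): grab level #1 before level #2, and release
 * in the reverse order:
 *
 *	spin_lock_irqsave(&uhci->urb_list_lock, flags);
 *	spin_lock(&urb->lock);
 *	...
 *	spin_unlock(&urb->lock);
 *	spin_unlock_irqrestore(&uhci->urb_list_lock, flags);
 */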
#endif
/*
* Universal Host Controller Interface driver for USB.
*
* Maintainer: Johannes Erdfelt <johannes@erdfelt.com>
*
* (C) Copyright 1999 Linus Torvalds
* (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
* (C) Copyright 1999 Randy Dunlap
* (C) Copyright 1999 Georg Acher, acher@in.tum.de
* (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
* (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
*/
static __u8 root_hub_hub_des[] =
{
0x09, /* __u8 bLength; */
0x29, /* __u8 bDescriptorType; Hub-descriptor */
0x02, /* __u8 bNbrPorts; */
0x00, /* __u16 wHubCharacteristics; */
0x00,
0x01, /* __u8 bPwrOn2pwrGood; 2ms */
0x00, /* __u8 bHubContrCurrent; 0 mA */
0x00, /* __u8 DeviceRemovable; *** 7 Ports max *** */
0xff /* __u8 PortPwrCtrlMask; *** 7 ports max *** */
};
static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned int io_addr = uhci->io_addr;
int i, len = 1;
*buf = 0;
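	/*
	 * Bit 0 of the returned bitmap is reserved for the hub itself;
	 * bit (i + 1) is set when port i + 1 has a connect status change
	 * (USBPORTSC_CSC) or port enable change (USBPORTSC_PEC) pending,
	 * which is what the 0xa mask below selects.
	 */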
for (i = 0; i < uhci->rh_numports; i++) {
*buf |= ((inw(io_addr + USBPORTSC1 + i * 2) & 0xa) > 0 ? (1 << (i + 1)) : 0);
len = (i + 1) / 8 + 1;
}
return !!*buf;
}
#define OK(x) len = (x); break
#define CLR_RH_PORTSTAT(x) \
status = inw(io_addr + USBPORTSC1 + 2 * (wIndex-1)); \
status = (status & 0xfff5) & ~(x); \
outw(status, io_addr + USBPORTSC1 + 2 * (wIndex-1))
#define SET_RH_PORTSTAT(x) \
status = inw(io_addr + USBPORTSC1 + 2 * (wIndex-1)); \
status = (status & 0xfff5) | (x); \
outw(status, io_addr + USBPORTSC1 + 2 * (wIndex-1))
// FIXME: Shouldn't this return the length of the data too?
static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
int i, status, retval = 0, len = 0;
unsigned int io_addr = uhci->io_addr;
__u16 cstatus;
	char c_p_r[8] = { 0 };
switch (typeReq) {
/* Request Destination:
without flags: Device,
RH_INTERFACE: interface,
RH_ENDPOINT: endpoint,
RH_CLASS means HUB here,
	   RH_OTHER | RH_CLASS almost always means HUB_PORT here
*/
case GetHubStatus:
*(__u32 *)buf = cpu_to_le32(0);
OK(4); /* hub power */
case GetPortStatus:
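		/*
		 * Repack the raw PORTSC bits into the wPortStatus and
		 * wPortChange layout the hub class expects: CCS -> bit 0,
		 * PE -> bit 1, SUSP -> bit 2, PR -> bit 4, power always
		 * reported on (bit 8), LSDA -> bit 9; for the change word,
		 * CSC -> bit 0, PEC -> bit 1 and the saved reset change ->
		 * bit 4.
		 */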
status = inw(io_addr + USBPORTSC1 + 2 * (wIndex - 1));
cstatus = ((status & USBPORTSC_CSC) >> (1 - 0)) |
((status & USBPORTSC_PEC) >> (3 - 1)) |
(c_p_r[wIndex - 1] << (0 + 4));
status = (status & USBPORTSC_CCS) |
((status & USBPORTSC_PE) >> (2 - 1)) |
((status & USBPORTSC_SUSP) >> (12 - 2)) |
((status & USBPORTSC_PR) >> (9 - 4)) |
(1 << 8) | /* power on */
((status & USBPORTSC_LSDA) << (-8 + 9));
*(__u16 *)buf = cpu_to_le16(status);
*(__u16 *)(buf + 2) = cpu_to_le16(cstatus);
OK(4);
case SetHubFeature:
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
break;
default:
goto err;
}
break;
case ClearHubFeature:
switch (wValue) {
case C_HUB_OVER_CURRENT:
OK(0); /* hub power over current */
default:
goto err;
}
break;
case SetPortFeature:
if (!wIndex || wIndex > uhci->rh_numports)
goto err;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
SET_RH_PORTSTAT(USBPORTSC_SUSP);
OK(0);
case USB_PORT_FEAT_RESET:
SET_RH_PORTSTAT(USBPORTSC_PR);
mdelay(50); /* USB v1.1 7.1.7.3 */
c_p_r[wIndex - 1] = 1;
CLR_RH_PORTSTAT(USBPORTSC_PR);
udelay(10);
SET_RH_PORTSTAT(USBPORTSC_PE);
mdelay(10);
SET_RH_PORTSTAT(0xa);
OK(0);
case USB_PORT_FEAT_POWER:
OK(0); /* port power ** */
case USB_PORT_FEAT_ENABLE:
SET_RH_PORTSTAT(USBPORTSC_PE);
OK(0);
default:
goto err;
}
break;
case ClearPortFeature:
if (!wIndex || wIndex > uhci->rh_numports)
goto err;
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
CLR_RH_PORTSTAT(USBPORTSC_PE);
OK(0);
case USB_PORT_FEAT_C_ENABLE:
SET_RH_PORTSTAT(USBPORTSC_PEC);
OK(0);
case USB_PORT_FEAT_SUSPEND:
CLR_RH_PORTSTAT(USBPORTSC_SUSP);
OK(0);
case USB_PORT_FEAT_C_SUSPEND:
/*** WR_RH_PORTSTAT(RH_PS_PSSC); */
OK(0);
case USB_PORT_FEAT_POWER:
OK(0); /* port power */
case USB_PORT_FEAT_C_CONNECTION:
SET_RH_PORTSTAT(USBPORTSC_CSC);
OK(0);
case USB_PORT_FEAT_C_OVER_CURRENT:
OK(0); /* port power over current */
case USB_PORT_FEAT_C_RESET:
c_p_r[wIndex - 1] = 0;
OK(0);
default:
goto err;
}
break;
case GetHubDescriptor:
		len = min_t(unsigned int, sizeof(root_hub_hub_des), wLength);
memcpy(buf, root_hub_hub_des, len);
if (len > 2)
buf[2] = uhci->rh_numports;
OK(len);
default:
err:
retval = -EPIPE;
}
return retval;
}