Commit 08150c53 authored by Linus Torvalds

Merge branch 'usb-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6

* 'usb-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (43 commits)
  Revert "USB: isp1760-hcd: move imask clear after pending work is done"
  xHCI: Implement AMD PLL quirk
  xhci: Tell USB core both roothubs lost power.
  usbcore: Bug fix: system can't suspend with USB3.0 device connected to USB3.0 hub
  USB: Fix unplug of device with active streams
  USB: xhci - also free streams when resetting devices
  xhci: Fix NULL pointer deref in handle_port_status()
  USB: xhci - fix math in xhci_get_endpoint_interval()
  USB: xhci: simplify logic of skipping missed isoc TDs
  USB: xhci - remove excessive 'inline' markings
  USB: xhci: unsigned char never equals -1
  USB: xhci - fix unsafe macro definitions
  USB: fix formatting of SuperSpeed endpoints in /proc/bus/usb/devices
  USB: isp1760-hcd: move imask clear after pending work is done
  USB: fsl_qe_udc: send ZLP when zero flag and length % maxpacket == 0
  usb: qcserial add missing errorpath kfrees
  usb: qcserial avoid pointing to freed memory
  usb: Fix qcserial memory leak on rmmod
  USB: ftdi_sio: add ids for Hameg HO720 and HO730
  USB: option: Added support for Samsung GT-B3730/GT-B3710 LTE USB modem.
  ...
parents fdfc552a 753d8534
......@@ -6,7 +6,6 @@ config MICROBLAZE
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
select USB_ARCH_HAS_EHCI
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_OPROFILE
select HAVE_ARCH_KGDB
......
......@@ -66,6 +66,7 @@ config USB_ARCH_HAS_EHCI
default y if ARCH_VT8500
default y if PLAT_SPEAR
default y if ARCH_MSM
default y if MICROBLAZE
default PCI
# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
......
......@@ -221,7 +221,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
break;
case USB_ENDPOINT_XFER_INT:
type = "Int.";
if (speed == USB_SPEED_HIGH)
if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER)
interval = 1 << (desc->bInterval - 1);
else
interval = desc->bInterval;
......@@ -229,7 +229,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
default: /* "can't happen" */
return start;
}
interval *= (speed == USB_SPEED_HIGH) ? 125 : 1000;
interval *= (speed == USB_SPEED_HIGH ||
speed == USB_SPEED_SUPER) ? 125 : 1000;
if (interval % 1000)
unit = 'u';
else {
......@@ -542,8 +543,9 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
if (level == 0) {
int max;
/* high speed reserves 80%, full/low reserves 90% */
if (usbdev->speed == USB_SPEED_HIGH)
/* super/high speed reserves 80%, full/low reserves 90% */
if (usbdev->speed == USB_SPEED_HIGH ||
usbdev->speed == USB_SPEED_SUPER)
max = 800;
else
max = FRAME_TIME_MAX_USECS_ALLOC;
......
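For reference, a user-space sketch of the interval formatting the devices.c change above affects; the values are illustrative and the millisecond branch is assumed to divide by 1000 as in the surrounding kernel code, which the hunk truncates.

/*
 * Illustrative sketch only (assumed values, not kernel code): how the
 * /proc/bus/usb/devices interval is derived after the change above.
 * A SuperSpeed interrupt endpoint with bInterval = 4 yields
 * 1 << (4 - 1) = 8 units of 125 us = 1000 us, printed as "1ms".
 */
#include <stdio.h>

int main(void)
{
        int speed_is_hs_or_ss = 1;              /* USB_SPEED_HIGH or USB_SPEED_SUPER */
        unsigned int bInterval = 4;
        unsigned int interval;
        char unit;

        interval = speed_is_hs_or_ss ? 1u << (bInterval - 1) : bInterval;
        interval *= speed_is_hs_or_ss ? 125 : 1000;     /* microseconds */

        if (interval % 1000) {
                unit = 'u';
        } else {
                unit = 'm';                     /* assumed: convert to milliseconds */
                interval /= 1000;
        }
        printf("%u%cs\n", interval, unit);      /* prints "1ms" */
        return 0;
}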
......@@ -1908,7 +1908,7 @@ void usb_free_streams(struct usb_interface *interface,
/* Streams only apply to bulk endpoints. */
for (i = 0; i < num_eps; i++)
if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc))
return;
hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
......
......@@ -2285,7 +2285,17 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
}
/* see 7.1.7.6 */
status = set_port_feature(hub->hdev, port1, USB_PORT_FEAT_SUSPEND);
/* Clear PORT_POWER if it's a USB3.0 device connected to USB 3.0
* external hub.
* FIXME: this is a temporary workaround to make the system able
* to suspend/resume.
*/
if ((hub->hdev->parent != NULL) && hub_is_superspeed(hub->hdev))
status = clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_POWER);
else
status = set_port_feature(hub->hdev, port1,
USB_PORT_FEAT_SUSPEND);
if (status) {
dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
port1, status);
......
......@@ -706,6 +706,7 @@ f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
struct f_audio *audio = func_to_audio(f);
usb_free_descriptors(f->descriptors);
usb_free_descriptors(f->hs_descriptors);
kfree(audio);
}
......
......@@ -314,6 +314,9 @@ eem_unbind(struct usb_configuration *c, struct usb_function *f)
static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
struct sk_buff *skb = (struct sk_buff *)req->context;
dev_kfree_skb_any(skb);
}
/*
......@@ -428,10 +431,11 @@ static int eem_unwrap(struct gether *port,
skb_trim(skb2, len);
put_unaligned_le16(BIT(15) | BIT(11) | len,
skb_push(skb2, 2));
skb_copy_bits(skb, 0, req->buf, skb->len);
req->length = skb->len;
skb_copy_bits(skb2, 0, req->buf, skb2->len);
req->length = skb2->len;
req->complete = eem_cmd_complete;
req->zero = 1;
req->context = skb2;
if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
DBG(cdev, "echo response queue fail\n");
break;
......
......@@ -1148,6 +1148,12 @@ static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
static int txcomplete(struct qe_ep *ep, unsigned char restart)
{
if (ep->tx_req != NULL) {
struct qe_req *req = ep->tx_req;
unsigned zlp = 0, last_len = 0;
last_len = min_t(unsigned, req->req.length - ep->sent,
ep->ep.maxpacket);
if (!restart) {
int asent = ep->last;
ep->sent += asent;
......@@ -1156,9 +1162,18 @@ static int txcomplete(struct qe_ep *ep, unsigned char restart)
ep->last = 0;
}
/* zlp needed when req->req.zero is set */
if (req->req.zero) {
if (last_len == 0 ||
(req->req.length % ep->ep.maxpacket) != 0)
zlp = 0;
else
zlp = 1;
} else
zlp = 0;
/* the request has already been transmitted completely */
if ((ep->tx_req->req.length - ep->sent) <= 0) {
ep->tx_req->req.actual = (unsigned int)ep->sent;
if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
done(ep, ep->tx_req, 0);
ep->tx_req = NULL;
ep->last = 0;
......@@ -1191,6 +1206,7 @@ static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
buf = (u8 *)ep->tx_req->req.buf + ep->sent;
if (buf && size) {
ep->last = size;
ep->tx_req->req.actual += size;
frame_set_data(frame, buf);
frame_set_length(frame, size);
frame_set_status(frame, FRAME_OK);
......
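A standalone sketch (assumed values, not the driver code) of the zero-length-packet rule the txcomplete() change above adds: a ZLP is queued only when the zero flag is set and the request length is an exact multiple of the endpoint's maxpacket.

/*
 * Illustrative only: the ZLP decision from the change above, pulled into a
 * standalone helper. With zero = 1, length = 512 and maxpacket = 64, the
 * last 64-byte packet is followed by one zero-length packet; with
 * length = 500 the final short packet already terminates the transfer.
 */
#include <stdio.h>

static int need_zlp(int zero_flag, unsigned int length, unsigned int last_len,
                    unsigned int maxpacket)
{
        if (!zero_flag)
                return 0;
        if (last_len == 0 || (length % maxpacket) != 0)
                return 0;
        return 1;
}

int main(void)
{
        /* final data packet of a 512-byte request on a 64-byte endpoint */
        printf("%d\n", need_zlp(1, 512, 64, 64));       /* 1: queue a ZLP */
        /* 500-byte request: last packet is short, no ZLP needed */
        printf("%d\n", need_zlp(1, 500, 52, 64));       /* 0 */
        return 0;
}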
......@@ -386,8 +386,10 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
/* halt any endpoint by doing a "wrong direction" i/o call */
if (usb_endpoint_dir_in(&data->desc)) {
if (usb_endpoint_xfer_isoc(&data->desc))
if (usb_endpoint_xfer_isoc(&data->desc)) {
mutex_unlock(&data->lock);
return -EINVAL;
}
DBG (data->dev, "%s halt\n", data->name);
spin_lock_irq (&data->dev->lock);
if (likely (data->ep != NULL))
......
......@@ -1608,7 +1608,7 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
return -EINVAL;
if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
return -ESHUTDOWN;
spin_lock_irqsave(&ep->dev->lock, iflags);
spin_lock_irqsave(&dev->lock, iflags);
/* map the buffer for dma */
if (usbreq->length &&
((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
......@@ -1625,8 +1625,10 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
DMA_FROM_DEVICE);
} else {
req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
if (!req->buf)
return -ENOMEM;
if (!req->buf) {
retval = -ENOMEM;
goto probe_end;
}
if (ep->in) {
memcpy(req->buf, usbreq->buf, usbreq->length);
req->dma = dma_map_single(&dev->pdev->dev,
......
......@@ -1083,7 +1083,9 @@ static void irq_device_state(struct r8a66597 *r8a66597)
if (dvsq == DS_DFLT) {
/* bus reset */
spin_unlock(&r8a66597->lock);
r8a66597->driver->disconnect(&r8a66597->gadget);
spin_lock(&r8a66597->lock);
r8a66597_update_usb_speed(r8a66597);
}
if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
......
......@@ -1247,24 +1247,27 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
static void scan_async (struct ehci_hcd *ehci)
{
bool stopped;
struct ehci_qh *qh;
enum ehci_timer_action action = TIMER_IO_WATCHDOG;
ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
timer_action_done (ehci, TIMER_ASYNC_SHRINK);
rescan:
stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state);
qh = ehci->async->qh_next.qh;
if (likely (qh != NULL)) {
do {
/* clean any finished work for this qh */
if (!list_empty (&qh->qtd_list)
&& qh->stamp != ehci->stamp) {
if (!list_empty(&qh->qtd_list) && (stopped ||
qh->stamp != ehci->stamp)) {
int temp;
/* unlinks could happen here; completion
* reporting drops the lock. rescan using
* the latest schedule, but don't rescan
* qhs we already finished (no looping).
* qhs we already finished (no looping)
* unless the controller is stopped.
*/
qh = qh_get (qh);
qh->stamp = ehci->stamp;
......@@ -1285,9 +1288,9 @@ static void scan_async (struct ehci_hcd *ehci)
*/
if (list_empty(&qh->qtd_list)
&& qh->qh_state == QH_STATE_LINKED) {
if (!ehci->reclaim
&& ((ehci->stamp - qh->stamp) & 0x1fff)
>= (EHCI_SHRINK_FRAMES * 8))
if (!ehci->reclaim && (stopped ||
((ehci->stamp - qh->stamp) & 0x1fff)
>= EHCI_SHRINK_FRAMES * 8))
start_unlink_async(ehci, qh);
else
action = TIMER_ASYNC_SHRINK;
......
......@@ -295,7 +295,7 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
}
dev_err(hcd->self.controller,
"%s: Can not allocate %lu bytes of memory\n"
"%s: Cannot allocate %zu bytes of memory\n"
"Current memory map:\n",
__func__, qtd->length);
for (i = 0; i < BLOCKS; i++) {
......
......@@ -33,7 +33,7 @@
#ifdef __LITTLE_ENDIAN
#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C)
#elif __BIG_ENDIAN
#elif defined(__BIG_ENDIAN)
#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \
USBH_ENABLE_BE)
#else
......
......@@ -84,65 +84,92 @@ int usb_amd_find_chipset_info(void)
{
u8 rev = 0;
unsigned long flags;
struct amd_chipset_info info;
int ret;
spin_lock_irqsave(&amd_lock, flags);
amd_chipset.probe_count++;
/* probe only once */
if (amd_chipset.probe_count > 1) {
if (amd_chipset.probe_count > 0) {
amd_chipset.probe_count++;
spin_unlock_irqrestore(&amd_lock, flags);
return amd_chipset.probe_result;
}
memset(&info, 0, sizeof(info));
spin_unlock_irqrestore(&amd_lock, flags);
amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
if (amd_chipset.smbus_dev) {
rev = amd_chipset.smbus_dev->revision;
info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
if (info.smbus_dev) {
rev = info.smbus_dev->revision;
if (rev >= 0x40)
amd_chipset.sb_type = 1;
info.sb_type = 1;
else if (rev >= 0x30 && rev <= 0x3b)
amd_chipset.sb_type = 3;
info.sb_type = 3;
} else {
amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
0x780b, NULL);
if (!amd_chipset.smbus_dev) {
spin_unlock_irqrestore(&amd_lock, flags);
return 0;
info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
0x780b, NULL);
if (!info.smbus_dev) {
ret = 0;
goto commit;
}
rev = amd_chipset.smbus_dev->revision;
rev = info.smbus_dev->revision;
if (rev >= 0x11 && rev <= 0x18)
amd_chipset.sb_type = 2;
info.sb_type = 2;
}
if (amd_chipset.sb_type == 0) {
if (amd_chipset.smbus_dev) {
pci_dev_put(amd_chipset.smbus_dev);
amd_chipset.smbus_dev = NULL;
if (info.sb_type == 0) {
if (info.smbus_dev) {
pci_dev_put(info.smbus_dev);
info.smbus_dev = NULL;
}
spin_unlock_irqrestore(&amd_lock, flags);
return 0;
ret = 0;
goto commit;
}
amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
if (amd_chipset.nb_dev) {
amd_chipset.nb_type = 1;
info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
if (info.nb_dev) {
info.nb_type = 1;
} else {
amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
0x1510, NULL);
if (amd_chipset.nb_dev) {
amd_chipset.nb_type = 2;
} else {
amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
0x9600, NULL);
if (amd_chipset.nb_dev)
amd_chipset.nb_type = 3;
info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
if (info.nb_dev) {
info.nb_type = 2;
} else {
info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
0x9600, NULL);
if (info.nb_dev)
info.nb_type = 3;
}
}
amd_chipset.probe_result = 1;
ret = info.probe_result = 1;
printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
spin_unlock_irqrestore(&amd_lock, flags);
return amd_chipset.probe_result;
commit:
spin_lock_irqsave(&amd_lock, flags);
if (amd_chipset.probe_count > 0) {
/* race - someone else was faster - drop devices */
/* Mark that we were here */
amd_chipset.probe_count++;
ret = amd_chipset.probe_result;
spin_unlock_irqrestore(&amd_lock, flags);
if (info.nb_dev)
pci_dev_put(info.nb_dev);
if (info.smbus_dev)
pci_dev_put(info.smbus_dev);
} else {
/* no race - commit the result */
info.probe_count++;
amd_chipset = info;
spin_unlock_irqrestore(&amd_lock, flags);
}
return ret;
}
EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
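The rework above follows a probe-then-publish pattern; here is a minimal user-space sketch of it, with a pthread mutex standing in for amd_lock and a stub standing in for the pci_get_device() walk (all names hypothetical, not kernel code).

/*
 * Minimal sketch of the pattern used above: probe into a local result
 * without the lock held, then take the lock only to publish. If another
 * thread published first, keep its result and drop our own work.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct { int probe_count; int probe_result; } cached;

static int slow_probe(void)             /* stands in for the PCI device walk */
{
        return 1;
}

static int find_chipset_info(void)
{
        int ret;

        pthread_mutex_lock(&lock);
        if (cached.probe_count > 0) {           /* already probed: reuse result */
                cached.probe_count++;
                ret = cached.probe_result;
                pthread_mutex_unlock(&lock);
                return ret;
        }
        pthread_mutex_unlock(&lock);

        ret = slow_probe();                     /* slow work done without the lock */

        pthread_mutex_lock(&lock);
        if (cached.probe_count > 0) {
                ret = cached.probe_result;      /* lost the race: discard our work */
        } else {
                cached.probe_result = ret;      /* won the race: publish the result */
                cached.probe_count = 1;
        }
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        printf("%d\n", find_chipset_info());
        return 0;
}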
......@@ -284,6 +311,7 @@ EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
void usb_amd_dev_put(void)
{
struct pci_dev *nb, *smbus;
unsigned long flags;
spin_lock_irqsave(&amd_lock, flags);
......@@ -294,20 +322,23 @@ void usb_amd_dev_put(void)
return;
}
if (amd_chipset.nb_dev) {
pci_dev_put(amd_chipset.nb_dev);
amd_chipset.nb_dev = NULL;
}
if (amd_chipset.smbus_dev) {
pci_dev_put(amd_chipset.smbus_dev);
amd_chipset.smbus_dev = NULL;
}
/* save them so pci_dev_put() can be called outside of the spinlock */
nb = amd_chipset.nb_dev;
smbus = amd_chipset.smbus_dev;
amd_chipset.nb_dev = NULL;
amd_chipset.smbus_dev = NULL;
amd_chipset.nb_type = 0;
amd_chipset.sb_type = 0;
amd_chipset.isoc_reqs = 0;
amd_chipset.probe_result = 0;
spin_unlock_irqrestore(&amd_lock, flags);
if (nb)
pci_dev_put(nb);
if (smbus)
pci_dev_put(smbus);
}
EXPORT_SYMBOL_GPL(usb_amd_dev_put);
......
......@@ -846,7 +846,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
* Skip ports that don't have known speeds, or have duplicate
* Extended Capabilities port speed entries.
*/
if (port_speed == 0 || port_speed == -1)
if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
continue;
/*
......@@ -974,6 +974,47 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
return 0;
}
/*
* Convert interval expressed as 2^(bInterval - 1) == interval into
* straight exponent value 2^n == interval.
*
*/
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
unsigned int interval;
interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
if (interval != ep->desc.bInterval - 1)
dev_warn(&udev->dev,
"ep %#x - rounding interval to %d microframes\n",
ep->desc.bEndpointAddress,
1 << interval);
return interval;
}
/*
* Convert bInterval expressed in frames (in 1-255 range) to exponent of
* microframes, rounded down to nearest power of 2.
*/
static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
unsigned int interval;
interval = fls(8 * ep->desc.bInterval) - 1;
interval = clamp_val(interval, 3, 10);
if ((1 << interval) != 8 * ep->desc.bInterval)
dev_warn(&udev->dev,
"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
ep->desc.bEndpointAddress,
1 << interval,
8 * ep->desc.bInterval);
return interval;
}
/* Return the polling or NAK interval.
*
* The polling interval is expressed in "microframes". If xHCI's Interval field
......@@ -982,7 +1023,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
* The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
* is set to 0.
*/
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
unsigned int interval = 0;
......@@ -991,45 +1032,38 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
case USB_SPEED_HIGH:
/* Max NAK rate */
if (usb_endpoint_xfer_control(&ep->desc) ||
usb_endpoint_xfer_bulk(&ep->desc))
usb_endpoint_xfer_bulk(&ep->desc)) {
interval = ep->desc.bInterval;
break;
}
/* Fall through - SS and HS isoc/int have same decoding */
case USB_SPEED_SUPER:
if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) {
if (ep->desc.bInterval == 0)
interval = 0;
else
interval = ep->desc.bInterval - 1;
if (interval > 15)
interval = 15;
if (interval != ep->desc.bInterval + 1)
dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
ep->desc.bEndpointAddress, 1 << interval);
usb_endpoint_xfer_isoc(&ep->desc)) {
interval = xhci_parse_exponent_interval(udev, ep);
}
break;
/* Convert bInterval (in 1-255 frames) to microframes and round down to
* nearest power of 2.
*/
case USB_SPEED_FULL:
if (usb_endpoint_xfer_int(&ep->desc)) {
interval = xhci_parse_exponent_interval(udev, ep);
break;
}
/*
* Fall through for isochronous endpoint interval decoding
* since it uses the same rules as low speed interrupt
* endpoints.
*/
case USB_SPEED_LOW:
if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) {
interval = fls(8*ep->desc.bInterval) - 1;
if (interval > 10)
interval = 10;
if (interval < 3)
interval = 3;
if ((1 << interval) != 8*ep->desc.bInterval)
dev_warn(&udev->dev,
"ep %#x - rounding interval"
" to %d microframes, "
"ep desc says %d microframes\n",
ep->desc.bEndpointAddress,
1 << interval,
8*ep->desc.bInterval);
usb_endpoint_xfer_isoc(&ep->desc)) {
interval = xhci_parse_frame_interval(udev, ep);
}
break;
default:
BUG();
}
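For illustration (example descriptor values only, not part of the patch), the rounding performed by xhci_parse_exponent_interval() and xhci_parse_frame_interval() above works out as follows.

/*
 * Illustrative only: reproduces the arithmetic of the two helpers above in
 * user space with example descriptors.
 */
#include <stdio.h>

static unsigned int fls_example(unsigned int x) /* same result as the kernel's fls() */
{
        return x ? 32 - (unsigned int)__builtin_clz(x) : 0;
}

static unsigned int clamp_example(unsigned int v, unsigned int lo, unsigned int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        /* HS/SS style: bInterval is an exponent, 2^(bInterval - 1) microframes */
        unsigned int bInterval = 0;     /* out-of-spec value; clamped up to 1 */
        unsigned int exp = clamp_example(bInterval, 1, 16) - 1;
        printf("exponent form: 2^%u = %u microframe(s)\n", exp, 1u << exp);

        /* FS/LS style: bInterval counts 1 ms frames, round down to a power of 2 */
        bInterval = 9;                  /* 9 ms requested */
        exp = clamp_example(fls_example(8 * bInterval) - 1, 3, 10);
        printf("frame form: 2^%u = %u microframes (%u ms)\n",
               exp, 1u << exp, (1u << exp) / 8);        /* 64 microframes = 8 ms */
        return 0;
}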
......@@ -1041,7 +1075,7 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
* transaction opportunities per microframe", but that goes in the Max Burst
* endpoint context field.
*/
static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
if (udev->speed != USB_SPEED_SUPER ||
......@@ -1050,7 +1084,7 @@ static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
return ep->ss_ep_comp.bmAttributes;
}
static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
static u32 xhci_get_endpoint_type(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
int in;
......@@ -1084,7 +1118,7 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
* Basically, this is the maxpacket size, multiplied by the burst size
* and mult size.
*/
static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
struct usb_device *udev,
struct usb_host_endpoint *ep)
{
......@@ -1727,12 +1761,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
* found a similar duplicate.
*/
if (xhci->port_array[i] != major_revision &&
xhci->port_array[i] != (u8) -1) {
xhci->port_array[i] != DUPLICATE_ENTRY) {
if (xhci->port_array[i] == 0x03)
xhci->num_usb3_ports--;
else
xhci->num_usb2_ports--;
xhci->port_array[i] = (u8) -1;
xhci->port_array[i] = DUPLICATE_ENTRY;
}
/* FIXME: Should we disable the port? */
continue;
......@@ -1831,7 +1865,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
for (i = 0; i < num_ports; i++) {
if (xhci->port_array[i] == 0x03 ||
xhci->port_array[i] == 0 ||
xhci->port_array[i] == -1)
xhci->port_array[i] == DUPLICATE_ENTRY)
continue;
xhci->usb2_ports[port_index] =
......
......@@ -114,6 +114,10 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST;
/* AMD PLL quirk */
if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
xhci->quirks |= XHCI_AMD_PLL_FIX;
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
......
......@@ -93,7 +93,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
/* Does this link TRB point to the first segment in a ring,
* or was the previous TRB the last TRB on the last segment in the ERST?
*/
static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *seg, union xhci_trb *trb)
{
if (ring == xhci->event_ring)
......@@ -107,7 +107,7 @@ static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring
* segment? I.e. would the updated event TRB pointer step off the end of the
* event seg?
*/
static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *seg, union xhci_trb *trb)
{
if (ring == xhci->event_ring)
......@@ -116,7 +116,7 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}
static inline int enqueue_is_link_trb(struct xhci_ring *ring)
static int enqueue_is_link_trb(struct xhci_ring *ring)
{
struct xhci_link_trb *link = &ring->enqueue->link;
return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
......@@ -592,7 +592,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
ep->ep_state |= SET_DEQ_PENDING;
}
static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep)
{
ep->ep_state &= ~EP_HALT_PENDING;
......@@ -619,6 +619,13 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
/* Only giveback urb when this is the last td in urb */
if (urb_priv->td_cnt == urb_priv->length) {
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_enable();
}
}
usb_hcd_unlink_urb_from_ep(hcd, urb);
xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
......@@ -1209,7 +1216,7 @@ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
* Skip ports that don't have known speeds, or have duplicate
* Extended Capabilities port speed entries.
*/
if (port_speed == 0 || port_speed == -1)
if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
continue;
/*
......@@ -1235,6 +1242,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
u8 major_revision;
struct xhci_bus_state *bus_state;
u32 __iomem **port_array;
bool bogus_port_status = false;
/* Port status change events always have a successful completion code */
if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
......@@ -1247,6 +1255,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
if ((port_id <= 0) || (port_id > max_ports)) {
xhci_warn(xhci, "Invalid port id %d\n", port_id);
bogus_port_status = true;
goto cleanup;
}
......@@ -1258,12 +1267,14 @@ static void handle_port_status(struct xhci_hcd *xhci,
xhci_warn(xhci, "Event for port %u not in "
"Extended Capabilities, ignoring.\n",
port_id);
bogus_port_status = true;
goto cleanup;
}
if (major_revision == (u8) -1) {
if (major_revision == DUPLICATE_ENTRY) {
xhci_warn(xhci, "Event for port %u duplicated in"
"Extended Capabilities, ignoring.\n",
port_id);
bogus_port_status = true;
goto cleanup;
}
......@@ -1335,6 +1346,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
/* Update event ring dequeue pointer before dropping the lock */
inc_deq(xhci, xhci->event_ring, true);
/* Don't make the USB core poll the roothub if we got a bad port status
* change event. Besides, at that point we can't tell which roothub
* (USB 2.0 or USB 3.0) to kick.
*/
if (bogus_port_status)
return;
spin_unlock(&xhci->lock);
/* Pass this up to the core */
usb_hcd_poll_rh_status(hcd);
......@@ -1554,8 +1572,17 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
urb_priv->td_cnt++;
/* Giveback the urb when all the tds are completed */
if (urb_priv->td_cnt == urb_priv->length)
if (urb_priv->td_cnt == urb_priv->length) {
ret = 1;
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
== 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_enable();
}
}
}
}
return ret;
......@@ -1675,71 +1702,52 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct urb_priv *urb_priv;
int idx;
int len = 0;
int skip_td = 0;
union xhci_trb *cur_trb;
struct xhci_segment *cur_seg;
struct usb_iso_packet_descriptor *frame;
u32 trb_comp_code;
bool skip_td = false;
ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
trb_comp_code = GET_COMP_CODE(event->transfer_len);
urb_priv = td->urb->hcpriv;
idx = urb_priv->td_cnt;
frame = &td->urb->iso_frame_desc[idx];
if (ep->skip) {
/* The transfer is partly done */
*status = -EXDEV;
td->urb->iso_frame_desc[idx].status = -EXDEV;
} else {
/* handle completion code */
switch (trb_comp_code) {
case COMP_SUCCESS:
td->urb->iso_frame_desc[idx].status = 0;
xhci_dbg(xhci, "Successful isoc transfer!\n");
break;
case COMP_SHORT_TX:
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
td->urb->iso_frame_desc[idx].status =
-EREMOTEIO;
else
td->urb->iso_frame_desc[idx].status = 0;
break;
case COMP_BW_OVER:
td->urb->iso_frame_desc[idx].status = -ECOMM;
skip_td = 1;
break;
case COMP_BUFF_OVER:
case COMP_BABBLE:
td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
skip_td = 1;
break;
case COMP_STALL:
td->urb->iso_frame_desc[idx].status = -EPROTO;
skip_td = 1;
break;
case COMP_STOP:
case COMP_STOP_INVAL:
break;
default:
td->urb->iso_frame_desc[idx].status = -1;
break;
}
}
/* calc actual length */
if (ep->skip) {
td->urb->iso_frame_desc[idx].actual_length = 0;
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
inc_deq(xhci, ep_ring, false);
inc_deq(xhci, ep_ring, false);
return finish_td(xhci, td, event_trb, event, ep, status, true);
/* handle completion code */
switch (trb_comp_code) {
case COMP_SUCCESS:
frame->status = 0;
xhci_dbg(xhci, "Successful isoc transfer!\n");
break;
case COMP_SHORT_TX:
frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
-EREMOTEIO : 0;
break;
case COMP_BW_OVER:
frame->status = -ECOMM;
skip_td = true;
break;
case COMP_BUFF_OVER:
case COMP_BABBLE:
frame->status = -EOVERFLOW;
skip_td = true;
break;
case COMP_STALL:
frame->status = -EPROTO;
skip_td = true;
break;
case COMP_STOP:
case COMP_STOP_INVAL:
break;
default:
frame->status = -1;
break;
}
if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
td->urb->iso_frame_desc[idx].actual_length =
td->urb->iso_frame_desc[idx].length;
td->urb->actual_length +=
td->urb->iso_frame_desc[idx].length;
if (trb_comp_code == COMP_SUCCESS || skip_td) {
frame->actual_length = frame->length;
td->urb->actual_length += frame->length;
} else {
for (cur_trb = ep_ring->dequeue,
cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
......@@ -1755,7 +1763,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
TRB_LEN(event->transfer_len);
if (trb_comp_code != COMP_STOP_INVAL) {
td->urb->iso_frame_desc[idx].actual_length = len;
frame->actual_length = len;
td->urb->actual_length += len;
}
}
......@@ -1766,6 +1774,35 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
return finish_td(xhci, td, event_trb, event, ep, status, false);
}
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
struct usb_iso_packet_descriptor *frame;
int idx;
ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
urb_priv = td->urb->hcpriv;
idx = urb_priv->td_cnt;
frame = &td->urb->iso_frame_desc[idx];
/* The transfer is partly done */
*status = -EXDEV;
frame->status = -EXDEV;
/* calc actual length */
frame->actual_length = 0;
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
inc_deq(xhci, ep_ring, false);
inc_deq(xhci, ep_ring, false);
return finish_td(xhci, td, NULL, event, ep, status, true);
}
/*
* Process bulk and interrupt tds, update urb status and actual_length.
*/
......@@ -2024,36 +2061,42 @@ static int handle_tx_event(struct xhci_hcd *xhci,
}
td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
/* Is this a TRB in the currently executing TD? */
event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
td->last_trb, event_dma);
if (event_seg && ep->skip) {
if (!event_seg) {
if (!ep->skip ||
!usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
/* HC is busted, give up! */
xhci_err(xhci,
"ERROR Transfer event TRB DMA ptr not "
"part of current TD\n");
return -ESHUTDOWN;
}
ret = skip_isoc_td(xhci, td, event, ep, &status);
goto cleanup;
}
if (ep->skip) {
xhci_dbg(xhci, "Found td. Clear skip flag.\n");
ep->skip = false;
}
if (!event_seg &&
(!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) {
/* HC is busted, give up! */
xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
"part of current TD\n");
return -ESHUTDOWN;
}
if (event_seg) {
event_trb = &event_seg->trbs[(event_dma -
event_seg->dma) / sizeof(*event_trb)];
/*
* No-op TRB should not trigger interrupts.
* If event_trb is a no-op TRB, it means the
* corresponding TD has been cancelled. Just ignore
* the TD.
*/
if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
== TRB_TYPE(TRB_TR_NOOP)) {
xhci_dbg(xhci, "event_trb is a no-op TRB. "
"Skip it\n");
goto cleanup;
}
event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
sizeof(*event_trb)];
/*
* No-op TRB should not trigger interrupts.
* If event_trb is a no-op TRB, it means the
* corresponding TD has been cancelled. Just ignore
* the TD.
*/
if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
== TRB_TYPE(TRB_TR_NOOP)) {
xhci_dbg(xhci,
"event_trb is a no-op TRB. Skip it\n");
goto cleanup;
}
/* Now update the urb's actual_length and give back to
......@@ -3126,6 +3169,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
}
if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_quirk_pll_disable();
}
xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
......
......@@ -550,6 +550,9 @@ void xhci_stop(struct usb_hcd *hcd)
del_timer_sync(&xhci->event_ring_timer);
#endif
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_dev_put();
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = xhci_readl(xhci, &xhci->op_regs->status);
xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
......@@ -771,7 +774,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* If restore operation fails, re-initialize the HC during resume */
if ((temp & STS_SRE) || hibernated) {
usb_root_hub_lost_power(hcd->self.root_hub);
/* Let the USB core know _both_ roothubs lost power. */
usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
......@@ -2386,10 +2391,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
/* Everything but endpoint 0 is disabled, so free or cache the rings. */
last_freed_endpoint = 1;
for (i = 1; i < 31; ++i) {
if (!virt_dev->eps[i].ring)
continue;
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
last_freed_endpoint = i;
struct xhci_virt_ep *ep = &virt_dev->eps[i];
if (ep->ep_state & EP_HAS_STREAMS) {
xhci_free_stream_info(xhci, ep->stream_info);
ep->stream_info = NULL;
ep->ep_state &= ~EP_HAS_STREAMS;
}
if (ep->ring) {
xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
last_freed_endpoint = i;
}
}
xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
......
......@@ -30,6 +30,7 @@
/* Code sharing between pci-quirks and xhci hcd */
#include "xhci-ext-caps.h"
#include "pci-quirks.h"
/* xHCI PCI Configuration Registers */
#define XHCI_SBRN_OFFSET (0x60)
......@@ -232,7 +233,7 @@ struct xhci_op_regs {
* notification type that matches a bit set in this bit field.
*/
#define DEV_NOTE_MASK (0xffff)
#define ENABLE_DEV_NOTE(x) (1 << x)
#define ENABLE_DEV_NOTE(x) (1 << (x))
/* Most of the device notification types should only be used for debug.
* SW does need to pay attention to function wake notifications.
*/
......@@ -348,6 +349,9 @@ struct xhci_op_regs {
/* Initiate a warm port reset - complete when PORT_WRC is '1' */
#define PORT_WR (1 << 31)
/* We mark duplicate entries with -1 */
#define DUPLICATE_ENTRY ((u8)(-1))
/* Port Power Management Status and Control - port_power_base bitmasks */
/* Inactivity timer value for transitions into U1, in microseconds.
* Timeout can be up to 127us. 0xFF means an infinite timeout.
......@@ -601,11 +605,11 @@ struct xhci_ep_ctx {
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4
/* Mult - Max number of bursts within an interval, in EP companion desc. */
#define EP_MULT(p) ((p & 0x3) << 8)
#define EP_MULT(p) (((p) & 0x3) << 8)
/* bits 10:14 are Max Primary Streams */
/* bit 15 is Linear Stream Array */
/* Interval - period between requests to an endpoint - 125u increments. */
#define EP_INTERVAL(p) ((p & 0xff) << 16)
#define EP_INTERVAL(p) (((p) & 0xff) << 16)
#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
#define EP_MAXPSTREAMS_MASK (0x1f << 10)
#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
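The added parentheses above matter whenever a macro argument is itself an expression; a small stand-alone illustration with hypothetical values follows.

/*
 * Illustrative only: with the old "#define EP_INTERVAL(p) ((p & 0xff) << 16)",
 * passing an expression expands with the wrong precedence, because '&' binds
 * tighter than '|':
 *
 *   EP_INTERVAL(a | b)  ->  ((a | b & 0xff) << 16)  ==  ((a | (b & 0xff)) << 16)
 *
 * The fixed macro expands to (((a | b) & 0xff) << 16), which is what callers
 * expect.
 */
#include <stdio.h>

#define EP_INTERVAL_OLD(p)      ((p & 0xff) << 16)
#define EP_INTERVAL_NEW(p)      (((p) & 0xff) << 16)

int main(void)
{
        unsigned int a = 0x100, b = 0x03;

        printf("old: %#x\n", EP_INTERVAL_OLD(a | b));   /* 0x1030000: 0x100 leaks through */
        printf("new: %#x\n", EP_INTERVAL_NEW(a | b));   /* 0x30000 */
        return 0;
}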
......@@ -1276,6 +1280,7 @@ struct xhci_hcd {
#define XHCI_LINK_TRB_QUIRK (1 << 0)
#define XHCI_RESET_EP_QUIRK (1 << 1)
#define XHCI_NEC_HOST (1 << 2)
#define XHCI_AMD_PLL_FIX (1 << 3)
/* There are two roothubs to keep track of bus suspend info for */
struct xhci_bus_state bus_state[2];
/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
......
......@@ -14,7 +14,7 @@ config USB_MUSB_HDRC
select TWL4030_USB if MACH_OMAP_3430SDP
select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
select USB_OTG_UTILS
tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
bool 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
help
Say Y here if your system has a dual role high speed USB
controller based on the Mentor Graphics silicon IP. Then
......@@ -30,8 +30,8 @@ config USB_MUSB_HDRC
If you do not know what this is, please say N.
To compile this driver as a module, choose M here; the
module will be called "musb-hdrc".
# To compile this driver as a module, choose M here; the
# module will be called "musb-hdrc".
choice
prompt "Platform Glue Layer"
......
......@@ -21,6 +21,7 @@
#include <asm/cacheflush.h>
#include "musb_core.h"
#include "musbhsdma.h"
#include "blackfin.h"
struct bfin_glue {
......@@ -332,6 +333,27 @@ static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode)
return -EIO;
}
static int bfin_musb_adjust_channel_params(struct dma_channel *channel,
u16 packet_sz, u8 *mode,
dma_addr_t *dma_addr, u32 *len)
{
struct musb_dma_channel *musb_channel = channel->private_data;
/*
* Anomaly 05000450 might cause data corruption when using DMA
* MODE 1 transmits with short packet. So to work around this,
* we truncate all MODE 1 transfers down to a multiple of the
* max packet size, and then do the last short packet transfer
* (if there is any) using MODE 0.
*/
if (ANOMALY_05000450) {
if (musb_channel->transmit && *mode == 1)
*len = *len - (*len % packet_sz);
}
return 0;
}
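A tiny worked example (assumed lengths, not from the patch) of the MODE 1 truncation above:

/*
 * Illustrative only: for a transmit of len = 1000 bytes with packet_sz = 64,
 * DMA MODE 1 is limited to 1000 - (1000 % 64) = 960 bytes (15 full packets);
 * the remaining 40-byte short packet is then sent separately using MODE 0.
 */
#include <stdio.h>

int main(void)
{
        unsigned int len = 1000, packet_sz = 64;
        unsigned int mode1_len = len - (len % packet_sz);

        printf("MODE 1: %u bytes, MODE 0 tail: %u bytes\n",
               mode1_len, len - mode1_len);     /* 960 and 40 */
        return 0;
}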
static void bfin_musb_reg_init(struct musb *musb)
{
if (ANOMALY_05000346) {
......@@ -430,6 +452,8 @@ static const struct musb_platform_ops bfin_ops = {
.vbus_status = bfin_musb_vbus_status,
.set_vbus = bfin_musb_set_vbus,
.adjust_channel_params = bfin_musb_adjust_channel_params,
};
static u64 bfin_dmamask = DMA_BIT_MASK(32);
......
......@@ -597,12 +597,12 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
length = min(n_bds * maxpacket, length);
}
DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n",
tx->index,
maxpacket,
rndis ? "rndis" : "transparent",
n_bds,
addr, length);
(unsigned long long)addr, length);
cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
......@@ -820,7 +820,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
length = min(n_bds * maxpacket, length);
DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
"dma 0x%x len %u %u/%u\n",
"dma 0x%llx len %u %u/%u\n",
rx->index, maxpacket,
onepacket
? (is_rndis ? "rndis" : "onepacket")
......@@ -829,7 +829,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff,
addr, length, rx->channel.actual_len, rx->buf_len);
(unsigned long long)addr, length,
rx->channel.actual_len, rx->buf_len);
/* only queue one segment at a time, since the hardware prevents
* correct queue shutdown after unexpected short packets
......@@ -1039,9 +1040,9 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
if (!completed && (bd->hw_options & CPPI_OWN_SET))
break;
DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
DBG(5, "C/RXBD %llx: nxt %08x buf %08x "
"off.len %08x opt.len %08x (%d)\n",
bd->dma, bd->hw_next, bd->hw_bufp,
(unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options,
rx->channel.actual_len);
......@@ -1111,11 +1112,12 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
musb_ep_select(cppi->mregs, rx->index + 1);
csr = musb_readw(regs, MUSB_RXCSR);
if (csr & MUSB_RXCSR_DMAENAB) {
DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
DBG(4, "list%d %p/%p, last %llx%s, csr %04x\n",
rx->index,
rx->head, rx->tail,
rx->last_processed
? rx->last_processed->dma
? (unsigned long long)
rx->last_processed->dma
: 0,
completed ? ", completed" : "",
csr);
......@@ -1167,8 +1169,11 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
if (!tx && !rx)
if (!tx && !rx) {
if (cppi->irq)
spin_unlock_irqrestore(&musb->lock, flags);
return IRQ_NONE;
}
DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
......@@ -1199,7 +1204,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
*/
if (NULL == bd) {
DBG(1, "null BD\n");
tx_ram->tx_complete = 0;
musb_writel(&tx_ram->tx_complete, 0, 0);
continue;
}
......@@ -1452,7 +1457,7 @@ static int cppi_channel_abort(struct dma_channel *channel)
* compare mode by writing 1 to the tx_complete register.
*/
cppi_reset_tx(tx_ram, 1);
cppi_ch->head = 0;
cppi_ch->head = NULL;
musb_writel(&tx_ram->tx_complete, 0, 1);
cppi_dump_tx(5, cppi_ch, " (done teardown)");
......
......@@ -1030,6 +1030,7 @@ static void musb_shutdown(struct platform_device *pdev)
struct musb *musb = dev_to_musb(&pdev->dev);
unsigned long flags;
pm_runtime_get_sync(musb->controller);
spin_lock_irqsave(&musb->lock, flags);
musb_platform_disable(musb);
musb_generic_disable(musb);
......@@ -1040,6 +1041,7 @@ static void musb_shutdown(struct platform_device *pdev)
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
musb_platform_exit(musb);
pm_runtime_put(musb->controller);
/* FIXME power down */
}
......
......@@ -261,6 +261,7 @@ enum musb_g_ep0_state {
* @try_idle: tries to idle the IP
* @vbus_status: returns vbus status if possible
* @set_vbus: forces vbus status
* @adjust_channel_params: pre check for standard dma channel_program func
*/
struct musb_platform_ops {
int (*init)(struct musb *musb);
......@@ -274,6 +275,10 @@ struct musb_platform_ops {
int (*vbus_status)(struct musb *musb);
void (*set_vbus)(struct musb *musb, int on);
int (*adjust_channel_params)(struct dma_channel *channel,
u16 packet_sz, u8 *mode,
dma_addr_t *dma_addr, u32 *len);
};
/*
......
......@@ -535,7 +535,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
is_dma = 1;
csr |= MUSB_TXCSR_P_WZC_BITS;
csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
MUSB_TXCSR_TXPKTRDY);
MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
musb_writew(epio, MUSB_TXCSR, csr);
/* Ensure writebuffer is empty. */
csr = musb_readw(epio, MUSB_TXCSR);
......@@ -1296,7 +1296,7 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
}
/* if the hardware doesn't have the request, easy ... */
if (musb_ep->req_list.next != &request->list || musb_ep->busy)
if (musb_ep->req_list.next != &req->list || musb_ep->busy)
musb_g_giveback(musb_ep, request, -ECONNRESET);
/* ... else abort the dma transfer ... */
......
......@@ -169,6 +169,14 @@ static int dma_channel_program(struct dma_channel *channel,
BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
channel->status == MUSB_DMA_STATUS_BUSY);
/* Let targets check/tweak the arguments */
if (musb->ops->adjust_channel_params) {
int ret = musb->ops->adjust_channel_params(channel,
packet_sz, &mode, &dma_addr, &len);
if (ret)
return ret;
}
/*
* The DMA engine in RTL1.8 and above cannot handle
* DMA addresses that are not aligned to a 4 byte boundary.
......
......@@ -259,9 +259,10 @@ static int musb_otg_notifications(struct notifier_block *nb,
case USB_EVENT_VBUS:
DBG(4, "VBUS Connect\n");
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
if (musb->gadget_driver)
pm_runtime_get_sync(musb->controller);
#endif
otg_init(musb->xceiv);
break;
......
......@@ -93,6 +93,8 @@ static int __init ux500_probe(struct platform_device *pdev)
}
musb->dev.parent = &pdev->dev;
musb->dev.dma_mask = pdev->dev.dma_mask;
musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
glue->dev = &pdev->dev;
glue->musb = musb;
......
......@@ -151,6 +151,8 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
* /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
*/
static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
......@@ -525,6 +527,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) },
{ USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) },
{ USB_DEVICE(OCT_VID, OCT_US101_PID) },
{ USB_DEVICE(OCT_VID, OCT_DK201_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID),
.driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID),
......@@ -787,6 +790,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
{ USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
{ USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
......
......@@ -300,6 +300,8 @@
* Hameg HO820 and HO870 interface (using VID 0x0403)
*/
#define HAMEG_HO820_PID 0xed74
#define HAMEG_HO730_PID 0xed73
#define HAMEG_HO720_PID 0xed72
#define HAMEG_HO870_PID 0xed71
/*
......@@ -572,6 +574,7 @@
/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
#define OCT_DK201_PID 0x0103 /* OCT DK201 USB docking station */
#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
/*
......@@ -1141,3 +1144,12 @@
#define QIHARDWARE_VID 0x20B7
#define MILKYMISTONE_JTAGSERIAL_PID 0x0713
/*
* CTI GmbH RS485 Converter http://www.cti-lean.com/
*/
/* USB-485-Mini*/
#define FTDI_CTI_MINI_PID 0xF608
/* USB-Nano-485*/
#define FTDI_CTI_NANO_PID 0xF60B
......@@ -407,6 +407,10 @@ static void option_instat_callback(struct urb *urb);
/* ONDA MT825UP HSDPA 14.2 modem */
#define ONDA_MT825UP 0x000b
/* Samsung products */
#define SAMSUNG_VENDOR_ID 0x04e8
#define SAMSUNG_PRODUCT_GT_B3730 0x6889
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
......@@ -968,6 +972,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730/GT-B3710 LTE USB modem.*/
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
......
......@@ -111,7 +111,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
ifnum = intf->desc.bInterfaceNumber;
dbg("This Interface = %d", ifnum);
data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private),
data = kzalloc(sizeof(struct usb_wwan_intf_private),
GFP_KERNEL);
if (!data)
return -ENOMEM;
......@@ -134,8 +134,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
dbg("QDL port found");
if (serial->interface->num_altsetting == 1)
return 0;
if (serial->interface->num_altsetting == 1) {
retval = 0; /* Success */
break;
}
retval = usb_set_interface(serial->dev, ifnum, 1);
if (retval < 0) {
......@@ -145,7 +147,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
retval = -ENODEV;
kfree(data);
}
return retval;
}
break;
......@@ -166,6 +167,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
"Could not set interface, error %d\n",
retval);
retval = -ENODEV;
kfree(data);
}
} else if (ifnum == 2) {
dbg("Modem port found");
......@@ -177,7 +179,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
retval = -ENODEV;
kfree(data);
}
return retval;
} else if (ifnum==3) {
/*
* NMEA (serial line 9600 8N1)
......@@ -191,6 +192,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
"Could not set interface, error %d\n",
retval);
retval = -ENODEV;
kfree(data);
}
}
break;
......@@ -199,12 +201,27 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
dev_err(&serial->dev->dev,
"unknown number of interfaces: %d\n", nintf);
kfree(data);
return -ENODEV;
retval = -ENODEV;
}
/* Set serial->private if not returning -ENODEV */
if (retval != -ENODEV)
usb_set_serial_data(serial, data);
return retval;
}
static void qc_release(struct usb_serial *serial)
{
struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
dbg("%s", __func__);
/* Call usb_wwan release & free the private data allocated in qcprobe */
usb_wwan_release(serial);
usb_set_serial_data(serial, NULL);
kfree(priv);
}
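The qcprobe()/qc_release() changes above boil down to a simple ownership rule; a hedged user-space sketch of that rule with hypothetical names, not the qcserial code:

/*
 * Minimal sketch: the probe path either frees its allocation on every error
 * path or publishes it exactly once on success, and the matching release
 * callback is the only place that frees a published allocation.
 */
#include <stdio.h>
#include <stdlib.h>

struct serial { void *private_data; };

static int probe(struct serial *s, int setup_fails)
{
        int *data = calloc(1, sizeof(*data));

        if (!data)
                return -1;
        if (setup_fails) {
                free(data);             /* error path: nothing was published */
                return -1;
        }
        s->private_data = data;         /* publish only on success */
        return 0;
}

static void release(struct serial *s)
{
        free(s->private_data);          /* the single owner frees on teardown */
        s->private_data = NULL;
}

int main(void)
{
        struct serial s = { 0 };

        if (probe(&s, 0) == 0)
                release(&s);
        printf("no leak on either path\n");
        return 0;
}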
static struct usb_serial_driver qcdevice = {
.driver = {
.owner = THIS_MODULE,
......@@ -222,7 +239,7 @@ static struct usb_serial_driver qcdevice = {
.chars_in_buffer = usb_wwan_chars_in_buffer,
.attach = usb_wwan_startup,
.disconnect = usb_wwan_disconnect,
.release = usb_wwan_release,
.release = qc_release,
#ifdef CONFIG_PM
.suspend = usb_wwan_suspend,
.resume = usb_wwan_resume,
......