Commit 7de8b926 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (34 commits)
  USB: xhci: Stall handling bug fixes.
  USB: xhci: Support for 64-byte contexts
  USB: xhci: Always align output device contexts to 64 bytes.
  USB: xhci: Scratchpad buffer allocation
  USB: Fix parsing of SuperSpeed Endpoint Companion descriptor.
  USB: xhci: Fail gracefully if there's no SS ep companion descriptor.
  USB: xhci: Handle babble errors on transfers.
  USB: xhci: Setup HW retries correctly.
  USB: xhci: Check if the host controller died in IRQ handler.
  USB: xhci: Don't oops if the host doesn't halt.
  USB: xhci: Make debugging more verbose.
  USB: xhci: Correct Event Handler Busy flag usage.
  USB: xhci: Handle short control packets correctly.
  USB: xhci: Represent 64-bit addresses with one u64.
  USB: xhci: Use GFP_ATOMIC while holding spinlocks.
  USB: xhci: Deal with stalled endpoints.
  USB: xhci: Set TD size in transfer TRB.
  USB: xhci: fix less- and greater than confusion
  USB: usbtest: no need for USB_DEVICEFS
  USB: musb: fix CONFIGDATA register read issue
  ...
parents e043e42b c92bcfa7
@@ -80,38 +80,18 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
     int max_tx;
     int i;

-    /* Allocate space for the SS endpoint companion descriptor */
-    ep->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
-            GFP_KERNEL);
-    if (!ep->ss_ep_comp)
-        return -ENOMEM;
     desc = (struct usb_ss_ep_comp_descriptor *) buffer;
     if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
         dev_warn(ddev, "No SuperSpeed endpoint companion for config %d "
                 " interface %d altsetting %d ep %d: "
                 "using minimum values\n",
                 cfgno, inum, asnum, ep->desc.bEndpointAddress);
-        ep->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
-        ep->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
-        ep->ss_ep_comp->desc.bMaxBurst = 0;
-        /*
-         * Leave bmAttributes as zero, which will mean no streams for
-         * bulk, and isoc won't support multiple bursts of packets.
-         * With bursts of only one packet, and a Mult of 1, the max
-         * amount of data moved per endpoint service interval is one
-         * packet.
-         */
-        if (usb_endpoint_xfer_isoc(&ep->desc) ||
-                usb_endpoint_xfer_int(&ep->desc))
-            ep->ss_ep_comp->desc.wBytesPerInterval =
-                    ep->desc.wMaxPacketSize;
         /*
          * The next descriptor is for an Endpoint or Interface,
          * no extra descriptors to copy into the companion structure,
          * and we didn't eat up any of the buffer.
          */
-        retval = 0;
-        goto valid;
+        return 0;
     }
     memcpy(&ep->ss_ep_comp->desc, desc, USB_DT_SS_EP_COMP_SIZE);
     desc = &ep->ss_ep_comp->desc;
@@ -320,6 +300,28 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
     buffer += i;
     size -= i;

+    /* Allocate space for the SS endpoint companion descriptor */
+    endpoint->ss_ep_comp = kzalloc(sizeof(struct usb_host_ss_ep_comp),
+            GFP_KERNEL);
+    if (!endpoint->ss_ep_comp)
+        return -ENOMEM;
+
+    /* Fill in some default values (may be overwritten later) */
+    endpoint->ss_ep_comp->desc.bLength = USB_DT_SS_EP_COMP_SIZE;
+    endpoint->ss_ep_comp->desc.bDescriptorType = USB_DT_SS_ENDPOINT_COMP;
+    endpoint->ss_ep_comp->desc.bMaxBurst = 0;
+    /*
+     * Leave bmAttributes as zero, which will mean no streams for
+     * bulk, and isoc won't support multiple bursts of packets.
+     * With bursts of only one packet, and a Mult of 1, the max
+     * amount of data moved per endpoint service interval is one
+     * packet.
+     */
+    if (usb_endpoint_xfer_isoc(&endpoint->desc) ||
+            usb_endpoint_xfer_int(&endpoint->desc))
+        endpoint->ss_ep_comp->desc.wBytesPerInterval =
+                endpoint->desc.wMaxPacketSize;
+
     if (size > 0) {
         retval = usb_parse_ss_endpoint_companion(ddev, cfgno,
                 inum, asnum, endpoint, num_ep, buffer,
@@ -329,6 +331,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
             retval = buffer - buffer0;
         }
     } else {
+        dev_warn(ddev, "config %d interface %d altsetting %d "
+                "endpoint 0x%X has no "
+                "SuperSpeed companion descriptor\n",
+                cfgno, inum, asnum, d->bEndpointAddress);
         retval = buffer - buffer0;
     }
 } else {
...
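
A note on the config.c change above: the companion-descriptor defaults now get filled in usb_parse_endpoint() before usb_parse_ss_endpoint_companion() runs, so a SuperSpeed endpoint always has a usable ss_ep_comp even when the device omits the descriptor and the parser bails out early. A minimal sketch of what consumers can now rely on (hypothetical helper, not part of the patch):

/* Hypothetical consumer: after parsing, ep->ss_ep_comp is always
 * allocated and holds at least the minimum default values
 * (bMaxBurst = 0, wBytesPerInterval = wMaxPacketSize for periodic
 * endpoints), so the companion fields can be read unconditionally.
 */
static unsigned int ss_bytes_per_interval(struct usb_host_endpoint *ep)
{
    return ep->ss_ep_comp->desc.wBytesPerInterval;
}
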
@@ -105,6 +105,7 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
     struct ehci_hcd *ehci = hcd_to_ehci(hcd);
     int retval;

+    ehci_reset(ehci);
     retval = ehci_halt(ehci);
     if (retval)
         return retval;
@@ -118,7 +119,6 @@ static int ehci_orion_setup(struct usb_hcd *hcd)
     hcd->has_tt = 1;

-    ehci_reset(ehci);
     ehci_port_power(ehci, 0);

     return retval;
...
@@ -282,6 +282,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
 static void ohci_omap_stop(struct usb_hcd *hcd)
 {
     dev_dbg(hcd->self.controller, "stopping USB Controller\n");
+    ohci_stop(hcd);
     omap_ohci_clock_power(0);
 }
...
@@ -173,6 +173,7 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
 {
     void *addr;
     u32 temp;
+    u64 temp_64;

     addr = &ir_set->irq_pending;
     temp = xhci_readl(xhci, addr);
@@ -200,25 +201,15 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int
         xhci_dbg(xhci, "  WARN: %p: ir_set.rsvd = 0x%x\n",
                 addr, (unsigned int)temp);

-    addr = &ir_set->erst_base[0];
-    temp = xhci_readl(xhci, addr);
-    xhci_dbg(xhci, "  %p: ir_set.erst_base[0] = 0x%x\n",
-            addr, (unsigned int) temp);
-
-    addr = &ir_set->erst_base[1];
-    temp = xhci_readl(xhci, addr);
-    xhci_dbg(xhci, "  %p: ir_set.erst_base[1] = 0x%x\n",
-            addr, (unsigned int) temp);
+    addr = &ir_set->erst_base;
+    temp_64 = xhci_read_64(xhci, addr);
+    xhci_dbg(xhci, "  %p: ir_set.erst_base = @%08llx\n",
+            addr, temp_64);

-    addr = &ir_set->erst_dequeue[0];
-    temp = xhci_readl(xhci, addr);
-    xhci_dbg(xhci, "  %p: ir_set.erst_dequeue[0] = 0x%x\n",
-            addr, (unsigned int) temp);
-
-    addr = &ir_set->erst_dequeue[1];
-    temp = xhci_readl(xhci, addr);
-    xhci_dbg(xhci, "  %p: ir_set.erst_dequeue[1] = 0x%x\n",
-            addr, (unsigned int) temp);
+    addr = &ir_set->erst_dequeue;
+    temp_64 = xhci_read_64(xhci, addr);
+    xhci_dbg(xhci, "  %p: ir_set.erst_dequeue = @%08llx\n",
+            addr, temp_64);
 }

 void xhci_print_run_regs(struct xhci_hcd *xhci)
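
The xhci_read_64()/xhci_write_64() accessors used throughout the hunks below come from the "Represent 64-bit addresses with one u64" patch in this series. For reference, a sketch of how such a reader is typically composed from two 32-bit MMIO accesses, low word first (assumed implementation; the real helper lives in the driver's header):

/* Sketch: not every platform can issue an atomic 64-bit MMIO read, so
 * a 64-bit xHCI register is read as two 32-bit halves and recombined.
 * The low word sits at the register offset, the high word at +4.
 */
static inline u64 xhci_read_64_sketch(const struct xhci_hcd *xhci,
        __u64 __iomem *regs)
{
    __u32 __iomem *ptr = (__u32 __iomem *) regs;
    u64 val_lo = readl(ptr);
    u64 val_hi = readl(ptr + 1);

    return val_lo + (val_hi << 32);
}
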
@@ -268,8 +259,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
         xhci_dbg(xhci, "Link TRB:\n");
         xhci_print_trb_offsets(xhci, trb);

-        address = trb->link.segment_ptr[0] +
-            (((u64) trb->link.segment_ptr[1]) << 32);
+        address = trb->link.segment_ptr;
         xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);

         xhci_dbg(xhci, "Interrupter target = 0x%x\n",
@@ -282,8 +272,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
                 (unsigned int) (trb->link.control & TRB_NO_SNOOP));
         break;
     case TRB_TYPE(TRB_TRANSFER):
-        address = trb->trans_event.buffer[0] +
-            (((u64) trb->trans_event.buffer[1]) << 32);
+        address = trb->trans_event.buffer;
         /*
          * FIXME: look at flags to figure out if it's an address or if
          * the data is directly in the buffer field.
@@ -291,8 +280,7 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
         xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
         break;
     case TRB_TYPE(TRB_COMPLETION):
-        address = trb->event_cmd.cmd_trb[0] +
-            (((u64) trb->event_cmd.cmd_trb[1]) << 32);
+        address = trb->event_cmd.cmd_trb;
         xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
         xhci_dbg(xhci, "Completion status = %u\n",
                 (unsigned int) GET_COMP_CODE(trb->event_cmd.status));
@@ -328,8 +316,8 @@ void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
     for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
         trb = &seg->trbs[i];
         xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
-                (unsigned int) trb->link.segment_ptr[0],
-                (unsigned int) trb->link.segment_ptr[1],
+                lower_32_bits(trb->link.segment_ptr),
+                upper_32_bits(trb->link.segment_ptr),
                 (unsigned int) trb->link.intr_target,
                 (unsigned int) trb->link.control);
         addr += sizeof(*trb);
@@ -386,8 +374,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
         entry = &erst->entries[i];
         xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
                 (unsigned int) addr,
-                (unsigned int) entry->seg_addr[0],
-                (unsigned int) entry->seg_addr[1],
+                lower_32_bits(entry->seg_addr),
+                upper_32_bits(entry->seg_addr),
                 (unsigned int) entry->seg_size,
                 (unsigned int) entry->rsvd);
         addr += sizeof(*entry);
@@ -396,90 +384,147 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)

 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
 {
-    u32 val;
+    u64 val;

-    val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
-    xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = 0x%x\n", val);
-    val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
-    xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
+    val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+    xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
+            lower_32_bits(val));
+    xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
+            upper_32_bits(val));
 }

-void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
+/* Print the last 32 bytes for 64-byte contexts */
+static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
+{
+    int i;
+    for (i = 0; i < 4; ++i) {
+        xhci_dbg(xhci, "@%p (virt) @%08llx "
+                "(dma) %#08llx - rsvd64[%d]\n",
+                &ctx[4 + i], (unsigned long long)dma,
+                ctx[4 + i], i);
+        dma += 8;
+    }
+}
+
+void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
 {
-    int i, j;
-    int last_ep_ctx = 31;
     /* Fields are 32 bits wide, DMA addresses are in bytes */
     int field_size = 32 / 8;
+    int i;
+    struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+    dma_addr_t dma = ctx->dma +
+        ((unsigned long)slot_ctx - (unsigned long)ctx->bytes);
+    int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

-    xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
-            &ctx->drop_flags, (unsigned long long)dma,
-            ctx->drop_flags);
-    dma += field_size;
-    xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
-            &ctx->add_flags, (unsigned long long)dma,
-            ctx->add_flags);
-    dma += field_size;
-    for (i = 0; i > 6; ++i) {
-        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-                &ctx->rsvd[i], (unsigned long long)dma,
-                ctx->rsvd[i], i);
-        dma += field_size;
-    }
-
     xhci_dbg(xhci, "Slot Context:\n");
     xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
-            &ctx->slot.dev_info,
-            (unsigned long long)dma, ctx->slot.dev_info);
+            &slot_ctx->dev_info,
+            (unsigned long long)dma, slot_ctx->dev_info);
     dma += field_size;
     xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
-            &ctx->slot.dev_info2,
-            (unsigned long long)dma, ctx->slot.dev_info2);
+            &slot_ctx->dev_info2,
+            (unsigned long long)dma, slot_ctx->dev_info2);
     dma += field_size;
     xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
-            &ctx->slot.tt_info,
-            (unsigned long long)dma, ctx->slot.tt_info);
+            &slot_ctx->tt_info,
+            (unsigned long long)dma, slot_ctx->tt_info);
     dma += field_size;
     xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
-            &ctx->slot.dev_state,
-            (unsigned long long)dma, ctx->slot.dev_state);
+            &slot_ctx->dev_state,
+            (unsigned long long)dma, slot_ctx->dev_state);
     dma += field_size;
-    for (i = 0; i > 4; ++i) {
+    for (i = 0; i < 4; ++i) {
         xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-                &ctx->slot.reserved[i], (unsigned long long)dma,
-                ctx->slot.reserved[i], i);
+                &slot_ctx->reserved[i], (unsigned long long)dma,
+                slot_ctx->reserved[i], i);
         dma += field_size;
     }
+
+    if (csz)
+        dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
+}
+
+void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+        struct xhci_container_ctx *ctx,
+        unsigned int last_ep)
+{
+    int i, j;
+    int last_ep_ctx = 31;
+    /* Fields are 32 bits wide, DMA addresses are in bytes */
+    int field_size = 32 / 8;
+    int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

     if (last_ep < 31)
         last_ep_ctx = last_ep + 1;
     for (i = 0; i < last_ep_ctx; ++i) {
+        struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
+        dma_addr_t dma = ctx->dma +
+            ((unsigned long)ep_ctx - (unsigned long)ctx->bytes);
+
         xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
         xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
-                &ctx->ep[i].ep_info,
-                (unsigned long long)dma, ctx->ep[i].ep_info);
+                &ep_ctx->ep_info,
+                (unsigned long long)dma, ep_ctx->ep_info);
         dma += field_size;
         xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
-                &ctx->ep[i].ep_info2,
-                (unsigned long long)dma, ctx->ep[i].ep_info2);
+                &ep_ctx->ep_info2,
+                (unsigned long long)dma, ep_ctx->ep_info2);
         dma += field_size;
-        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[0]\n",
-                &ctx->ep[i].deq[0],
-                (unsigned long long)dma, ctx->ep[i].deq[0]);
-        dma += field_size;
-        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - deq[1]\n",
-                &ctx->ep[i].deq[1],
-                (unsigned long long)dma, ctx->ep[i].deq[1]);
-        dma += field_size;
+        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
+                &ep_ctx->deq,
+                (unsigned long long)dma, ep_ctx->deq);
+        dma += 2*field_size;
         xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
-                &ctx->ep[i].tx_info,
-                (unsigned long long)dma, ctx->ep[i].tx_info);
+                &ep_ctx->tx_info,
+                (unsigned long long)dma, ep_ctx->tx_info);
         dma += field_size;
         for (j = 0; j < 3; ++j) {
             xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
-                    &ctx->ep[i].reserved[j],
+                    &ep_ctx->reserved[j],
                     (unsigned long long)dma,
-                    ctx->ep[i].reserved[j], j);
+                    ep_ctx->reserved[j], j);
             dma += field_size;
         }
+
+        if (csz)
+            dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
+    }
+}
+
+void xhci_dbg_ctx(struct xhci_hcd *xhci,
+        struct xhci_container_ctx *ctx,
+        unsigned int last_ep)
+{
+    int i;
+    /* Fields are 32 bits wide, DMA addresses are in bytes */
+    int field_size = 32 / 8;
+    struct xhci_slot_ctx *slot_ctx;
+    dma_addr_t dma = ctx->dma;
+    int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+
+    if (ctx->type == XHCI_CTX_TYPE_INPUT) {
+        struct xhci_input_control_ctx *ctrl_ctx =
+            xhci_get_input_control_ctx(xhci, ctx);
+        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
+                &ctrl_ctx->drop_flags, (unsigned long long)dma,
+                ctrl_ctx->drop_flags);
+        dma += field_size;
+        xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
+                &ctrl_ctx->add_flags, (unsigned long long)dma,
+                ctrl_ctx->add_flags);
+        dma += field_size;
+        for (i = 0; i < 6; ++i) {
+            xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
+                    &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
+                    ctrl_ctx->rsvd2[i], i);
             dma += field_size;
         }
+
+        if (csz)
+            dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
     }
+
+    slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+    xhci_dbg_slot_ctx(xhci, ctx);
+    xhci_dbg_ep_ctx(xhci, ctx, last_ep);
 }
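
The new debug helpers derive each context's DMA address from its byte offset inside the container, because with 64-byte contexts (HCC_64BYTE_CONTEXT set) every slot and endpoint context doubles in size and fixed offsets no longer work. A worked sketch of the offset arithmetic, using the CTX_SIZE() macro added in xhci-mem.c further down:

/* Sketch: where endpoint context N lives inside a container.  A device
 * (output) container is [slot ctx][ep ctx 0..30]; an input container
 * prepends one input control context.  Every entry is CTX_SIZE bytes
 * (32 or 64, depending on hcc_params).
 */
static dma_addr_t ep_ctx_dma_sketch(struct xhci_hcd *xhci,
        struct xhci_container_ctx *ctx, unsigned int ep_index)
{
    unsigned int entry = ep_index + 1;      /* skip the slot context */

    if (ctx->type == XHCI_CTX_TYPE_INPUT)
        entry++;                            /* skip input control ctx too */
    return ctx->dma + entry * CTX_SIZE(xhci->hcc_params);
}
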
@@ -103,7 +103,10 @@ int xhci_reset(struct xhci_hcd *xhci)
     u32 state;

     state = xhci_readl(xhci, &xhci->op_regs->status);
-    BUG_ON((state & STS_HALT) == 0);
+    if ((state & STS_HALT) == 0) {
+        xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
+        return 0;
+    }

     xhci_dbg(xhci, "// Reset the HC\n");
     command = xhci_readl(xhci, &xhci->op_regs->command);
@@ -226,6 +229,7 @@ int xhci_init(struct usb_hcd *hcd)
 static void xhci_work(struct xhci_hcd *xhci)
 {
     u32 temp;
+    u64 temp_64;

     /*
      * Clear the op reg interrupt status first,
@@ -248,9 +252,9 @@ static void xhci_work(struct xhci_hcd *xhci)
     /* FIXME this should be a delayed service routine that clears the EHB */
     xhci_handle_event(xhci);

-    /* Clear the event handler busy flag; the event ring should be empty. */
-    temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-    xhci_writel(xhci, temp & ~ERST_EHB, &xhci->ir_set->erst_dequeue[0]);
+    /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
+    temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+    xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
     /* Flush posted writes -- FIXME is this necessary? */
     xhci_readl(xhci, &xhci->ir_set->irq_pending);
 }
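
The erst_dequeue fix above is easy to misread: the event handler busy flag is a write-1-to-clear (RW1C) bit, so the old temp & ~ERST_EHB write preserved the flag instead of clearing it, and the new temp_64 | ERST_EHB write is what actually clears it. The general RW1C pattern, sketched:

/* Sketch: clearing an RW1C status bit.  Hardware ignores a 0 write to
 * an RW1C bit and clears it on a 1 write, so the usual read-modify-
 * write intuition is inverted: OR the bit in to clear it; masking it
 * off leaves it set.
 */
static void clear_rw1c_bit_sketch(struct xhci_hcd *xhci,
        __u64 __iomem *reg, u64 bit)
{
    u64 val = xhci_read_64(xhci, reg);

    xhci_write_64(xhci, val | bit, reg);    /* write 1 => bit cleared */
}
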
@@ -266,19 +270,34 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 {
     struct xhci_hcd *xhci = hcd_to_xhci(hcd);
     u32 temp, temp2;
+    union xhci_trb *trb;

     spin_lock(&xhci->lock);
+    trb = xhci->event_ring->dequeue;
     /* Check if the xHC generated the interrupt, or the irq is shared */
     temp = xhci_readl(xhci, &xhci->op_regs->status);
     temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+    if (temp == 0xffffffff && temp2 == 0xffffffff)
+        goto hw_died;
+
     if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
         spin_unlock(&xhci->lock);
         return IRQ_NONE;
     }
+    xhci_dbg(xhci, "op reg status = %08x\n", temp);
+    xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
+    xhci_dbg(xhci, "Event ring dequeue ptr:\n");
+    xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
+            (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
+            lower_32_bits(trb->link.segment_ptr),
+            upper_32_bits(trb->link.segment_ptr),
+            (unsigned int) trb->link.intr_target,
+            (unsigned int) trb->link.control);

     if (temp & STS_FATAL) {
         xhci_warn(xhci, "WARNING: Host System Error\n");
         xhci_halt(xhci);
+hw_died:
         xhci_to_hcd(xhci)->state = HC_STATE_HALT;
         spin_unlock(&xhci->lock);
         return -ESHUTDOWN;
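
The hw_died path works because a PCI read targeting a device that has died or been surprise-removed completes with all ones; two unrelated registers both returning 0xffffffff is therefore treated as "controller gone" rather than as real register contents. The same check in isolation (illustrative sketch):

/* Sketch: detect a dead/removed host controller.  0xffffffff is a
 * legal-looking value for a single register, but vanishingly unlikely
 * for both the op-reg status and the interrupter pending register at
 * once, so the pair serves as a liveness probe.
 */
static bool xhci_hc_died_sketch(struct xhci_hcd *xhci)
{
    u32 status = xhci_readl(xhci, &xhci->op_regs->status);
    u32 pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);

    return status == 0xffffffff && pending == 0xffffffff;
}
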
@@ -295,6 +314,7 @@ void xhci_event_ring_work(unsigned long arg)
 {
     unsigned long flags;
     int temp;
+    u64 temp_64;
     struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
     int i, j;
@@ -311,9 +331,9 @@ void xhci_event_ring_work(unsigned long arg)
     xhci_dbg(xhci, "Event ring:\n");
     xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
     xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-    temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-    temp &= ERST_PTR_MASK;
-    xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
+    temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+    temp_64 &= ~ERST_PTR_MASK;
+    xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
     xhci_dbg(xhci, "Command ring:\n");
     xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
     xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
@@ -356,6 +376,7 @@ void xhci_event_ring_work(unsigned long arg)
 int xhci_run(struct usb_hcd *hcd)
 {
     u32 temp;
+    u64 temp_64;
     struct xhci_hcd *xhci = hcd_to_xhci(hcd);
     void (*doorbell)(struct xhci_hcd *) = NULL;
@@ -382,6 +403,20 @@ int xhci_run(struct usb_hcd *hcd)
     add_timer(&xhci->event_ring_timer);
 #endif

+    xhci_dbg(xhci, "Command ring memory map follows:\n");
+    xhci_debug_ring(xhci, xhci->cmd_ring);
+    xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
+    xhci_dbg_cmd_ptrs(xhci);
+
+    xhci_dbg(xhci, "ERST memory map follows:\n");
+    xhci_dbg_erst(xhci, &xhci->erst);
+
+    xhci_dbg(xhci, "Event ring:\n");
+    xhci_debug_ring(xhci, xhci->event_ring);
+    xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
+    temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+    temp_64 &= ~ERST_PTR_MASK;
+    xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
+
     xhci_dbg(xhci, "// Set the interrupt modulation register\n");
     temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
     temp &= ~ER_IRQ_INTERVAL_MASK;
@@ -406,22 +441,6 @@ int xhci_run(struct usb_hcd *hcd)
     if (NUM_TEST_NOOPS > 0)
         doorbell = xhci_setup_one_noop(xhci);

-    xhci_dbg(xhci, "Command ring memory map follows:\n");
-    xhci_debug_ring(xhci, xhci->cmd_ring);
-    xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
-    xhci_dbg_cmd_ptrs(xhci);
-
-    xhci_dbg(xhci, "ERST memory map follows:\n");
-    xhci_dbg_erst(xhci, &xhci->erst);
-
-    xhci_dbg(xhci, "Event ring:\n");
-    xhci_debug_ring(xhci, xhci->event_ring);
-    xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
-    temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
-    temp &= ERST_PTR_MASK;
-    xhci_dbg(xhci, "ERST deq = 0x%x\n", temp);
-    temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[1]);
-    xhci_dbg(xhci, "ERST deq upper = 0x%x\n", temp);
-
     temp = xhci_readl(xhci, &xhci->op_regs->command);
     temp |= (CMD_RUN);
     xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
@@ -601,10 +620,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
         goto exit;
     }
     if (usb_endpoint_xfer_control(&urb->ep->desc))
-        ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
+        /* We have a spinlock and interrupts disabled, so we must pass
+         * atomic context to this function, which may allocate memory.
+         */
+        ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
                 slot_id, ep_index);
     else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
-        ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
+        ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
                 slot_id, ep_index);
     else
         ret = -EINVAL;
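
The mem_flags -> GFP_ATOMIC switch above matters because xhci_urb_enqueue() is already holding xhci->lock with interrupts disabled when it queues the transfer; an allocation that can sleep (GFP_KERNEL) at that point can deadlock. The rule of thumb, sketched with a hypothetical allocation:

/* Sketch: allocating while holding a spinlock with IRQs off.
 * GFP_KERNEL may sleep to reclaim memory, which is forbidden in
 * atomic context; GFP_ATOMIC never sleeps but may fail, so the
 * caller must tolerate NULL.
 */
static struct xhci_td *alloc_td_atomic_sketch(void)
{
    struct xhci_td *td;

    /* caller holds xhci->lock, interrupts disabled */
    td = kzalloc(sizeof(*td), GFP_ATOMIC);
    if (!td)
        return NULL;    /* fail gracefully; cannot reclaim here */
    return td;
}
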
@@ -661,8 +683,12 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
         goto done;

     xhci_dbg(xhci, "Cancel URB %p\n", urb);
+    xhci_dbg(xhci, "Event ring:\n");
+    xhci_debug_ring(xhci, xhci->event_ring);
     ep_index = xhci_get_endpoint_index(&urb->ep->desc);
     ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
+    xhci_dbg(xhci, "Endpoint ring:\n");
+    xhci_debug_ring(xhci, ep_ring);
     td = (struct xhci_td *) urb->hcpriv;

     ep_ring->cancels_pending++;
@@ -696,7 +722,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
         struct usb_host_endpoint *ep)
 {
     struct xhci_hcd *xhci;
-    struct xhci_device_control *in_ctx;
+    struct xhci_container_ctx *in_ctx, *out_ctx;
+    struct xhci_input_control_ctx *ctrl_ctx;
+    struct xhci_slot_ctx *slot_ctx;
     unsigned int last_ctx;
     unsigned int ep_index;
     struct xhci_ep_ctx *ep_ctx;
@@ -724,31 +752,34 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
     }

     in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+    out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+    ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
     ep_index = xhci_get_endpoint_index(&ep->desc);
-    ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+    ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
     /* If the HC already knows the endpoint is disabled,
      * or the HCD has noted it is disabled, ignore this request
      */
     if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
-            in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+            ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
         xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
                 __func__, ep);
         return 0;
     }

-    in_ctx->drop_flags |= drop_flag;
-    new_drop_flags = in_ctx->drop_flags;
+    ctrl_ctx->drop_flags |= drop_flag;
+    new_drop_flags = ctrl_ctx->drop_flags;

-    in_ctx->add_flags = ~drop_flag;
-    new_add_flags = in_ctx->add_flags;
+    ctrl_ctx->add_flags = ~drop_flag;
+    new_add_flags = ctrl_ctx->add_flags;

-    last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
+    last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
+    slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
     /* Update the last valid endpoint context, if we deleted the last one */
-    if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
-        in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
-        in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+    if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
+        slot_ctx->dev_info &= ~LAST_CTX_MASK;
+        slot_ctx->dev_info |= LAST_CTX(last_ctx);
     }
-    new_slot_info = in_ctx->slot.dev_info;
+    new_slot_info = slot_ctx->dev_info;

     xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
@@ -778,17 +809,22 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
         struct usb_host_endpoint *ep)
 {
     struct xhci_hcd *xhci;
-    struct xhci_device_control *in_ctx;
+    struct xhci_container_ctx *in_ctx, *out_ctx;
     unsigned int ep_index;
     struct xhci_ep_ctx *ep_ctx;
+    struct xhci_slot_ctx *slot_ctx;
+    struct xhci_input_control_ctx *ctrl_ctx;
     u32 added_ctxs;
     unsigned int last_ctx;
     u32 new_add_flags, new_drop_flags, new_slot_info;
     int ret = 0;

     ret = xhci_check_args(hcd, udev, ep, 1, __func__);
-    if (ret <= 0)
+    if (ret <= 0) {
+        /* So we won't queue a reset ep command for a root hub */
+        ep->hcpriv = NULL;
         return ret;
+    }
     xhci = hcd_to_xhci(hcd);

     added_ctxs = xhci_get_endpoint_flag(&ep->desc);
@@ -810,12 +846,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
     }

     in_ctx = xhci->devs[udev->slot_id]->in_ctx;
+    out_ctx = xhci->devs[udev->slot_id]->out_ctx;
+    ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
     ep_index = xhci_get_endpoint_index(&ep->desc);
-    ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
+    ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
     /* If the HCD has already noted the endpoint is enabled,
      * ignore this request.
      */
-    if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
+    if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
         xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
                 __func__, ep);
         return 0;
@@ -833,8 +871,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
         return -ENOMEM;
     }

-    in_ctx->add_flags |= added_ctxs;
-    new_add_flags = in_ctx->add_flags;
+    ctrl_ctx->add_flags |= added_ctxs;
+    new_add_flags = ctrl_ctx->add_flags;

     /* If xhci_endpoint_disable() was called for this endpoint, but the
      * xHC hasn't been notified yet through the check_bandwidth() call,
@@ -842,14 +880,18 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
      * descriptors.  We must drop and re-add this endpoint, so we leave the
      * drop flags alone.
      */
-    new_drop_flags = in_ctx->drop_flags;
+    new_drop_flags = ctrl_ctx->drop_flags;

+    slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
     /* Update the last valid endpoint context, if we just added one past */
-    if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
-        in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
-        in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
+    if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
+        slot_ctx->dev_info &= ~LAST_CTX_MASK;
+        slot_ctx->dev_info |= LAST_CTX(last_ctx);
     }
-    new_slot_info = in_ctx->slot.dev_info;
+    new_slot_info = slot_ctx->dev_info;
+
+    /* Store the usb_device pointer for later use */
+    ep->hcpriv = udev;

     xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
             (unsigned int) ep->desc.bEndpointAddress,
@@ -860,9 +902,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
     return 0;
 }

-static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
+static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
 {
+    struct xhci_input_control_ctx *ctrl_ctx;
     struct xhci_ep_ctx *ep_ctx;
+    struct xhci_slot_ctx *slot_ctx;
     int i;

     /* When a device's add flag and drop flag are zero, any subsequent
@@ -870,17 +914,18 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
      * untouched.  Make sure we don't leave any old state in the input
      * endpoint contexts.
      */
-    virt_dev->in_ctx->drop_flags = 0;
-    virt_dev->in_ctx->add_flags = 0;
-    virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
+    ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+    ctrl_ctx->drop_flags = 0;
+    ctrl_ctx->add_flags = 0;
+    slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+    slot_ctx->dev_info &= ~LAST_CTX_MASK;
     /* Endpoint 0 is always valid */
-    virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
+    slot_ctx->dev_info |= LAST_CTX(1);
     for (i = 1; i < 31; ++i) {
-        ep_ctx = &virt_dev->in_ctx->ep[i];
+        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
         ep_ctx->ep_info = 0;
         ep_ctx->ep_info2 = 0;
-        ep_ctx->deq[0] = 0;
-        ep_ctx->deq[1] = 0;
+        ep_ctx->deq = 0;
         ep_ctx->tx_info = 0;
     }
 }
@@ -903,6 +948,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
     unsigned long flags;
     struct xhci_hcd *xhci;
     struct xhci_virt_device *virt_dev;
+    struct xhci_input_control_ctx *ctrl_ctx;
+    struct xhci_slot_ctx *slot_ctx;

     ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
     if (ret <= 0)
@@ -918,16 +965,18 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
     virt_dev = xhci->devs[udev->slot_id];

     /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
-    virt_dev->in_ctx->add_flags |= SLOT_FLAG;
-    virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
-    virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
-    virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
+    ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+    ctrl_ctx->add_flags |= SLOT_FLAG;
+    ctrl_ctx->add_flags &= ~EP0_FLAG;
+    ctrl_ctx->drop_flags &= ~SLOT_FLAG;
+    ctrl_ctx->drop_flags &= ~EP0_FLAG;
     xhci_dbg(xhci, "New Input Control Context:\n");
-    xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
-            LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+    slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+    xhci_dbg_ctx(xhci, virt_dev->in_ctx,
+            LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

     spin_lock_irqsave(&xhci->lock, flags);
-    ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
+    ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
             udev->slot_id);
     if (ret < 0) {
         spin_unlock_irqrestore(&xhci->lock, flags);
@@ -982,10 +1031,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
     }

     xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
-    xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
-            LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
+    xhci_dbg_ctx(xhci, virt_dev->out_ctx,
+            LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

-    xhci_zero_in_ctx(virt_dev);
+    xhci_zero_in_ctx(xhci, virt_dev);
     /* Free any old rings */
     for (i = 1; i < 31; ++i) {
         if (virt_dev->new_ep_rings[i]) {
@@ -1023,7 +1072,67 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
             virt_dev->new_ep_rings[i] = NULL;
         }
     }
-    xhci_zero_in_ctx(virt_dev);
+    xhci_zero_in_ctx(xhci, virt_dev);
+}
+
+/* Deal with stalled endpoints.  The core should have sent the control message
+ * to clear the halt condition.  However, we need to make the xHCI hardware
+ * reset its sequence number, since a device will expect a sequence number of
+ * zero after the halt condition is cleared.
+ * Context: in_interrupt
+ */
+void xhci_endpoint_reset(struct usb_hcd *hcd,
+        struct usb_host_endpoint *ep)
+{
+    struct xhci_hcd *xhci;
+    struct usb_device *udev;
+    unsigned int ep_index;
+    unsigned long flags;
+    int ret;
+    struct xhci_dequeue_state deq_state;
+    struct xhci_ring *ep_ring;
+
+    xhci = hcd_to_xhci(hcd);
+    udev = (struct usb_device *) ep->hcpriv;
+    /* Called with a root hub endpoint (or an endpoint that wasn't added
+     * with xhci_add_endpoint()
+     */
+    if (!ep->hcpriv)
+        return;
+    ep_index = xhci_get_endpoint_index(&ep->desc);
+    ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
+    if (!ep_ring->stopped_td) {
+        xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
+                ep->desc.bEndpointAddress);
+        return;
+    }
+
+    xhci_dbg(xhci, "Queueing reset endpoint command\n");
+    spin_lock_irqsave(&xhci->lock, flags);
+    ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
+    /*
+     * Can't change the ring dequeue pointer until it's transitioned to the
+     * stopped state, which is only upon a successful reset endpoint
+     * command.  Better hope that last command worked!
+     */
+    if (!ret) {
+        xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+        /* We need to move the HW's dequeue pointer past this TD,
+         * or it will attempt to resend it on the next doorbell ring.
+         */
+        xhci_find_new_dequeue_state(xhci, udev->slot_id,
+                ep_index, ep_ring->stopped_td, &deq_state);
+        xhci_dbg(xhci, "Queueing new dequeue state\n");
+        xhci_queue_new_dequeue_state(xhci, ep_ring,
+                udev->slot_id,
+                ep_index, &deq_state);
+        kfree(ep_ring->stopped_td);
+        xhci_ring_cmd_db(xhci);
+    }
+    spin_unlock_irqrestore(&xhci->lock, flags);
+
+    if (ret)
+        xhci_warn(xhci, "FIXME allocate a new ring segment\n");
 }

 /*
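
xhci_endpoint_reset() above is the HCD side of stall recovery: usbcore clears the halt on the wire, then calls back into the host driver so the controller resets its sequence number to match the device's now-zeroed state. Roughly how it is wired up (sketch; the surrounding hc_driver initializer is abbreviated and assumed, not shown in this diff):

/* Sketch: usb_clear_halt() sends CLEAR_FEATURE(ENDPOINT_HALT), then
 * usbcore invokes hcd->driver->endpoint_reset(), which for xHCI issues
 * the Reset Endpoint command and moves the ring's dequeue pointer past
 * the stalled TD.
 */
static const struct hc_driver xhci_hc_driver_sketch = {
    /* ... other hc_driver methods ... */
    .endpoint_reset =   xhci_endpoint_reset,
};
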
@@ -1120,7 +1229,9 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
     struct xhci_virt_device *virt_dev;
     int ret = 0;
     struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-    u32 temp;
+    struct xhci_slot_ctx *slot_ctx;
+    struct xhci_input_control_ctx *ctrl_ctx;
+    u64 temp_64;

     if (!udev->slot_id) {
         xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
@@ -1133,9 +1244,11 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
     if (!udev->config)
         xhci_setup_addressable_virt_dev(xhci, udev);
     /* Otherwise, assume the core has the device configured how it wants */
+    xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
+    xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

     spin_lock_irqsave(&xhci->lock, flags);
-    ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
+    ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
             udev->slot_id);
     if (ret) {
         spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1176,41 +1289,37 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
     default:
         xhci_err(xhci, "ERROR: unexpected command completion "
                 "code 0x%x.\n", virt_dev->cmd_status);
+        xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
+        xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
         ret = -EINVAL;
         break;
     }
     if (ret) {
         return ret;
     }
-    temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
-    xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
-    temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
-    xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
-    xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
-            udev->slot_id,
-            &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
-            xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
-    xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
+    temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
+    xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
+    xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
             udev->slot_id,
-            &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
-            xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
+            &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
+            (unsigned long long)
+            xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
     xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
-            (unsigned long long)virt_dev->out_ctx_dma);
+            (unsigned long long)virt_dev->out_ctx->dma);
     xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
-    xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
+    xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
     xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
-    xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
+    xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
     /*
      * USB core uses address 1 for the roothubs, so we add one to the
      * address given back to us by the HC.
      */
-    udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
+    slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+    udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
     /* Zero the input context control for later use */
-    virt_dev->in_ctx->add_flags = 0;
-    virt_dev->in_ctx->drop_flags = 0;
-    /* Mirror flags in the output context for future ep enable/disable */
-    virt_dev->out_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
-    virt_dev->out_ctx->drop_flags = 0;
+    ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+    ctrl_ctx->add_flags = 0;
+    ctrl_ctx->drop_flags = 0;

     xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
     /* XXX Meh, not sure if anyone else but choose_address uses this. */
@@ -1252,7 +1361,6 @@ static int __init xhci_hcd_init(void)
     /* xhci_device_control has eight fields, and also
      * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
      */
-    BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
     BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
     BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
     BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
...
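
The dropped BUILD_BUG_ON above is a consequence of 64-byte context support: xhci_device_control no longer has a single compile-time size, so its layout can't be pinned with a static assertion any more, while the fixed wire-format structures keep theirs. The pattern, for reference (sketch):

/* Sketch: BUILD_BUG_ON() turns a layout assumption into a compile
 * error.  Structures the controller consumes via DMA must match the
 * xHCI spec byte-for-byte; anything whose size now depends on
 * HCC_64BYTE_CONTEXT has to be sized at runtime instead.
 */
static void __init xhci_layout_checks_sketch(void)
{
    BUILD_BUG_ON(sizeof(union xhci_trb) != 4 * 32 / 8);
    BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4 * 32 / 8);
}
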
@@ -88,7 +88,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
         return;
     prev->next = next;
     if (link_trbs) {
-        prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma;
+        prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;

         /* Set the last TRB in the segment to have a TRB type ID of Link TRB */
         val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
@@ -189,6 +189,63 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
     return 0;
 }

+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
+        int type, gfp_t flags)
+{
+    struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
+    if (!ctx)
+        return NULL;
+
+    BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
+    ctx->type = type;
+    ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
+    if (type == XHCI_CTX_TYPE_INPUT)
+        ctx->size += CTX_SIZE(xhci->hcc_params);
+
+    ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
+    memset(ctx->bytes, 0, ctx->size);
+    return ctx;
+}
+
+void xhci_free_container_ctx(struct xhci_hcd *xhci,
+        struct xhci_container_ctx *ctx)
+{
+    dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
+    kfree(ctx);
+}
+
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
+        struct xhci_container_ctx *ctx)
+{
+    BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
+    return (struct xhci_input_control_ctx *)ctx->bytes;
+}
+
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
+        struct xhci_container_ctx *ctx)
+{
+    if (ctx->type == XHCI_CTX_TYPE_DEVICE)
+        return (struct xhci_slot_ctx *)ctx->bytes;
+
+    return (struct xhci_slot_ctx *)
+        (ctx->bytes + CTX_SIZE(xhci->hcc_params));
+}
+
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
+        struct xhci_container_ctx *ctx,
+        unsigned int ep_index)
+{
+    /* increment ep index by offset of start of ep ctx array */
+    ep_index++;
+    if (ctx->type == XHCI_CTX_TYPE_INPUT)
+        ep_index++;
+
+    return (struct xhci_ep_ctx *)
+        (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
+}
+
 /* All the xhci_tds in the ring's TD list should be freed at this point */
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 {
@@ -200,8 +257,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
         return;

     dev = xhci->devs[slot_id];
-    xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
-    xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+    xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
     if (!dev)
         return;

@@ -210,11 +266,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
             xhci_ring_free(xhci, dev->ep_rings[i]);

     if (dev->in_ctx)
-        dma_pool_free(xhci->device_pool,
-                dev->in_ctx, dev->in_ctx_dma);
+        xhci_free_container_ctx(xhci, dev->in_ctx);
     if (dev->out_ctx)
-        dma_pool_free(xhci->device_pool,
-                dev->out_ctx, dev->out_ctx_dma);
+        xhci_free_container_ctx(xhci, dev->out_ctx);
+
     kfree(xhci->devs[slot_id]);
     xhci->devs[slot_id] = 0;
 }
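
With device context pointers held as a single u64, the DCBAA is indexed directly by slot ID instead of splitting each address across dev_context_ptrs[2*slot_id] and [2*slot_id+1], and the old "+32 to skip the control context" offset disappears along with it. Set-up and tear-down reduce to a symmetric pair (sketch mirroring the hunks above and below):

/* Sketch: DCBAA bookkeeping with u64 entries.  Each device slot owns
 * one 64-bit entry holding the DMA address of its output device
 * context; clearing the entry on free tells the HC the slot is gone.
 */
static void dcbaa_set_sketch(struct xhci_hcd *xhci, int slot_id,
        struct xhci_container_ctx *out_ctx)
{
    xhci->dcbaa->dev_context_ptrs[slot_id] = out_ctx->dma;
}

static void dcbaa_clear_sketch(struct xhci_hcd *xhci, int slot_id)
{
    xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
}
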
@@ -222,7 +277,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
         struct usb_device *udev, gfp_t flags)
 {
-    dma_addr_t dma;
     struct xhci_virt_device *dev;

     /* Slot ID 0 is reserved */
@@ -236,23 +290,21 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
         return 0;
     dev = xhci->devs[slot_id];

-    /* Allocate the (output) device context that will be used in the HC */
-    dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+    /* Allocate the (output) device context that will be used in the HC. */
+    dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
     if (!dev->out_ctx)
         goto fail;
-    dev->out_ctx_dma = dma;
+
     xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
-            (unsigned long long)dma);
-    memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
+            (unsigned long long)dev->out_ctx->dma);

     /* Allocate the (input) device context for address device command */
-    dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
+    dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
     if (!dev->in_ctx)
         goto fail;
-    dev->in_ctx_dma = dma;
+
     xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
-            (unsigned long long)dma);
-    memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
+            (unsigned long long)dev->in_ctx->dma);

     /* Allocate endpoint 0 ring */
     dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
@@ -261,17 +313,12 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,

     init_completion(&dev->cmd_completion);

-    /*
-     * Point to output device context in dcbaa; skip the output control
-     * context, which is eight 32 bit fields (or 32 bytes long)
-     */
-    xhci->dcbaa->dev_context_ptrs[2*slot_id] =
-        (u32) dev->out_ctx_dma + (32);
+    /* Point to output device context in dcbaa. */
+    xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
     xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
             slot_id,
-            &xhci->dcbaa->dev_context_ptrs[2*slot_id],
-            (unsigned long long)dev->out_ctx_dma);
-    xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
+            &xhci->dcbaa->dev_context_ptrs[slot_id],
+            (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);

     return 1;
 fail:
...@@ -285,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud ...@@ -285,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
struct xhci_virt_device *dev; struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep0_ctx; struct xhci_ep_ctx *ep0_ctx;
struct usb_device *top_dev; struct usb_device *top_dev;
struct xhci_slot_ctx *slot_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
dev = xhci->devs[udev->slot_id]; dev = xhci->devs[udev->slot_id];
/* Slot ID 0 is reserved */ /* Slot ID 0 is reserved */
...@@ -293,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud ...@@ -293,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
udev->slot_id); udev->slot_id);
return -EINVAL; return -EINVAL;
} }
ep0_ctx = &dev->in_ctx->ep[0]; ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
/* 2) New slot context and endpoint 0 context are valid*/ /* 2) New slot context and endpoint 0 context are valid*/
dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG; ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
/* 3) Only the control endpoint is valid - one endpoint context */ /* 3) Only the control endpoint is valid - one endpoint context */
dev->in_ctx->slot.dev_info |= LAST_CTX(1); slot_ctx->dev_info |= LAST_CTX(1);
switch (udev->speed) { switch (udev->speed) {
case USB_SPEED_SUPER: case USB_SPEED_SUPER:
dev->in_ctx->slot.dev_info |= (u32) udev->route; slot_ctx->dev_info |= (u32) udev->route;
dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS; slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
break; break;
case USB_SPEED_HIGH: case USB_SPEED_HIGH:
dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS; slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
break; break;
case USB_SPEED_FULL: case USB_SPEED_FULL:
dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS; slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
break; break;
case USB_SPEED_LOW: case USB_SPEED_LOW:
dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS; slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
break; break;
case USB_SPEED_VARIABLE: case USB_SPEED_VARIABLE:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -327,7 +378,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
     for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
             top_dev = top_dev->parent)
         /* Found device below root hub */;
-    dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
+    slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
     xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

     /* Is this a LS/FS device under a HS hub? */
@@ -337,8 +388,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
      */
     if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
             udev->tt) {
-        dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
-        dev->in_ctx->slot.tt_info |= udev->ttport << 8;
+        slot_ctx->tt_info = udev->tt->hub->slot_id;
+        slot_ctx->tt_info |= udev->ttport << 8;
     }
     xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
     xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
@@ -360,10 +411,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
     ep0_ctx->ep_info2 |= MAX_BURST(0);
     ep0_ctx->ep_info2 |= ERROR_COUNT(3);

-    ep0_ctx->deq[0] =
+    ep0_ctx->deq =
         dev->ep_rings[0]->first_seg->dma;
-    ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
-    ep0_ctx->deq[1] = 0;
+    ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;

     /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
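(Editor's note: collapsing deq[0]/deq[1] into one u64 works because ring segments come from a 64-byte-aligned DMA pool (see the dma_pool_create() call later in this file), so the low bits of the address are guaranteed zero and bit 0 can carry the consumer's Dequeue Cycle State. A minimal sketch of the encoding, using the field names from this hunk:)

    /* 64-byte alignment leaves bits 5:0 of the address zero, so bit 0
     * is free to hold the cycle state the HC should start consuming with.
     */
    u64 deq = (u64) dev->ep_rings[0]->first_seg->dma;  /* bits 5:0 == 0 */
    deq |= dev->ep_rings[0]->cycle_state & 0x1;        /* DCS in bit 0 */
    ep0_ctx->deq = deq;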
@@ -470,25 +520,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
     unsigned int max_burst;

     ep_index = xhci_get_endpoint_index(&ep->desc);
-    ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+    ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

     /* Set up the endpoint ring */
     virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
     if (!virt_dev->new_ep_rings[ep_index])
         return -ENOMEM;
     ep_ring = virt_dev->new_ep_rings[ep_index];
-    ep_ctx->deq[0] = ep_ring->first_seg->dma | ep_ring->cycle_state;
-    ep_ctx->deq[1] = 0;
+    ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;

     ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);

     /* FIXME dig Mult and streams info out of ep companion desc */
-    /* Allow 3 retries for everything but isoc */
+    /* Allow 3 retries for everything but isoc;
+     * error count = 0 means infinite retries.
+     */
     if (!usb_endpoint_xfer_isoc(&ep->desc))
         ep_ctx->ep_info2 = ERROR_COUNT(3);
     else
-        ep_ctx->ep_info2 = ERROR_COUNT(0);
+        ep_ctx->ep_info2 = ERROR_COUNT(1);

     ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
@@ -498,7 +549,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
         max_packet = ep->desc.wMaxPacketSize;
         ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
         /* dig out max burst from ep companion desc */
+        if (!ep->ss_ep_comp) {
+            xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
+            max_packet = 0;
+        } else {
             max_packet = ep->ss_ep_comp->desc.bMaxBurst;
+        }
         ep_ctx->ep_info2 |= MAX_BURST(max_packet);
         break;
     case USB_SPEED_HIGH:
@@ -531,18 +587,114 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
     struct xhci_ep_ctx *ep_ctx;

     ep_index = xhci_get_endpoint_index(&ep->desc);
-    ep_ctx = &virt_dev->in_ctx->ep[ep_index];
+    ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

     ep_ctx->ep_info = 0;
     ep_ctx->ep_info2 = 0;
-    ep_ctx->deq[0] = 0;
-    ep_ctx->deq[1] = 0;
+    ep_ctx->deq = 0;
     ep_ctx->tx_info = 0;
     /* Don't free the endpoint ring until the set interface or configuration
      * request succeeds.
      */
 }
+/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
+static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
+{
+    int i;
+    struct device *dev = xhci_to_hcd(xhci)->self.controller;
+    int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
+
+    xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
+
+    if (!num_sp)
+        return 0;
+
+    xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
+    if (!xhci->scratchpad)
+        goto fail_sp;
+
+    xhci->scratchpad->sp_array =
+        pci_alloc_consistent(to_pci_dev(dev),
+                num_sp * sizeof(u64),
+                &xhci->scratchpad->sp_dma);
+    if (!xhci->scratchpad->sp_array)
+        goto fail_sp2;
+
+    xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
+    if (!xhci->scratchpad->sp_buffers)
+        goto fail_sp3;
+
+    xhci->scratchpad->sp_dma_buffers =
+        kzalloc(sizeof(dma_addr_t) * num_sp, flags);
+    if (!xhci->scratchpad->sp_dma_buffers)
+        goto fail_sp4;
+
+    xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
+    for (i = 0; i < num_sp; i++) {
+        dma_addr_t dma;
+        void *buf = pci_alloc_consistent(to_pci_dev(dev),
+                xhci->page_size, &dma);
+        if (!buf)
+            goto fail_sp5;
+
+        xhci->scratchpad->sp_array[i] = dma;
+        xhci->scratchpad->sp_buffers[i] = buf;
+        xhci->scratchpad->sp_dma_buffers[i] = dma;
+    }
+
+    return 0;
+
+ fail_sp5:
+    for (i = i - 1; i >= 0; i--) {
+        pci_free_consistent(to_pci_dev(dev), xhci->page_size,
+                xhci->scratchpad->sp_buffers[i],
+                xhci->scratchpad->sp_dma_buffers[i]);
+    }
+    kfree(xhci->scratchpad->sp_dma_buffers);
+
+ fail_sp4:
+    kfree(xhci->scratchpad->sp_buffers);
+
+ fail_sp3:
+    pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
+            xhci->scratchpad->sp_array,
+            xhci->scratchpad->sp_dma);
+
+ fail_sp2:
+    kfree(xhci->scratchpad);
+    xhci->scratchpad = NULL;
+
+ fail_sp:
+    return -ENOMEM;
+}
+
+static void scratchpad_free(struct xhci_hcd *xhci)
+{
+    int num_sp;
+    int i;
+    struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+
+    if (!xhci->scratchpad)
+        return;
+
+    num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
+
+    for (i = 0; i < num_sp; i++) {
+        pci_free_consistent(pdev, xhci->page_size,
+                xhci->scratchpad->sp_buffers[i],
+                xhci->scratchpad->sp_dma_buffers[i]);
+    }
+    kfree(xhci->scratchpad->sp_dma_buffers);
+    kfree(xhci->scratchpad->sp_buffers);
+    pci_free_consistent(pdev, num_sp * sizeof(u64),
+            xhci->scratchpad->sp_array,
+            xhci->scratchpad->sp_dma);
+    kfree(xhci->scratchpad);
+    xhci->scratchpad = NULL;
+}
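(Editor's note: how the pieces fit together, as an illustrative comment-only sketch; slot numbering per the xHCI spec, not part of the patch:)

    /*
     * DCBAA layout after scratchpad_alloc() (illustrative):
     *
     *   dev_context_ptrs[0]        = sp_dma      -> u64 sp_array[num_sp]
     *   dev_context_ptrs[slot_id]  = out_ctx dma -> output device context
     *
     * Each sp_array[i] holds the DMA address of one xhci->page_size
     * buffer the controller may use as private working memory; software
     * only allocates and frees these pages and must never touch their
     * contents.
     */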
 void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
     struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
@@ -551,10 +703,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
     /* Free the Event Ring Segment Table and the actual Event Ring */
     xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
-    xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]);
-    xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
-    xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]);
-    xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
+    xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
+    xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
     size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
     if (xhci->erst.entries)
         pci_free_consistent(pdev, size,
@@ -566,8 +716,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
     xhci->event_ring = NULL;
     xhci_dbg(xhci, "Freed event ring\n");

-    xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]);
-    xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]);
+    xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
     if (xhci->cmd_ring)
         xhci_ring_free(xhci, xhci->cmd_ring);
     xhci->cmd_ring = NULL;
@@ -586,8 +735,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
     xhci->device_pool = NULL;
     xhci_dbg(xhci, "Freed device context pool\n");

-    xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
-    xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
+    xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
     if (xhci->dcbaa)
         pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
                 xhci->dcbaa, xhci->dcbaa->dma);
@@ -595,6 +743,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
     xhci->page_size = 0;
     xhci->page_shift = 0;
+    scratchpad_free(xhci);
 }
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
@@ -602,6 +751,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
     dma_addr_t dma;
     struct device *dev = xhci_to_hcd(xhci)->self.controller;
     unsigned int val, val2;
+    u64 val_64;
     struct xhci_segment *seg;
     u32 page_size;
     int i;
@@ -647,8 +797,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
     xhci->dcbaa->dma = dma;
     xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
             (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
-    xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
-    xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
+    xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
     /*
      * Initialize the ring segment pool.  The ring must be a contiguous
@@ -658,11 +807,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
      */
     xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
             SEGMENT_SIZE, 64, xhci->page_size);
     /* See Table 46 and Note on Figure 55 */
-    /* FIXME support 64-byte contexts */
     xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
-            sizeof(struct xhci_device_control),
-            64, xhci->page_size);
+            2112, 64, xhci->page_size);
     if (!xhci->segment_pool || !xhci->device_pool)
         goto fail;
@@ -675,14 +823,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
             (unsigned long long)xhci->cmd_ring->first_seg->dma);

     /* Set the address in the Command Ring Control register */
-    val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
-    val = (val & ~CMD_RING_ADDR_MASK) |
-            (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) |
+    val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+    val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+            (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
             xhci->cmd_ring->cycle_state;
-    xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val);
-    xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]);
-    xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n");
-    xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]);
+    xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
+    xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
     xhci_dbg_cmd_ptrs(xhci);

     val = xhci_readl(xhci, &xhci->cap_regs->db_off);
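(Editor's note: the new debug line above still prints the stale 32-bit `val` rather than `val_64`; that quirk is carried over from the original commit. The xhci_read_64()/xhci_write_64() helpers themselves are defined in xhci.h, outside this excerpt; a plausible sketch of the write side, consistent with the low-dword-first ordering rule in the header comment removed near the end of this diff:)

    static inline void xhci_write_64(struct xhci_hcd *xhci,
                                     const u64 val, __u64 __iomem *regs)
    {
        __u32 __iomem *ptr = (__u32 __iomem *) regs;

        /* Low dword first; HCs without 64-bit pointer support ignore
         * the high dword, so the write order only matters when both
         * halves are implemented.
         */
        writel((u32) (val & 0xffffffff), ptr);
        writel((u32) (val >> 32), ptr + 1);
    }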
@@ -722,8 +868,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
     /* set ring base address and size for each segment table entry */
     for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
         struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-        entry->seg_addr[0] = seg->dma;
-        entry->seg_addr[1] = 0;
+        entry->seg_addr = seg->dma;
         entry->seg_size = TRBS_PER_SEGMENT;
         entry->rsvd = 0;
         seg = seg->next;
@@ -741,11 +886,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
     /* set the segment table base address */
     xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
             (unsigned long long)xhci->erst.erst_dma_addr);
-    val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
-    val &= ERST_PTR_MASK;
-    val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK);
-    xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]);
-    xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
+    val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+    val_64 &= ERST_PTR_MASK;
+    val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+    xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

     /* Set the event ring dequeue address */
     xhci_set_hc_event_deq(xhci);
@@ -761,7 +905,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
     for (i = 0; i < MAX_HC_SLOTS; ++i)
         xhci->devs[i] = 0;

+    if (scratchpad_alloc(xhci, flags))
+        goto fail;
+
     return 0;

 fail:
     xhci_warn(xhci, "Couldn't initialize memory\n");
     xhci_mem_cleanup(xhci);
...
@@ -117,6 +117,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
     .free_dev = xhci_free_dev,
     .add_endpoint = xhci_add_endpoint,
     .drop_endpoint = xhci_drop_endpoint,
+    .endpoint_reset = xhci_endpoint_reset,
     .check_bandwidth = xhci_check_bandwidth,
     .reset_bandwidth = xhci_reset_bandwidth,
     .address_device = xhci_address_device,
...
@@ -135,6 +135,7 @@ static void next_trb(struct xhci_hcd *xhci,
 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
 {
     union xhci_trb *next = ++(ring->dequeue);
+    unsigned long long addr;

     ring->deq_updates++;
     /* Update the dequeue pointer further if that was a link TRB or we're at
@@ -152,6 +153,13 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
         ring->dequeue = ring->deq_seg->trbs;
         next = ring->dequeue;
     }
+    addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
+    if (ring == xhci->event_ring)
+        xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
+    else if (ring == xhci->cmd_ring)
+        xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
+    else
+        xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
 }
 /*
@@ -171,6 +179,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 {
     u32 chain;
     union xhci_trb *next;
+    unsigned long long addr;

     chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
     next = ++(ring->enqueue);
@@ -204,6 +213,13 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
         ring->enqueue = ring->enq_seg->trbs;
         next = ring->enqueue;
     }
+    addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
+    if (ring == xhci->event_ring)
+        xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
+    else if (ring == xhci->cmd_ring)
+        xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
+    else
+        xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
 }
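(Editor's note: both debug hooks rely on xhci_trb_virt_to_dma(), which is defined elsewhere in xhci-ring.c and not shown in this excerpt. A sketch of the likely pointer arithmetic, using segment fields that appear elsewhere in this diff:)

    dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
                                    union xhci_trb *trb)
    {
        /* The TRB must lie inside this segment's TRB array */
        if (!seg || !trb || trb < seg->trbs ||
                trb >= &seg->trbs[TRBS_PER_SEGMENT])
            return 0;
        /* Same byte offset into the DMA mapping as into the array */
        return seg->dma + ((char *) trb - (char *) seg->trbs);
    }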
 /*
@@ -237,7 +253,7 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 {
-    u32 temp;
+    u64 temp;
     dma_addr_t deq;

     deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -246,13 +262,15 @@ void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
         xhci_warn(xhci, "WARN something wrong with SW event ring "
                 "dequeue ptr.\n");
     /* Update HC event ring dequeue pointer */
-    temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
+    temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
     temp &= ERST_PTR_MASK;
-    if (!in_interrupt())
-        xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
-    xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
-    xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
-            &xhci->ir_set->erst_dequeue[0]);
+    /* Don't clear the EHB bit (which is RW1C) because
+     * there might be more events to service.
+     */
+    temp &= ~ERST_EHB;
+    xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
+    xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+            &xhci->ir_set->erst_dequeue);
 }
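(Editor's note: EHB, the Event Handler Busy bit, is write-1-to-clear, so masking it to zero here deliberately leaves it set in hardware. Explicitly acknowledging it once every pending event has been consumed would look roughly like this hypothetical call, reusing the mask names above:)

    /* Hypothetical: write EHB as 1 to clear it when event handling is done */
    xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | ERST_EHB,
            &xhci->ir_set->erst_dequeue);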
 /* Ring the host controller doorbell after placing a command on the ring */
@@ -279,7 +297,8 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
     /* Don't ring the doorbell for this endpoint if there are pending
      * cancellations because the we don't want to interrupt processing.
      */
-    if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) {
+    if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)
+            && !(ep_ring->state & EP_HALTED)) {
         field = xhci_readl(xhci, db_addr) & DB_MASK;
         xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
         /* Flush PCI posted writes - FIXME Matthew Wilcox says this
@@ -316,12 +335,6 @@ static struct xhci_segment *find_trb_seg(
     return cur_seg;
 }

-struct dequeue_state {
-    struct xhci_segment *new_deq_seg;
-    union xhci_trb *new_deq_ptr;
-    int new_cycle_state;
-};
 /*
  * Move the xHC's endpoint ring dequeue pointer past cur_td.
  * Record the new state of the xHC's endpoint ring dequeue segment,
@@ -336,24 +349,30 @@ struct dequeue_state {
  * - Finally we move the dequeue state one TRB further, toggling the cycle bit
  *   if we've moved it past a link TRB with the toggle cycle bit set.
  */
-static void find_new_dequeue_state(struct xhci_hcd *xhci,
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
         unsigned int slot_id, unsigned int ep_index,
-        struct xhci_td *cur_td, struct dequeue_state *state)
+        struct xhci_td *cur_td, struct xhci_dequeue_state *state)
 {
     struct xhci_virt_device *dev = xhci->devs[slot_id];
     struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
     struct xhci_generic_trb *trb;
+    struct xhci_ep_ctx *ep_ctx;
+    dma_addr_t addr;

     state->new_cycle_state = 0;
+    xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
     state->new_deq_seg = find_trb_seg(cur_td->start_seg,
             ep_ring->stopped_trb,
             &state->new_cycle_state);
     if (!state->new_deq_seg)
         BUG();
     /* Dig out the cycle state saved by the xHC during the stop ep cmd */
-    state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];
+    xhci_dbg(xhci, "Finding endpoint context\n");
+    ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+    state->new_cycle_state = 0x1 & ep_ctx->deq;

     state->new_deq_ptr = cur_td->last_trb;
+    xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
     state->new_deq_seg = find_trb_seg(state->new_deq_seg,
             state->new_deq_ptr,
             &state->new_cycle_state);
@@ -367,6 +386,12 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
     next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

     /* Don't update the ring cycle state for the producer (us). */
+    xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
+            state->new_deq_seg);
+    addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
+    xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
+            (unsigned long long) addr);
+    xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
     ep_ring->dequeue = state->new_deq_ptr;
     ep_ring->deq_seg = state->new_deq_seg;
 }
@@ -416,6 +441,30 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
         unsigned int ep_index, struct xhci_segment *deq_seg,
         union xhci_trb *deq_ptr, u32 cycle_state);

+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+        struct xhci_ring *ep_ring, unsigned int slot_id,
+        unsigned int ep_index, struct xhci_dequeue_state *deq_state)
+{
+    xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+            "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+            deq_state->new_deq_seg,
+            (unsigned long long)deq_state->new_deq_seg->dma,
+            deq_state->new_deq_ptr,
+            (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
+            deq_state->new_cycle_state);
+    queue_set_tr_deq(xhci, slot_id, ep_index,
+            deq_state->new_deq_seg,
+            deq_state->new_deq_ptr,
+            (u32) deq_state->new_cycle_state);
+    /* Stop the TD queueing code from ringing the doorbell until
+     * this command completes.  The HC won't set the dequeue pointer
+     * if the ring is running, and ringing the doorbell starts the
+     * ring running.
+     */
+    ep_ring->state |= SET_DEQ_PENDING;
+    xhci_ring_cmd_db(xhci);
+}
 /*
  * When we get a command completion for a Stop Endpoint Command, we need to
  * unlink any cancelled TDs from the ring.  There are two ways to do that:
@@ -436,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
     struct xhci_td *cur_td = 0;
     struct xhci_td *last_unlinked_td;

-    struct dequeue_state deq_state;
+    struct xhci_dequeue_state deq_state;
 #ifdef CONFIG_USB_HCD_STAT
     ktime_t stop_time = ktime_get();
 #endif
@@ -464,7 +513,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
          * move the xHC endpoint ring dequeue pointer past this TD.
          */
         if (cur_td == ep_ring->stopped_td)
-            find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
+            xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
                     &deq_state);
         else
             td_to_noop(xhci, ep_ring, cur_td);
@@ -480,24 +529,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
     /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
     if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-        xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
-                "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
-                deq_state.new_deq_seg,
-                (unsigned long long)deq_state.new_deq_seg->dma,
-                deq_state.new_deq_ptr,
-                (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
-                deq_state.new_cycle_state);
-        queue_set_tr_deq(xhci, slot_id, ep_index,
-                deq_state.new_deq_seg,
-                deq_state.new_deq_ptr,
-                (u32) deq_state.new_cycle_state);
-        /* Stop the TD queueing code from ringing the doorbell until
-         * this command completes.  The HC won't set the dequeue pointer
-         * if the ring is running, and ringing the doorbell starts the
-         * ring running.
-         */
-        ep_ring->state |= SET_DEQ_PENDING;
-        xhci_ring_cmd_db(xhci);
+        xhci_queue_new_dequeue_state(xhci, ep_ring,
+                slot_id, ep_index, &deq_state);
     } else {
         /* Otherwise just ring the doorbell to restart the ring */
         ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -551,11 +584,15 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
     unsigned int ep_index;
     struct xhci_ring *ep_ring;
     struct xhci_virt_device *dev;
+    struct xhci_ep_ctx *ep_ctx;
+    struct xhci_slot_ctx *slot_ctx;

     slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
     ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
     dev = xhci->devs[slot_id];
     ep_ring = dev->ep_rings[ep_index];
+    ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
+    slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

     if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
         unsigned int ep_state;
@@ -569,9 +606,9 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
         case COMP_CTX_STATE:
             xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
                     "to incorrect slot or ep state.\n");
-            ep_state = dev->out_ctx->ep[ep_index].ep_info;
+            ep_state = ep_ctx->ep_info;
             ep_state &= EP_STATE_MASK;
-            slot_state = dev->out_ctx->slot.dev_state;
+            slot_state = slot_ctx->dev_state;
             slot_state = GET_SLOT_STATE(slot_state);
             xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
                     slot_state, ep_state);
@@ -593,16 +630,33 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
          * cancelling URBs, which might not be an error...
          */
     } else {
-        xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
-                "deq[1] = 0x%x.\n",
-                dev->out_ctx->ep[ep_index].deq[0],
-                dev->out_ctx->ep[ep_index].deq[1]);
+        xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
+                ep_ctx->deq);
     }

     ep_ring->state &= ~SET_DEQ_PENDING;
     ring_ep_doorbell(xhci, slot_id, ep_index);
 }
+static void handle_reset_ep_completion(struct xhci_hcd *xhci,
+        struct xhci_event_cmd *event,
+        union xhci_trb *trb)
+{
+    int slot_id;
+    unsigned int ep_index;
+
+    slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
+    ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
+    /* This command will only fail if the endpoint wasn't halted,
+     * but we don't care.
+     */
+    xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
+            (unsigned int) GET_COMP_CODE(event->status));
+
+    /* Clear our internal halted state and restart the ring */
+    xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED;
+    ring_ep_doorbell(xhci, slot_id, ep_index);
+}
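(Editor's note: this completion handler is the tail of the new stall-recovery path; the front half lives in xhci_endpoint_reset() in xhci.c, which this excerpt does not include. The plausible ordering, pieced together from the functions that do appear in this diff:)

    /* Sketch of the stall-recovery sequence, not verbatim driver code:
     * 1. xhci_queue_reset_ep(xhci, slot_id, ep_index)  - clear the HW halt
     * 2. xhci_find_new_dequeue_state(...)              - skip the stalled TD
     * 3. xhci_queue_new_dequeue_state(...)             - move the HW deq ptr
     * 4. handle_reset_ep_completion()                  - clear EP_HALTED and
     *                                                    ring the doorbell
     */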
 static void handle_cmd_completion(struct xhci_hcd *xhci,
         struct xhci_event_cmd *event)
@@ -611,7 +665,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
     u64 cmd_dma;
     dma_addr_t cmd_dequeue_dma;

-    cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
+    cmd_dma = event->cmd_trb;
     cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
             xhci->cmd_ring->dequeue);
     /* Is the command ring deq ptr out of sync with the deq seg ptr? */
@@ -653,6 +707,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
     case TRB_TYPE(TRB_CMD_NOOP):
         ++xhci->noops_handled;
         break;
+    case TRB_TYPE(TRB_RESET_EP):
+        handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
+        break;
     default:
         /* Skip over unknown commands on the event ring */
         xhci->error_bitmask |= 1 << 6;
@@ -756,7 +813,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
     union xhci_trb *event_trb;
     struct urb *urb = 0;
     int status = -EINPROGRESS;
+    struct xhci_ep_ctx *ep_ctx;

+    xhci_dbg(xhci, "In %s\n", __func__);
     xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
     if (!xdev) {
         xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
@@ -765,17 +824,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
     /* Endpoint ID is 1 based, our index is zero based */
     ep_index = TRB_TO_EP_ID(event->flags) - 1;
+    xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
     ep_ring = xdev->ep_rings[ep_index];
-    if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
+    ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+    if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
         xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
         return -ENODEV;
     }

-    event_dma = event->buffer[0];
-    if (event->buffer[1] != 0)
-        xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
+    event_dma = event->buffer;
     /* This TRB should be in the TD at the head of this ring's TD list */
+    xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
     if (list_empty(&ep_ring->td_list)) {
         xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
                 TRB_TO_SLOT_ID(event->flags), ep_index);
@@ -785,11 +844,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         urb = NULL;
         goto cleanup;
     }
+    xhci_dbg(xhci, "%s - getting list entry\n", __func__);
     td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);

     /* Is this a TRB in the currently executing TD? */
+    xhci_dbg(xhci, "%s - looking for TD\n", __func__);
     event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
             td->last_trb, event_dma);
+    xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
     if (!event_seg) {
         /* HC is busted, give up! */
         xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
@@ -798,10 +860,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
     event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
     xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
             (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
-    xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
-            (unsigned int) event->buffer[0]);
-    xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n",
-            (unsigned int) event->buffer[1]);
+    xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
+            lower_32_bits(event->buffer));
+    xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
+            upper_32_bits(event->buffer));
     xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
             (unsigned int) event->transfer_len);
     xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
@@ -823,6 +885,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         break;
     case COMP_STALL:
         xhci_warn(xhci, "WARN: Stalled endpoint\n");
+        ep_ring->state |= EP_HALTED;
         status = -EPIPE;
         break;
     case COMP_TRB_ERR:
@@ -833,6 +896,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         xhci_warn(xhci, "WARN: transfer error on endpoint\n");
         status = -EPROTO;
         break;
+    case COMP_BABBLE:
+        xhci_warn(xhci, "WARN: babble error on endpoint\n");
+        status = -EOVERFLOW;
+        break;
     case COMP_DB_ERR:
         xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
         status = -ENOSR;
@@ -874,15 +941,26 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         if (event_trb != ep_ring->dequeue) {
             /* The event was for the status stage */
             if (event_trb == td->last_trb) {
+                if (td->urb->actual_length != 0) {
+                    /* Don't overwrite a previously set error code */
+                    if (status == -EINPROGRESS || status == 0)
+                        /* Did we already see a short data stage? */
+                        status = -EREMOTEIO;
+                } else {
                     td->urb->actual_length =
                         td->urb->transfer_buffer_length;
+                }
             } else {
                 /* Maybe the event was for the data stage? */
-                if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
+                if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) {
                     /* We didn't stop on a link TRB in the middle */
                     td->urb->actual_length =
                         td->urb->transfer_buffer_length -
                         TRB_LEN(event->transfer_len);
+                    xhci_dbg(xhci, "Waiting for status stage event\n");
+                    urb = NULL;
+                    goto cleanup;
+                }
             }
         }
     } else {
@@ -929,15 +1007,19 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                     TRB_LEN(event->transfer_len));
                 td->urb->actual_length = 0;
             }
+            /* Don't overwrite a previously set error code */
+            if (status == -EINPROGRESS) {
                 if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
                     status = -EREMOTEIO;
                 else
                     status = 0;
+            }
         } else {
             td->urb->actual_length = td->urb->transfer_buffer_length;
             /* Ignore a short packet completion if the
              * untransferred length was zero.
              */
+            if (status == -EREMOTEIO)
                 status = 0;
         }
     } else {
@@ -965,12 +1047,22 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                     TRB_LEN(event->transfer_len);
         }
     }
-    /* The Endpoint Stop Command completion will take care of
-     * any stopped TDs.  A stopped TD may be restarted, so don't update the
-     * ring dequeue pointer or take this TD off any lists yet.
-     */
     if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
             GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
+        /* The Endpoint Stop Command completion will take care of any
+         * stopped TDs.  A stopped TD may be restarted, so don't update
+         * the ring dequeue pointer or take this TD off any lists yet.
+         */
+        ep_ring->stopped_td = td;
+        ep_ring->stopped_trb = event_trb;
+    } else {
+        if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) {
+            /* The transfer is completed from the driver's
+             * perspective, but we need to issue a set dequeue
+             * command for this stalled endpoint to move the dequeue
+             * pointer past the TD.  We can't do that here because
+             * the halt condition must be cleared first.
+             */
             ep_ring->stopped_td = td;
             ep_ring->stopped_trb = event_trb;
         } else {
@@ -978,6 +1070,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
             while (ep_ring->dequeue != td->last_trb)
                 inc_deq(xhci, ep_ring, false);
             inc_deq(xhci, ep_ring, false);
+        }
         /* Clean up the endpoint's TD list */
         urb = td->urb;
@@ -987,7 +1080,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
             list_del(&td->cancelled_td_list);
             ep_ring->cancels_pending--;
         }
+        /* Leave the TD around for the reset endpoint function to use */
+        if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
             kfree(td);
+        }
         urb->hcpriv = NULL;
     }
 cleanup:
@@ -997,6 +1093,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
     /* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
     if (urb) {
         usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+        xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
+                urb, td->urb->actual_length, status);
         spin_unlock(&xhci->lock);
         usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
         spin_lock(&xhci->lock);
@@ -1014,6 +1112,7 @@ void xhci_handle_event(struct xhci_hcd *xhci)
     int update_ptrs = 1;
     int ret;

+    xhci_dbg(xhci, "In %s\n", __func__);
     if (!xhci->event_ring || !xhci->event_ring->dequeue) {
         xhci->error_bitmask |= 1 << 1;
         return;
@@ -1026,18 +1125,25 @@ void xhci_handle_event(struct xhci_hcd *xhci)
         xhci->error_bitmask |= 1 << 2;
         return;
     }
+    xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);

     /* FIXME: Handle more event types. */
     switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
     case TRB_TYPE(TRB_COMPLETION):
+        xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
         handle_cmd_completion(xhci, &event->event_cmd);
+        xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
         break;
     case TRB_TYPE(TRB_PORT_STATUS):
+        xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
         handle_port_status(xhci, event);
+        xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
         update_ptrs = 0;
         break;
     case TRB_TYPE(TRB_TRANSFER):
+        xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
         ret = handle_tx_event(xhci, &event->trans_event);
+        xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
         if (ret < 0)
             xhci->error_bitmask |= 1 << 9;
         else
@@ -1093,13 +1199,13 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
          */
         xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
         return -ENOENT;
-    case EP_STATE_HALTED:
     case EP_STATE_ERROR:
-        xhci_warn(xhci, "WARN waiting for halt or error on ep "
-                "to be cleared\n");
+        xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
         /* FIXME event handling code for error needs to clear it */
         /* XXX not sure if this should be -ENOENT or not */
         return -EINVAL;
+    case EP_STATE_HALTED:
+        xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
     case EP_STATE_STOPPED:
     case EP_STATE_RUNNING:
         break;
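(Editor's note: the new EP_STATE_HALTED case intentionally falls through to EP_STATE_STOPPED/EP_STATE_RUNNING, so URBs can still be queued onto a halted ring and run once the reset-endpoint path restarts it. A later kernel would annotate that intent explicitly; illustrative only:)

    case EP_STATE_HALTED:
        xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
        fallthrough;    /* queue it; the reset-ep path restarts the ring */
    case EP_STATE_STOPPED:
    case EP_STATE_RUNNING:
        break;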
@@ -1128,9 +1234,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
         gfp_t mem_flags)
 {
     int ret;
+    struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

     ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
-            xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
+            ep_ctx->ep_info & EP_STATE_MASK,
             num_trbs, mem_flags);
     if (ret)
         return ret;
@@ -1285,6 +1391,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
     /* Queue the first TRB, even if it's zero-length */
     do {
         u32 field = 0;
+        u32 length_field = 0;

         /* Don't change the cycle bit of the first TRB until later */
         if (first_trb)
@@ -1314,10 +1421,13 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
                 (unsigned int) addr + trb_buff_len);
         }
+        length_field = TRB_LEN(trb_buff_len) |
+                TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+                TRB_INTR_TARGET(0);
         queue_trb(xhci, ep_ring, false,
-                (u32) addr,
-                (u32) ((u64) addr >> 32),
-                TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+                lower_32_bits(addr),
+                upper_32_bits(addr),
+                length_field,
                 /* We always want to know if the TRB was short,
                  * or we won't get an event when it completes.
                  * (Unless we use event data TRBs, which are a
@@ -1365,7 +1475,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
     struct xhci_generic_trb *start_trb;
     bool first_trb;
     int start_cycle;
-    u32 field;
+    u32 field, length_field;
     int running_total, trb_buff_len, ret;
     u64 addr;
@@ -1443,10 +1553,13 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
             td->last_trb = ep_ring->enqueue;
             field |= TRB_IOC;
         }
+        length_field = TRB_LEN(trb_buff_len) |
+                TD_REMAINDER(urb->transfer_buffer_length - running_total) |
+                TRB_INTR_TARGET(0);
         queue_trb(xhci, ep_ring, false,
-                (u32) addr,
-                (u32) ((u64) addr >> 32),
-                TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
+                lower_32_bits(addr),
+                upper_32_bits(addr),
+                length_field,
                 /* We always want to know if the TRB was short,
                  * or we won't get an event when it completes.
                  * (Unless we use event data TRBs, which are a
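(Editor's note: length_field now carries the xHCI "TD size" hint, i.e. how much of the TD remains to be queued at this TRB, so the controller can plan its bursts. Worked numbers under an assumed 16 KB URB split into two 8 KB TRBs:)

    /* Illustrative only; TD_REMAINDER() packs the value into the TRB's
     * length dword (macro defined in xhci.h, outside this excerpt).
     *
     *   TRB 1: running_total = 0    -> TD_REMAINDER(16384 - 0)
     *   TRB 2: running_total = 8192 -> TD_REMAINDER(16384 - 8192)
     *
     * running_total counts bytes already queued, so the hint shrinks
     * TRB by TRB across the TD.
     */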
@@ -1478,7 +1591,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
     struct usb_ctrlrequest *setup;
     struct xhci_generic_trb *start_trb;
     int start_cycle;
-    u32 field;
+    u32 field, length_field;
     struct xhci_td *td;

     ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
@@ -1528,13 +1641,16 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
     /* If there's data, queue data TRBs */
     field = 0;
+    length_field = TRB_LEN(urb->transfer_buffer_length) |
+            TD_REMAINDER(urb->transfer_buffer_length) |
+            TRB_INTR_TARGET(0);
     if (urb->transfer_buffer_length > 0) {
         if (setup->bRequestType & USB_DIR_IN)
             field |= TRB_DIR_IN;
         queue_trb(xhci, ep_ring, false,
                 lower_32_bits(urb->transfer_dma),
                 upper_32_bits(urb->transfer_dma),
-                TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
+                length_field,
                 /* Event on short tx */
                 field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
     }
@@ -1603,7 +1719,8 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
         u32 slot_id)
 {
-    return queue_command(xhci, in_ctx_ptr, 0, 0,
+    return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+            upper_32_bits(in_ctx_ptr), 0,
             TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
 }
@@ -1611,7 +1728,8 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
         u32 slot_id)
 {
-    return queue_command(xhci, in_ctx_ptr, 0, 0,
+    return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+            upper_32_bits(in_ctx_ptr), 0,
             TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
 }
@@ -1639,10 +1757,23 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
     u32 type = TRB_TYPE(TRB_SET_DEQ);

     addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
-    if (addr == 0)
+    if (addr == 0) {
         xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
         xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
                 deq_seg, deq_ptr);
-    return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
+        return 0;
+    }
+    return queue_command(xhci, lower_32_bits(addr) | cycle_state,
+            upper_32_bits(addr), 0,
             trb_slot_id | trb_ep_index | type);
 }
+
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
+        unsigned int ep_index)
+{
+    u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
+    u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+    u32 type = TRB_TYPE(TRB_RESET_EP);
+
+    return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type);
+}
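(Editor's note: lower_32_bits()/upper_32_bits(), used throughout the queue_command() conversions above, come from <linux/kernel.h>, which is why the header gains that include in the next file. For reference, the kernel defines them as:)

    #define lower_32_bits(n) ((u32)(n))
    #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

(The double 16-bit shift avoids the undefined behavior of shifting a 32-bit value by 32 when n happens to be a 32-bit type.)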
...
@@ -25,6 +25,7 @@
 #include <linux/usb.h>
 #include <linux/timer.h>
+#include <linux/kernel.h>

 #include "../core/hcd.h"
 /* Code sharing between pci-quirks and xhci hcd */
@@ -42,14 +43,6 @@
  * xHCI register interface.
  * This corresponds to the eXtensible Host Controller Interface (xHCI)
  * Revision 0.95 specification
- *
- * Registers should always be accessed with double word or quad word accesses.
- *
- * Some xHCI implementations may support 64-bit address pointers.  Registers
- * with 64-bit address pointers should be written to with dword accesses by
- * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
- * xHCI implementations that do not support 64-bit address pointers will ignore
- * the high dword, and write order is irrelevant.
  */
 /**
@@ -96,6 +89,7 @@ struct xhci_cap_regs {
 #define HCS_ERST_MAX(p)     (((p) >> 4) & 0xf)
 /* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
 /* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
+#define HCS_MAX_SCRATCHPAD(p)   (((p) >> 27) & 0x1f)
 /* HCSPARAMS3 - hcs_params3 - bitmasks */
 /* bits 0:7, Max U1 to U0 latency for the roothub ports */
@@ -166,10 +160,10 @@ struct xhci_op_regs {
     u32 reserved1;
     u32 reserved2;
     u32 dev_notification;
-    u32 cmd_ring[2];
+    u64 cmd_ring;
     /* rsvd: offset 0x20-2F */
     u32 reserved3[4];
-    u32 dcbaa_ptr[2];
+    u64 dcbaa_ptr;
     u32 config_reg;
     /* rsvd: offset 0x3C-3FF */
     u32 reserved4[241];
@@ -254,7 +248,7 @@ struct xhci_op_regs {
 #define CMD_RING_RUNNING    (1 << 3)
 /* bits 4:5 reserved and should be preserved */
 /* Command Ring pointer - bit mask for the lower 32 bits. */
-#define CMD_RING_ADDR_MASK  (0xffffffc0)
+#define CMD_RING_RSVD_BITS  (0x3f)

 /* CONFIG - Configure Register - config_reg bitmasks */
 /* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
@@ -382,8 +376,8 @@ struct xhci_intr_reg {
     u32 irq_control;
     u32 erst_size;
     u32 rsvd;
-    u32 erst_base[2];
-    u32 erst_dequeue[2];
+    u64 erst_base;
+    u64 erst_dequeue;
 };
/* irq_pending bitmasks */ /* irq_pending bitmasks */
@@ -452,6 +446,27 @@ struct xhci_doorbell_array {
 #define EPI_TO_DB(p)	(((p) + 1) & 0xff)
 
+/**
+ * struct xhci_container_ctx
+ * @type: Type of context.  Used to calculated offsets to contained contexts.
+ * @size: Size of the context data
+ * @bytes: The raw context data given to HW
+ * @dma: dma address of the bytes
+ *
+ * Represents either a Device or Input context.  Holds a pointer to the raw
+ * memory used for the context (bytes) and dma address of it (dma).
+ */
+struct xhci_container_ctx {
+	unsigned type;
+#define XHCI_CTX_TYPE_DEVICE  0x1
+#define XHCI_CTX_TYPE_INPUT   0x2
+
+	int size;
+
+	u8 *bytes;
+	dma_addr_t dma;
+};
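Callers of the new container type never index the raw bytes themselves; slot and endpoint contexts are carved out of `bytes` at computed offsets. A minimal sketch of the idea, assuming 32-byte contexts (the real driver derives the context size from the HCC parameters, per the 64-byte context support in this merge); sketch_get_slot_ctx is a hypothetical stand-in for the xhci_get_slot_ctx() accessor declared near the end of this header:

	/* Sketch only, assuming 32-byte contexts: in an input context the
	 * input control context occupies the first context-sized slot, so
	 * the slot context starts one context further in. */
	static struct xhci_slot_ctx *sketch_get_slot_ctx(struct xhci_container_ctx *ctx)
	{
		if (ctx->type == XHCI_CTX_TYPE_DEVICE)
			return (struct xhci_slot_ctx *) ctx->bytes;
		return (struct xhci_slot_ctx *) (ctx->bytes + 32);
	}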
 /**
  * struct xhci_slot_ctx
  * @dev_info:	Route string, device speed, hub info, and last valid endpoint
@@ -538,7 +553,7 @@ struct xhci_slot_ctx {
 struct xhci_ep_ctx {
 	u32	ep_info;
 	u32	ep_info2;
-	u32	deq[2];
+	u64	deq;
 	u32	tx_info;
 	/* offset 0x14 - 0x1f reserved for HC internal use */
 	u32	reserved[3];
@@ -589,18 +604,16 @@ struct xhci_ep_ctx {
 /**
- * struct xhci_device_control
- * Input/Output context; see section 6.2.5.
+ * struct xhci_input_control_context
+ * Input control context; see section 6.2.5.
  *
  * @drop_context:	set the bit of the endpoint context you want to disable
  * @add_context:	set the bit of the endpoint context you want to enable
  */
-struct xhci_device_control {
+struct xhci_input_control_ctx {
 	u32	drop_flags;
 	u32	add_flags;
-	u32	rsvd[6];
-	struct xhci_slot_ctx	slot;
-	struct xhci_ep_ctx	ep[31];
+	u32	rsvd2[6];
 };
 /* drop context bitmasks */
@@ -608,7 +621,6 @@ struct xhci_device_control {
 /* add context bitmasks */
 #define	ADD_EP(x)	(0x1 << x)
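These bitmasks drive the Configure Endpoint command: flag bit 0 refers to the slot context and endpoint contexts start at bit 1, so ADD_EP()/DROP_EP() shift by the endpoint's context index. A minimal sketch of flagging one endpoint in and another out (DROP_EP is the drop-side counterpart elided by the hunk above; sketch_flag_endpoints is a hypothetical name):

	static void sketch_flag_endpoints(struct xhci_input_control_ctx *ctrl_ctx)
	{
		ctrl_ctx->add_flags |= ADD_EP(2);	/* enable endpoint context 2 */
		ctrl_ctx->drop_flags |= DROP_EP(5);	/* disable endpoint context 5 */
	}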
 struct xhci_virt_device {
 	/*
 	 * Commands to the hardware are passed an "input context" that
@@ -618,11 +630,10 @@ struct xhci_virt_device {
 	 * track of input and output contexts separately because
 	 * these commands might fail and we don't trust the hardware.
 	 */
-	struct xhci_device_control	*out_ctx;
-	dma_addr_t			out_ctx_dma;
+	struct xhci_container_ctx	*out_ctx;
 	/* Used for addressing devices and configuration changes */
-	struct xhci_device_control	*in_ctx;
-	dma_addr_t			in_ctx_dma;
+	struct xhci_container_ctx	*in_ctx;
 	/* FIXME when stream support is added */
 	struct xhci_ring		*ep_rings[31];
 	/* Temporary storage in case the configure endpoint command fails and we
@@ -641,7 +652,7 @@ struct xhci_virt_device {
  */
 struct xhci_device_context_array {
 	/* 64-bit device addresses; we only write 32-bit addresses */
-	u32			dev_context_ptrs[2*MAX_HC_SLOTS];
+	u64			dev_context_ptrs[MAX_HC_SLOTS];
 	/* private xHCD pointers */
 	dma_addr_t	dma;
 };
@@ -654,7 +665,7 @@ struct xhci_device_context_array {
 struct xhci_stream_ctx {
 	/* 64-bit stream ring address, cycle state, and stream type */
-	u32	stream_ring[2];
+	u64	stream_ring;
 	/* offset 0x14 - 0x1f reserved for HC internal use */
 	u32	reserved[2];
 };
@@ -662,7 +673,7 @@ struct xhci_stream_ctx {
 struct xhci_transfer_event {
 	/* 64-bit buffer address, or immediate data */
-	u32	buffer[2];
+	u64	buffer;
 	u32	transfer_len;
 	/* This field is interpreted differently based on the type of TRB */
 	u32	flags;
@@ -744,7 +755,7 @@ struct xhci_transfer_event {
 struct xhci_link_trb {
 	/* 64-bit segment pointer*/
-	u32 segment_ptr[2];
+	u64 segment_ptr;
 	u32 intr_target;
 	u32 control;
 };
@@ -755,7 +766,7 @@ struct xhci_link_trb {
 /* Command completion event TRB */
 struct xhci_event_cmd {
 	/* Pointer to command TRB, or the value passed by the event data trb */
-	u32 cmd_trb[2];
+	u64 cmd_trb;
 	u32 status;
 	u32 flags;
 };
@@ -848,8 +859,8 @@ union xhci_trb {
 #define TRB_CONFIG_EP		12
 /* Evaluate Context Command */
 #define TRB_EVAL_CONTEXT	13
-/* Reset Transfer Ring Command */
-#define TRB_RESET_RING		14
+/* Reset Endpoint Command */
+#define TRB_RESET_EP		14
 /* Stop Transfer Ring Command */
 #define TRB_STOP_RING		15
 /* Set Transfer Ring Dequeue Pointer Command */
@@ -929,6 +940,7 @@ struct xhci_ring {
 	unsigned int		cancels_pending;
 	unsigned int		state;
 #define SET_DEQ_PENDING		(1 << 0)
+#define EP_HALTED		(1 << 1)
 	/* The TRB that was last reported in a stopped endpoint ring */
 	union xhci_trb		*stopped_trb;
 	struct xhci_td		*stopped_td;
@@ -940,9 +952,15 @@ struct xhci_ring {
 	u32			cycle_state;
 };
+struct xhci_dequeue_state {
+	struct xhci_segment *new_deq_seg;
+	union xhci_trb *new_deq_ptr;
+	int new_cycle_state;
+};
+
 struct xhci_erst_entry {
 	/* 64-bit event ring segment address */
-	u32	seg_addr[2];
+	u64	seg_addr;
 	u32	seg_size;
 	/* Set to zero */
 	u32	rsvd;
@@ -957,6 +975,13 @@ struct xhci_erst {
 	unsigned int		erst_size;
 };
 
+struct xhci_scratchpad {
+	u64 *sp_array;
+	dma_addr_t sp_dma;
+	void **sp_buffers;
+	dma_addr_t *sp_dma_buffers;
+};
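The scratchpad is memory the driver lends to the controller: sp_array is the DMA-visible table of 64-bit buffer pointers the hardware reads, while sp_buffers and sp_dma_buffers are driver-side bookkeeping so the buffers can be freed later. A condensed sketch of the allocation path, with NULL checks and error unwinding omitted; it assumes hcs_params2 is cached in struct xhci_hcd and uses PAGE_SIZE as a stand-in for the controller's page size:

	static int sketch_scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
	{
		int i;
		int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
		struct device *dev = xhci_to_hcd(xhci)->self.controller;
		struct xhci_scratchpad *sp;

		if (!num_sp)
			return 0;	/* this controller needs no scratchpad */

		sp = kzalloc(sizeof(*sp), flags);
		/* The pointer array itself must be DMA-visible... */
		sp->sp_array = dma_alloc_coherent(dev, num_sp * sizeof(u64),
				&sp->sp_dma, flags);
		/* ...while these two arrays are private bookkeeping. */
		sp->sp_buffers = kzalloc(num_sp * sizeof(void *), flags);
		sp->sp_dma_buffers = kzalloc(num_sp * sizeof(dma_addr_t), flags);

		for (i = 0; i < num_sp; i++) {
			dma_addr_t dma;
			void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma, flags);

			sp->sp_array[i] = dma;	/* 64-bit pointer handed to the HC */
			sp->sp_buffers[i] = buf;
			sp->sp_dma_buffers[i] = dma;
		}
		xhci->scratchpad = sp;
		return 0;
	}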
 /*
  * Each segment table entry is 4*32bits long.  1K seems like an ok size:
  * (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table,
@@ -1011,6 +1036,9 @@ struct xhci_hcd {
 	struct xhci_ring	*cmd_ring;
 	struct xhci_ring	*event_ring;
 	struct xhci_erst	erst;
+	/* Scratchpad */
+	struct xhci_scratchpad  *scratchpad;
 	/* slot enabling and address device helpers */
 	struct completion	addr_dev;
 	int slot_id;
@@ -1071,13 +1099,43 @@ static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
 static inline void xhci_writel(struct xhci_hcd *xhci,
 		const unsigned int val, __u32 __iomem *regs)
 {
-	if (!in_interrupt())
-		xhci_dbg(xhci,
-			"`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
-			regs, val);
+	xhci_dbg(xhci,
+		"`MEM_WRITE_DWORD(3'b000, 32'h%p, 32'h%0x, 4'hf);\n",
+		regs, val);
 	writel(val, regs);
 }
+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers.  Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
+static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
+		__u64 __iomem *regs)
+{
+	__u32 __iomem *ptr = (__u32 __iomem *) regs;
+	u64 val_lo = readl(ptr);
+	u64 val_hi = readl(ptr + 1);
+	return val_lo + (val_hi << 32);
+}
+static inline void xhci_write_64(struct xhci_hcd *xhci,
+		const u64 val, __u64 __iomem *regs)
+{
+	__u32 __iomem *ptr = (__u32 __iomem *) regs;
+	u32 val_lo = lower_32_bits(val);
+	u32 val_hi = upper_32_bits(val);
+
+	xhci_dbg(xhci,
+		"`MEM_WRITE_DWORD(3'b000, 64'h%p, 64'h%0lx, 4'hf);\n",
+		regs, (long unsigned int) val);
+	writel(val_lo, ptr);
+	writel(val_hi, ptr + 1);
+}
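With the op regs now declared as u64, programming a 64-bit register becomes a single read-modify-write. A sketch of pointing the controller at the command ring, following what the CMD_RING_RSVD_BITS rename above implies: the low six bits of the register are flags/reserved and must be preserved, so only the address bits are replaced (ring_dma and cycle are stand-ins for the command ring's DMA address and cycle bit):

	static void sketch_set_cmd_ring(struct xhci_hcd *xhci,
			dma_addr_t ring_dma, u32 cycle)
	{
		u64 val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);

		val = (val & (u64) CMD_RING_RSVD_BITS)
			| (ring_dma & (u64) ~CMD_RING_RSVD_BITS)
			| cycle;
		xhci_write_64(xhci, val, &xhci->op_regs->cmd_ring);
	}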
 /* xHCI debugging */
 void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
 void xhci_print_registers(struct xhci_hcd *xhci);
@@ -1090,7 +1148,7 @@ void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
-void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep);
+void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
 /* xHCI memory managment */
 void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1128,6 +1186,7 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
+void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
@@ -1148,10 +1207,23 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
 		int slot_id, unsigned int ep_index);
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 		u32 slot_id);
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
+		unsigned int ep_index);
+void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
+		unsigned int slot_id, unsigned int ep_index,
+		struct xhci_td *cur_td, struct xhci_dequeue_state *state);
+void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+		struct xhci_ring *ep_ring, unsigned int slot_id,
+		unsigned int ep_index, struct xhci_dequeue_state *deq_state);
 
 /* xHCI roothub code */
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
 		char *buf, u16 wLength);
 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
 
+/* xHCI contexts */
+struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
+struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
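Taken together, the declarations above outline the stall-recovery path added in this merge: when an endpoint halts, the driver resets it and then moves the ring's dequeue pointer past the offending TD. A hedged sketch of that sequence; td stands in for the stalled transfer, and xhci_ring_cmd_db() is assumed to be the helper that rings the command ring doorbell:

	static void sketch_recover_halted_ep(struct xhci_hcd *xhci,
			unsigned int slot_id, unsigned int ep_index,
			struct xhci_td *td, struct xhci_ring *ep_ring)
	{
		struct xhci_dequeue_state deq_state;

		xhci_queue_reset_ep(xhci, slot_id, ep_index);	/* clear the halt */
		/* Pick a new dequeue segment/pointer/cycle past the stalled TD... */
		xhci_find_new_dequeue_state(xhci, slot_id, ep_index, td, &deq_state);
		/* ...and queue a Set TR Dequeue Pointer command to install it. */
		xhci_queue_new_dequeue_state(xhci, ep_ring, slot_id, ep_index,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	}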
 #endif /* __LINUX_XHCI_HCD_H */
@@ -220,7 +220,7 @@ config USB_IOWARRIOR
 config USB_TEST
 	tristate "USB testing driver"
-	depends on USB && USB_DEVICEFS
+	depends on USB
 	help
 	  This driver is for testing host controller software.  It is used
 	  with specialized device firmware for regression and stress testing,
...
@@ -1326,7 +1326,6 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
 	int i;
 	/* log core options (read using indexed model) */
-	musb_ep_select(mbase, 0);
 	reg = musb_read_configdata(mbase);
 	strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
@@ -1990,7 +1989,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
 	if (status < 0)
 		goto fail2;
-#ifdef CONFIG_USB_OTG
+#ifdef CONFIG_USB_MUSB_OTG
 	setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
 #endif
...
@@ -407,7 +407,7 @@ __acquires(musb->lock)
 			csr |= MUSB_RXCSR_P_SENDSTALL
 				| MUSB_RXCSR_FLUSHFIFO
 				| MUSB_RXCSR_CLRDATATOG
-				| MUSB_TXCSR_P_WZC_BITS;
+				| MUSB_RXCSR_P_WZC_BITS;
 			musb_writew(regs, MUSB_RXCSR,
 					csr);
 		}
...
@@ -323,6 +323,7 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
 static inline u8 musb_read_configdata(void __iomem *mbase)
 {
+	musb_writeb(mbase, MUSB_INDEX, 0);
 	return musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
 }
...
@@ -80,6 +80,7 @@ static struct usb_device_id id_table [] = {
 	{ USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
 	{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
 	{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
+	{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
 	{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
 	{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
 	{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
@@ -96,7 +97,9 @@ static struct usb_device_id id_table [] = {
 	{ USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
 	{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
 	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
+	{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
 	{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+	{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
...
@@ -698,6 +698,7 @@ static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
+	{ USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
 	{ },					/* Optional parameter entry */
 	{ }					/* Terminating entry */
 };
...
@@ -946,6 +946,13 @@
 #define FTDI_TURTELIZER_PID	0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */
 
+/*
+ * GN Otometrics (http://www.otometrics.com)
+ * Submitted by Ville Sundberg.
+ */
+#define GN_OTOMETRICS_VID	0x0c33	/* Vendor ID */
+#define AURICAL_USB_PID		0x0010	/* Aurical USB Audiometer */
+
 /*
  * BmRequestType:  1100 0000b
  * bRequest:       FTDI_E2_READ
...
@@ -124,10 +124,13 @@
 #define BANDB_DEVICE_ID_USOPTL4_4	0xAC44
 #define BANDB_DEVICE_ID_USOPTL4_2	0xAC42
-/* This driver also supports the ATEN UC2324 device since it is mos7840 based
- *  - if I knew the device id it would also support the ATEN UC2322 */
+/* This driver also supports
+ * ATEN UC2324 device using Moschip MCS7840
+ * ATEN UC2322 device using Moschip MCS7820
+ */
 #define USB_VENDOR_ID_ATENINTL		0x0557
 #define ATENINTL_DEVICE_ID_UC2324	0x2011
+#define ATENINTL_DEVICE_ID_UC2322	0x7820
 /* Interrupt Routine Defines    */
@@ -177,6 +180,7 @@ static struct usb_device_id moschip_port_id_table[] = {
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
 	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
+	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
 	{} /* terminating entry */
 };
@@ -186,6 +190,7 @@ static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
 	{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
 	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
+	{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
 	{} /* terminating entry */
 };
...
@@ -66,8 +66,10 @@ static int option_tiocmget(struct tty_struct *tty, struct file *file);
 static int option_tiocmset(struct tty_struct *tty, struct file *file,
 				unsigned int set, unsigned int clear);
 static int option_send_setup(struct usb_serial_port *port);
+#ifdef CONFIG_PM
 static int option_suspend(struct usb_serial *serial, pm_message_t message);
 static int option_resume(struct usb_serial *serial);
+#endif
 
 /* Vendor and product IDs */
 #define OPTION_VENDOR_ID		0x0AF0
@@ -205,6 +207,7 @@ static int option_resume(struct usb_serial *serial);
 #define NOVATELWIRELESS_PRODUCT_MC727		0x4100
 #define NOVATELWIRELESS_PRODUCT_MC950D		0x4400
 #define NOVATELWIRELESS_PRODUCT_U727		0x5010
+#define NOVATELWIRELESS_PRODUCT_MC727_NEW	0x5100
 #define NOVATELWIRELESS_PRODUCT_MC760		0x6000
 #define NOVATELWIRELESS_PRODUCT_OVMC760		0x6002
@@ -259,11 +262,6 @@ static int option_resume(struct usb_serial *serial);
 #define AXESSTEL_VENDOR_ID			0x1726
 #define AXESSTEL_PRODUCT_MV110H			0x1000
-#define ONDA_VENDOR_ID				0x19d2
-#define ONDA_PRODUCT_MSA501HS			0x0001
-#define ONDA_PRODUCT_ET502HS			0x0002
-#define ONDA_PRODUCT_MT503HS			0x2000
 #define BANDRICH_VENDOR_ID			0x1A8D
 #define BANDRICH_PRODUCT_C100_1			0x1002
 #define BANDRICH_PRODUCT_C100_2			0x1003
@@ -301,6 +299,7 @@ static int option_resume(struct usb_serial *serial);
 #define ZTE_PRODUCT_MF628			0x0015
 #define ZTE_PRODUCT_MF626			0x0031
 #define ZTE_PRODUCT_CDMA_TECH			0xfffe
+#define ZTE_PRODUCT_AC8710			0xfff1
 
 #define BENQ_VENDOR_ID				0x04a5
 #define BENQ_PRODUCT_H10			0x4068
@@ -322,6 +321,11 @@ static int option_resume(struct usb_serial *serial);
 #define ALINK_VENDOR_ID				0x1e0e
 #define ALINK_PRODUCT_3GU			0x9200
 
+/* ALCATEL PRODUCTS */
+#define ALCATEL_VENDOR_ID			0x1bbb
+#define ALCATEL_PRODUCT_X060S			0x0000
+
 static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -438,6 +442,7 @@ static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */
@@ -474,42 +479,6 @@ static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
 	{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
 	{ USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0003) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0004) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0005) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0006) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0007) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0008) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0009) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x000a) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x000b) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x000c) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x000d) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x000e) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x000f) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0010) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0011) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0012) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0013) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0014) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0015) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0016) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0017) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0018) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0019) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0020) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0021) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0022) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0023) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0024) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0025) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0026) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0027) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0028) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, 0x0029) },
-	{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MT503HS) },
 	{ USB_DEVICE(YISO_VENDOR_ID, YISO_PRODUCT_U893) },
 	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
 	{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
@@ -534,10 +503,75 @@ static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
 	{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
-	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622) },
-	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626) },
-	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
-	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0006, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0007, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0008, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0009, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000a, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000b, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000c, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000d, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000e, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x000f, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
 	{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
 	{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
@@ -547,6 +581,7 @@ static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
 	{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -555,8 +590,10 @@ static struct usb_driver option_driver = {
 	.name       = "option",
 	.probe      = usb_serial_probe,
 	.disconnect = usb_serial_disconnect,
+#ifdef CONFIG_PM
 	.suspend    = usb_serial_suspend,
 	.resume     = usb_serial_resume,
+#endif
 	.id_table   = option_ids,
 	.no_dynamic_id = 	1,
 };
@@ -588,8 +625,10 @@ static struct usb_serial_driver option_1port_device = {
 	.disconnect        = option_disconnect,
 	.release           = option_release,
 	.read_int_callback = option_instat_callback,
+#ifdef CONFIG_PM
 	.suspend           = option_suspend,
 	.resume            = option_resume,
+#endif
 };
 
 static int debug;
@@ -831,7 +870,6 @@ static void option_instat_callback(struct urb *urb)
 	int status = urb->status;
 	struct usb_serial_port *port =  urb->context;
 	struct option_port_private *portdata = usb_get_serial_port_data(port);
-	struct usb_serial *serial = port->serial;
 
 	dbg("%s", __func__);
 	dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
@@ -927,7 +965,6 @@ static int option_open(struct tty_struct *tty,
 			struct usb_serial_port *port, struct file *filp)
 {
 	struct option_port_private *portdata;
-	struct usb_serial *serial = port->serial;
 	int i, err;
 	struct urb *urb;
@@ -1187,6 +1224,7 @@ static void option_release(struct usb_serial *serial)
 	}
 }
 
+#ifdef CONFIG_PM
 static int option_suspend(struct usb_serial *serial, pm_message_t message)
 {
 	dbg("%s entered", __func__);
@@ -1245,6 +1283,7 @@ static int option_resume(struct usb_serial *serial)
 	}
 	return 0;
 }
+#endif
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
...
@@ -961,7 +961,7 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
 				 US_BULK_GET_MAX_LUN,
 				 USB_DIR_IN | USB_TYPE_CLASS |
 				 USB_RECIP_INTERFACE,
-				 0, us->ifnum, us->iobuf, 1, HZ);
+				 0, us->ifnum, us->iobuf, 1, 10*HZ);
 
 	US_DEBUGP("GetMaxLUN command result is %d, data is %d\n",
 		  result, us->iobuf[0]);
...