Commit 700e2052 authored by Greg Kroah-Hartman

USB: xhci: fix lots of compiler warnings.

Turns out someone never built this code on a 64-bit platform.

Someone owes me a beer...
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent b7258a4a
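For anyone chasing the same class of warning: on 64-bit platforms, pointers and dma_addr_t can be 64 bits wide while unsigned int stays 32, so casting them down to feed a 0x%x format both truncates the printed value and triggers gcc's "cast from pointer to integer of different size". The fix used throughout the patch is %p for pointers and %llx with an (unsigned long long) cast for DMA addresses. A minimal userspace sketch of the before/after idiom (illustrative only, not code from this patch):

/* Build with: gcc -Wall -o fmt fmt.c */
#include <stdio.h>

int main(void)
{
        int trbs[16];                               /* stand-in for a TRB segment */
        unsigned long long dma = 0x1234abcd5678ULL; /* stand-in for a dma_addr_t */

        /*
         * Old form -- on a 64-bit build gcc warns
         * "cast from pointer to integer of different size"
         * and prints only the low 32 bits of the address:
         *
         *      printf("segment at 0x%x\n", (unsigned int) trbs);
         */

        /*
         * Fixed form: %p for virtual addresses, %llx plus an explicit
         * (unsigned long long) cast for DMA addresses, whose width
         * differs by architecture and kernel config.
         */
        printf("segment at %p (virtual) 0x%llx (DMA)\n", (void *) trbs, dma);
        return 0;
}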
@@ -397,10 +397,8 @@ int xhci_run(struct usb_hcd *hcd)
 	xhci_writel(xhci, temp, &xhci->op_regs->command);
 
 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-	xhci_dbg(xhci, "// Enabling event ring interrupter 0x%x"
-			" by writing 0x%x to irq_pending\n",
-			(unsigned int) xhci->ir_set,
-			(unsigned int) ER_IRQ_ENABLE(temp));
+	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
+			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
 	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
 			&xhci->ir_set->irq_pending);
 	xhci_print_ir_set(xhci, xhci->ir_set, 0);
@@ -431,8 +429,7 @@ int xhci_run(struct usb_hcd *hcd)
 	xhci_writel(xhci, temp, &xhci->op_regs->command);
 	/* Flush PCI posted writes */
 	temp = xhci_readl(xhci, &xhci->op_regs->command);
-	xhci_dbg(xhci, "// @%x = 0x%x\n",
-			(unsigned int) &xhci->op_regs->command, temp);
+	xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
 	if (doorbell)
 		(*doorbell)(xhci);
 
@@ -660,7 +657,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	if (ret || !urb->hcpriv)
 		goto done;
 
-	xhci_dbg(xhci, "Cancel URB 0x%x\n", (unsigned int) urb);
+	xhci_dbg(xhci, "Cancel URB %p\n", urb);
 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
 	ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
 	td = (struct xhci_td *) urb->hcpriv;
@@ -702,10 +699,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	int ret;
 
 	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
-	xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);
 	if (ret <= 0)
 		return ret;
 	xhci = hcd_to_xhci(hcd);
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 
 	drop_flag = xhci_get_endpoint_flag(&ep->desc);
 	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
@@ -730,8 +727,8 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	 */
 	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
 			in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
-		xhci_warn(xhci, "xHCI %s called with disabled ep %#x\n",
-				__func__, (unsigned int) ep);
+		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
+				__func__, ep);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return 0;
 	}
@@ -817,8 +814,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	 * ignore this request.
 	 */
 	if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
-		xhci_warn(xhci, "xHCI %s called with enabled ep %#x\n",
-				__func__, (unsigned int) ep);
+		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
+				__func__, ep);
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return 0;
 	}
@@ -904,7 +901,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return -EINVAL;
 	}
-	xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 	virt_dev = xhci->devs[udev->slot_id];
 
 	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
@@ -1009,7 +1006,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		return;
 	}
-	xhci_dbg(xhci, "%s called for udev %#x\n", __func__, (unsigned int) udev);
+	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
 	virt_dev = xhci->devs[udev->slot_id];
 	/* Free any rings allocated for added endpoints */
 	for (i = 0; i < 31; ++i) {
@@ -1184,16 +1181,16 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
 	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
 	xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
-	xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%08x = %#08x\n",
+	xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%p = %#08x\n",
 			udev->slot_id,
-			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
+			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
 			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
-	xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%08x = %#08x\n",
+	xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%p = %#08x\n",
 			udev->slot_id,
-			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
+			&xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
 			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
-	xhci_dbg(xhci, "Output Context DMA address = %#08x\n",
-			virt_dev->out_ctx_dma);
+	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
+			(unsigned long long)virt_dev->out_ctx_dma);
 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
 	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
...
@@ -40,16 +40,15 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	seg = kzalloc(sizeof *seg, flags);
 	if (!seg)
 		return 0;
-	xhci_dbg(xhci, "Allocating priv segment structure at 0x%x\n",
-			(unsigned int) seg);
+	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
 
 	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
 	if (!seg->trbs) {
 		kfree(seg);
 		return 0;
 	}
-	xhci_dbg(xhci, "// Allocating segment at 0x%x (virtual) 0x%x (DMA)\n",
-			(unsigned int) seg->trbs, (u32) dma);
+	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
+			seg->trbs, (unsigned long long)dma);
 	memset(seg->trbs, 0, SEGMENT_SIZE);
 	seg->dma = dma;
 
@@ -63,14 +62,12 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
 
 	if (!seg)
 		return;
 	if (seg->trbs) {
-		xhci_dbg(xhci, "Freeing DMA segment at 0x%x"
-				" (virtual) 0x%x (DMA)\n",
-				(unsigned int) seg->trbs, (u32) seg->dma);
+		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
+				seg->trbs, (unsigned long long)seg->dma);
 		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
 		seg->trbs = NULL;
 	}
-	xhci_dbg(xhci, "Freeing priv segment structure at 0x%x\n",
-			(unsigned int) seg);
+	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
 	kfree(seg);
 }
@@ -98,8 +95,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		val |= TRB_TYPE(TRB_LINK);
 		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
 	}
-	xhci_dbg(xhci, "Linking segment 0x%x to segment 0x%x (DMA)\n",
-			prev->dma, next->dma);
+	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
+			(unsigned long long)prev->dma,
+			(unsigned long long)next->dma);
 }
 
 /* XXX: Do we need the hcd structure in all these functions? */
@@ -112,7 +110,7 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 		return;
 	first_seg = ring->first_seg;
 	seg = first_seg->next;
-	xhci_dbg(xhci, "Freeing ring at 0x%x\n", (unsigned int) ring);
+	xhci_dbg(xhci, "Freeing ring at %p\n", ring);
 	while (seg != first_seg) {
 		struct xhci_segment *next = seg->next;
 		xhci_segment_free(xhci, seg);
@@ -137,7 +135,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 	struct xhci_segment *prev;
 
 	ring = kzalloc(sizeof *(ring), flags);
-	xhci_dbg(xhci, "Allocating ring at 0x%x\n", (unsigned int) ring);
+	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
 	if (!ring)
 		return 0;
 
@@ -169,8 +167,8 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		/* See section 4.9.2.1 and 6.4.4.1 */
 		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
 		xhci_dbg(xhci, "Wrote link toggle flag to"
-				" segment 0x%x (virtual), 0x%x (DMA)\n",
-				(unsigned int) prev, (u32) prev->dma);
+				" segment %p (virtual), 0x%llx (DMA)\n",
+				prev, (unsigned long long)prev->dma);
 	}
 	/* The ring is empty, so the enqueue pointer == dequeue pointer */
 	ring->enqueue = ring->first_seg->trbs;
@@ -242,7 +240,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	if (!dev->out_ctx)
 		goto fail;
 	dev->out_ctx_dma = dma;
-	xhci_dbg(xhci, "Slot %d output ctx = 0x%x (dma)\n", slot_id, dma);
+	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
+			(unsigned long long)dma);
 	memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
 
 	/* Allocate the (input) device context for address device command */
@@ -250,7 +249,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	if (!dev->in_ctx)
 		goto fail;
 	dev->in_ctx_dma = dma;
-	xhci_dbg(xhci, "Slot %d input ctx = 0x%x (dma)\n", slot_id, dma);
+	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
+			(unsigned long long)dma);
 	memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
 
 	/* Allocate endpoint 0 ring */
@@ -266,10 +266,10 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	 */
 	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
 		(u32) dev->out_ctx_dma + (32);
-	xhci_dbg(xhci, "Set slot id %d dcbaa entry 0x%x to 0x%x\n",
+	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 			slot_id,
-			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*slot_id],
-			dev->out_ctx_dma);
+			&xhci->dcbaa->dev_context_ptrs[2*slot_id],
+			(unsigned long long)dev->out_ctx_dma);
 	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
 
 	return 1;
@@ -339,7 +339,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
 		dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
 		dev->in_ctx->slot.tt_info |= udev->ttport << 8;
 	}
-	xhci_dbg(xhci, "udev->tt = 0x%x\n", (unsigned int) udev->tt);
+	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
 	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
 
 	/* Step 4 - ring already allocated */
@@ -643,8 +643,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
 	xhci->dcbaa->dma = dma;
-	xhci_dbg(xhci, "// Device context base array address = 0x%x (DMA), 0x%x (virt)\n",
-			xhci->dcbaa->dma, (unsigned int) xhci->dcbaa);
+	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
 	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
 	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);
 
@@ -668,8 +668,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
-	xhci_dbg(xhci, "Allocated command ring at 0x%x\n", (unsigned int) xhci->cmd_ring);
-	xhci_dbg(xhci, "First segment DMA is 0x%x\n", (unsigned int) xhci->cmd_ring->first_seg->dma);
+	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
+	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+			(unsigned long long)xhci->cmd_ring->first_seg->dma);
 
 	/* Set the address in the Command Ring Control register */
 	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]);
@@ -705,15 +706,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
 	if (!xhci->erst.entries)
 		goto fail;
-	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%x\n", dma);
+	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+			(unsigned long long)dma);
 	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
 
 	xhci->erst.num_entries = ERST_NUM_SEGS;
 	xhci->erst.erst_dma_addr = dma;
-	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = 0x%x, dma addr = 0x%x\n",
+	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
 			xhci->erst.num_entries,
-			(unsigned int) xhci->erst.entries,
-			xhci->erst.erst_dma_addr);
+			xhci->erst.entries,
+			(unsigned long long)xhci->erst.erst_dma_addr);
 
 	/* set ring base address and size for each segment table entry */
 	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
@@ -735,8 +737,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
 
 	/* set the segment table base address */
-	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%x\n",
-			xhci->erst.erst_dma_addr);
+	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+			(unsigned long long)xhci->erst.erst_dma_addr);
 	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);
 	val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]);
 	val &= ERST_PTR_MASK;
...
@@ -74,12 +74,12 @@
 dma_addr_t trb_virt_to_dma(struct xhci_segment *seg,
 		union xhci_trb *trb)
 {
-	unsigned int offset;
+	dma_addr_t offset;
 
 	if (!seg || !trb || (void *) trb < (void *) seg->trbs)
 		return 0;
 	/* offset in bytes, since these are byte-addressable */
-	offset = (unsigned int) trb - (unsigned int) seg->trbs;
+	offset = trb - seg->trbs;
 	/* SEGMENT_SIZE in bytes, trbs are 16-byte aligned */
 	if (offset > SEGMENT_SIZE || (offset % sizeof(*trb)) != 0)
 		return 0;
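A side note on the trb_virt_to_dma hunk above: with the casts gone, trb - seg->trbs is plain C pointer subtraction, which yields a count of elements rather than the byte offset the surrounding comments describe; converting an element count to bytes takes an explicit sizeof scaling. A self-contained sketch of that arithmetic, using generic stand-in types rather than the driver's structures:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct trb { uint32_t field[4]; };      /* 16 bytes, like an xHCI TRB */

/* Byte offset of an element pointer from the base of its array. */
static uint64_t byte_offset(const struct trb *base, const struct trb *p)
{
        ptrdiff_t nelems = p - base;            /* element count, not bytes */
        return (uint64_t) nelems * sizeof(*p);  /* scale to a byte offset */
}

int main(void)
{
        struct trb ring[64];

        /* The third TRB sits 2 * 16 = 32 bytes past the segment base. */
        printf("byte offset = %llu\n",
                        (unsigned long long) byte_offset(ring, &ring[2]));
        return 0;
}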
@@ -145,8 +145,8 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
 	if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
 		ring->cycle_state = (ring->cycle_state ? 0 : 1);
 		if (!in_interrupt())
-			xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
-					(unsigned int) ring,
+			xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+					ring,
 					(unsigned int) ring->cycle_state);
 	}
 	ring->deq_seg = ring->deq_seg->next;
@@ -195,8 +195,8 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
 		if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
 			ring->cycle_state = (ring->cycle_state ? 0 : 1);
 			if (!in_interrupt())
-				xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
-						(unsigned int) ring,
+				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
+						ring,
 						(unsigned int) ring->cycle_state);
 		}
 	}
@@ -387,12 +387,12 @@ void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			 */
 			cur_trb->generic.field[3] &= ~TRB_CHAIN;
 			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
-			xhci_dbg(xhci, "Address = 0x%x (0x%x dma); "
-					"in seg 0x%x (0x%x dma)\n",
-					(unsigned int) cur_trb,
-					trb_virt_to_dma(cur_seg, cur_trb),
-					(unsigned int) cur_seg,
-					cur_seg->dma);
+			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
+					"in seg %p (0x%llx dma)\n",
+					cur_trb,
+					(unsigned long long)trb_virt_to_dma(cur_seg, cur_trb),
+					cur_seg,
+					(unsigned long long)cur_seg->dma);
 		} else {
 			cur_trb->generic.field[0] = 0;
 			cur_trb->generic.field[1] = 0;
@@ -400,12 +400,12 @@ void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			/* Preserve only the cycle bit of this TRB */
 			cur_trb->generic.field[3] &= TRB_CYCLE;
 			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
-			xhci_dbg(xhci, "Cancel TRB 0x%x (0x%x dma) "
-					"in seg 0x%x (0x%x dma)\n",
-					(unsigned int) cur_trb,
-					trb_virt_to_dma(cur_seg, cur_trb),
-					(unsigned int) cur_seg,
-					cur_seg->dma);
+			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
+					"in seg %p (0x%llx dma)\n",
+					cur_trb,
+					(unsigned long long)trb_virt_to_dma(cur_seg, cur_trb),
+					cur_seg,
+					(unsigned long long)cur_seg->dma);
 		}
 		if (cur_trb == cur_td->last_trb)
 			break;
@@ -456,9 +456,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	 */
 	list_for_each(entry, &ep_ring->cancelled_td_list) {
 		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
-		xhci_dbg(xhci, "Cancelling TD starting at 0x%x, 0x%x (dma).\n",
-				(unsigned int) cur_td->first_trb,
-				trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
+				cur_td->first_trb,
+				(unsigned long long)trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
 		/*
 		 * If we stopped on the TD we need to cancel, then we have to
 		 * move the xHC endpoint ring dequeue pointer past this TD.
@@ -480,12 +480,12 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-		xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = 0x%x (0x%x dma), "
-				"new deq ptr = 0x%x (0x%x dma), new cycle = %u\n",
-				(unsigned int) deq_state.new_deq_seg,
-				deq_state.new_deq_seg->dma,
-				(unsigned int) deq_state.new_deq_ptr,
-				trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
+		xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+				"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+				deq_state.new_deq_seg,
+				(unsigned long long)deq_state.new_deq_seg->dma,
+				deq_state.new_deq_ptr,
+				(unsigned long long)trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
 				deq_state.new_cycle_state);
 		queue_set_tr_deq(xhci, slot_id, ep_index,
 				deq_state.new_deq_seg,
@@ -522,8 +522,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 
 		cur_td->urb->hcpriv = NULL;
 		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
-		xhci_dbg(xhci, "Giveback cancelled URB 0x%x\n",
-				(unsigned int) cur_td->urb);
+		xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
 		spin_unlock(&xhci->lock);
 		/* Doesn't matter what we pass for status, since the core will
 		 * just overwrite it (because the URB has been unlinked).
@@ -1183,9 +1182,9 @@ unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
 			num_trbs++;
 			running_total += TRB_MAX_BUFF_SIZE;
 		}
-		xhci_dbg(xhci, " sg #%d: dma = %#x, len = %#x (%d), num_trbs = %d\n",
-				i, sg_dma_address(sg), len, len,
-				num_trbs - previous_total_trbs);
+		xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
+				i, (unsigned long long)sg_dma_address(sg),
+				len, len, num_trbs - previous_total_trbs);
 
 		len = min_t(int, len, temp);
 		temp -= len;
@@ -1394,11 +1393,11 @@ int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
 	if (!in_interrupt())
-		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#x, num_trbs = %d\n",
+		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
 				urb->ep->desc.bEndpointAddress,
 				urb->transfer_buffer_length,
 				urb->transfer_buffer_length,
-				urb->transfer_dma,
+				(unsigned long long)urb->transfer_dma,
 				num_trbs);
 
 	ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
@@ -1640,9 +1639,8 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 	addr = trb_virt_to_dma(deq_seg, deq_ptr);
 	if (addr == 0)
 		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
-	xhci_warn(xhci, "WARN deq seg = 0x%x, deq pt = 0x%x\n",
-			(unsigned int) deq_seg,
-			(unsigned int) deq_ptr);
+	xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
+			deq_seg, deq_ptr);
 	return queue_command(xhci, (u32) addr | cycle_state, 0, 0,
 			trb_slot_id | trb_ep_index | type);
 }
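Worth noting what the patch deliberately leaves 32-bit: queue_command(xhci, (u32) addr | cycle_state, ...) above, and the paired dcbaa_ptr[0]/dcbaa_ptr[1] stores in xhci_mem_init, hand a 64-bit DMA address to the controller as two 32-bit halves, since the xHCI register file is accessed one 32-bit word at a time. A generic sketch of that low/high split, with hypothetical helper names:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical MMIO write; a real driver would use writel(). */
static void reg_write32(volatile uint32_t *reg, uint32_t val)
{
        *reg = val;
}

/* Store a 64-bit bus address into a pair of 32-bit registers. */
static void write_addr64(volatile uint32_t reg[2], uint64_t addr)
{
        reg_write32(&reg[0], (uint32_t) addr);          /* low half  */
        reg_write32(&reg[1], (uint32_t) (addr >> 32));  /* high half */
}

int main(void)
{
        uint32_t fake_regs[2];

        write_addr64(fake_regs, 0x00000001234ab000ULL);
        printf("lo = %#x, hi = %#x\n",
                        (unsigned) fake_regs[0], (unsigned) fake_regs[1]);
        return 0;
}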