Commit 4e8ed7e4 authored by Greg Kroah-Hartman

Merge branch 'for-greg' of git://gitorious.org/usb/usb into usb-next

* 'for-greg' of git://gitorious.org/usb/usb:
  usb: ehci-omap: Show fatal probing time errors to end user
  usb: musb: introduce api for dma code to check compatibility with usb request
  usb: musb: maintain three states for buffer mappings instead of two
  usb: musb: disable double buffering when it's broken
  usb: musb: hsdma: change back to use musb_read/writew
  usb: musb: core: fix IRQ check
  usb: musb: fix kernel panic during s2ram(v2)
parents 3ea3c9b5 3e434a86
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -796,7 +796,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
 	hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
 			dev_name(&pdev->dev));
 	if (!hcd) {
-		dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret);
+		dev_err(&pdev->dev, "failed to create hcd with err %d\n", ret);
 		ret = -ENOMEM;
 		goto err_create_hcd;
 	}
@@ -864,7 +864,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
 	ret = omap_start_ehc(omap, hcd);
 	if (ret) {
-		dev_dbg(&pdev->dev, "failed to start ehci\n");
+		dev_err(&pdev->dev, "failed to start ehci with err %d\n", ret);
 		goto err_start;
 	}
@@ -879,7 +879,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
 	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
 	if (ret) {
-		dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+		dev_err(&pdev->dev, "failed to add hcd with err %d\n", ret);
 		goto err_add_hcd;
 	}
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -404,6 +404,7 @@ static int bfin_musb_init(struct musb *musb)
 	musb->xceiv->set_power = bfin_musb_set_power;
 
 	musb->isr = blackfin_interrupt;
+	musb->double_buffer_not_ok = true;
 
 	return 0;
 }
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -128,12 +128,7 @@ MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
 
 static inline struct musb *dev_to_musb(struct device *dev)
 {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-	/* usbcore insists dev->driver_data is a "struct hcd *" */
-	return hcd_to_musb(dev_get_drvdata(dev));
-#else
 	return dev_get_drvdata(dev);
-#endif
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1876,10 +1871,9 @@ allocate_instance(struct device *dev,
 		musb = kzalloc(sizeof *musb, GFP_KERNEL);
 		if (!musb)
 			return NULL;
-		dev_set_drvdata(dev, musb);
 #endif
+	dev_set_drvdata(dev, musb);
 	musb->mregs = mbase;
 	musb->ctrl_base = mbase;
 	musb->nIrq = -ENODEV;
@@ -2191,7 +2185,7 @@ static int __init musb_probe(struct platform_device *pdev)
 	void __iomem	*base;
 
 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!iomem || irq == 0)
+	if (!iomem || irq <= 0)
 		return -ENODEV;
 
 	base = ioremap(iomem->start, resource_size(iomem));
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -488,6 +488,18 @@ struct musb {
 	unsigned		set_address:1;
 	unsigned		test_mode:1;
 	unsigned		softconnect:1;
 
+	/*
+	 * FIXME: Remove this flag.
+	 *
+	 * This is only added to allow Blackfin to work
+	 * with current driver. For some unknown reason
+	 * Blackfin doesn't work with double buffering
+	 * and that's enabled by default.
+	 *
+	 * We added this flag to forcefully disable double
+	 * buffering until we get it working.
+	 */
+	unsigned		double_buffer_not_ok:1 __deprecated;
 	u8			address;
 	u8			test_mode_nr;
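How the flag takes effect is visible in the musb_gadget.c and musb_host.c hunks below: when it is set, MAXP is programmed with the endpoint's full FIFO size instead of the packet size. As a worked example with invented numbers, take a 1024-byte TX FIFO and 512-byte bulk packets:

	/* Worked example; the 1024/512 values are invented for illustration. */
	if (musb->double_buffer_not_ok)
		/* TXMAXP = 1024: one packet owns the whole FIFO, so the
		 * controller cannot ping-pong -- single buffering only. */
		musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
	else
		/* TXMAXP = 512: two packets fit in the FIFO and the
		 * hardware is free to double buffer them. */
		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
				| (musb_ep->hb_mult << 11));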
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -169,6 +169,9 @@ struct dma_controller {
 			dma_addr_t dma_addr,
 			u32 length);
 	int	(*channel_abort)(struct dma_channel *);
+	int	(*is_compatible)(struct dma_channel *channel,
+					u16 maxpacket,
+					void *buf, u32 length);
 };
 
 /* called after channel_program(), may indicate a fault */
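The new optional callback lets a DMA controller veto DMA for an individual request before the gadget core maps the buffer; rejected requests stay unmapped and fall back to PIO. A minimal sketch of a controller-side implementation, assuming an engine that needs word-aligned buffers and at least one full packet (the example_ names are hypothetical, not from this commit):

	/* Hypothetical implementation, illustrative only. */
	static int example_dma_is_compatible(struct dma_channel *channel,
			u16 maxpacket, void *buf, u32 length)
	{
		if ((unsigned long) buf & 0x3)
			return false;	/* engine requires 4-byte alignment */
		if (length < maxpacket)
			return false;	/* short transfers are cheaper via PIO */
		return true;
	}

	/* ...hooked up in the controller's dma_controller_create(): */
	controller->is_compatible = example_dma_is_compatible;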
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,11 +92,33 @@
 
 /* ----------------------------------------------------------------------- */
 
+#define is_buffer_mapped(req) (is_dma_capable() && \
+		(req->map_state != UN_MAPPED))
+
 /* Maps the buffer to dma  */
 static inline void map_dma_buffer(struct musb_request *request,
-			struct musb *musb)
+			struct musb *musb, struct musb_ep *musb_ep)
 {
+	int compatible = true;
+	struct dma_controller *dma = musb->dma_controller;
+
+	request->map_state = UN_MAPPED;
+
+	if (!is_dma_capable() || !musb_ep->dma)
+		return;
+
+	/* Check if DMA engine can handle this request.
+	 * DMA code must reject the USB request explicitly.
+	 * Default behaviour is to map the request.
+	 */
+	if (dma->is_compatible)
+		compatible = dma->is_compatible(musb_ep->dma,
+				musb_ep->packet_sz, request->request.buf,
+				request->request.length);
+	if (!compatible)
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		request->request.dma = dma_map_single(
 				musb->controller,
@@ -105,7 +127,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 				request->tx
 					? DMA_TO_DEVICE
 					: DMA_FROM_DEVICE);
-		request->mapped = 1;
+		request->map_state = MUSB_MAPPED;
 	} else {
 		dma_sync_single_for_device(musb->controller,
 			request->request.dma,
@@ -113,7 +135,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-		request->mapped = 0;
+		request->map_state = PRE_MAPPED;
 	}
 }
@@ -121,11 +143,14 @@ static inline void map_dma_buffer(struct musb_request *request,
 static inline void unmap_dma_buffer(struct musb_request *request,
 				struct musb *musb)
 {
+	if (!is_buffer_mapped(request))
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		DBG(20, "not unmapping a never mapped buffer\n");
 		return;
 	}
-	if (request->mapped) {
+	if (request->map_state == MUSB_MAPPED) {
 		dma_unmap_single(musb->controller,
 			request->request.dma,
 			request->request.length,
@@ -133,16 +158,15 @@ static inline void unmap_dma_buffer(struct musb_request *request,
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
 		request->request.dma = DMA_ADDR_INVALID;
-		request->mapped = 0;
-	} else {
+	} else { /* PRE_MAPPED */
 		dma_sync_single_for_cpu(musb->controller,
 			request->request.dma,
 			request->request.length,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
 	}
+	request->map_state = UN_MAPPED;
 }
 
 /*
@@ -172,8 +196,7 @@ __acquires(ep->musb->lock)
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable() && ep->dma)
-		unmap_dma_buffer(req, musb);
+	unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p,  %d/%d\n",
 			ep->end_point.name, request,
@@ -335,7 +358,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 			csr);
 
 #ifndef	CONFIG_MUSB_PIO_ONLY
-	if (is_dma_capable() && musb_ep->dma) {
+	if (is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		size_t request_size;
@@ -436,8 +459,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 		 * Unmap the dma buffer back to cpu if dma channel
 		 * programming fails
 		 */
-		if (is_dma_capable() && musb_ep->dma)
-			unmap_dma_buffer(req, musb);
+		unmap_dma_buffer(req, musb);
 
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
@@ -627,7 +649,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		return;
 	}
 
-	if (is_cppi_enabled() && musb_ep->dma) {
+	if (is_cppi_enabled() && is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		struct dma_channel	*channel = musb_ep->dma;
@@ -658,7 +680,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		len = musb_readw(epio, MUSB_RXCOUNT);
 		if (request->actual < request->length) {
 #ifdef CONFIG_USB_INVENTRA_DMA
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				struct dma_controller	*c;
 				struct dma_channel	*channel;
 				int			use_dma = 0;
@@ -742,7 +764,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			fifo_count = min_t(unsigned, len, fifo_count);
 
 #ifdef CONFIG_USB_TUSB_OMAP_DMA
-			if (tusb_dma_omap() && musb_ep->dma) {
+			if (tusb_dma_omap() && is_buffer_mapped(req)) {
 				struct dma_controller *c = musb->dma_controller;
 				struct dma_channel *channel = musb_ep->dma;
 				u32 dma_addr = request->dma + request->actual;
@@ -762,7 +784,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			 * programming fails. This buffer is mapped if the
 			 * channel allocation is successful
 			 */
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				unmap_dma_buffer(req, musb);
 
 				/*
@@ -989,7 +1011,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
 		/* Set TXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		if (musb->double_buffer_not_ok)
+			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
+		else
+			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
+					| (musb_ep->hb_mult << 11));
 
 		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
 		if (musb_readw(regs, MUSB_TXCSR)
@@ -1025,7 +1051,11 @@ static int musb_gadget_enable(struct usb_ep *ep,
 		/* Set RXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		if (musb->double_buffer_not_ok)
+			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
+		else
+			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
+					| (musb_ep->hb_mult << 11));
 
 		/* force shared fifo to OUT-only mode */
 		if (hw_ep->is_shared_fifo) {
@@ -1214,10 +1244,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma)
-		map_dma_buffer(request, musb);
-	else
-		request->mapped = 0;
+	map_dma_buffer(request, musb, musb_ep);
 
 	spin_lock_irqsave(&musb->lock, lockflags);
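One consequence of the map_dma_buffer() rework worth noting: a gadget driver may queue a request whose dma field already holds a valid bus address, in which case the core only syncs the buffer around the transfer and marks it PRE_MAPPED instead of mapping it a second time. A hypothetical sketch of that caller side (dev, ep, buf, len and my_complete are assumed to exist; error handling omitted):

	struct usb_request *req;

	req = usb_ep_alloc_request(ep, GFP_KERNEL);
	req->buf = buf;
	req->length = len;
	req->complete = my_complete;
	/* driver maps the buffer itself, so musb treats it as PRE_MAPPED */
	req->dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	usb_ep_queue(ep, req, GFP_ATOMIC);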
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -35,13 +35,19 @@
 #ifndef __MUSB_GADGET_H
 #define __MUSB_GADGET_H
 
+enum buffer_map_state {
+	UN_MAPPED = 0,
+	PRE_MAPPED,
+	MUSB_MAPPED
+};
+
 struct musb_request {
 	struct usb_request	request;
 	struct musb_ep		*ep;
 	struct musb		*musb;
 	u8 tx;			/* endpoint direction */
 	u8 epnum;
-	u8 mapped;
+	enum buffer_map_state map_state;
 };
 
 static inline struct musb_request *to_musb_request(struct usb_request *req)
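For reference, the transitions these three states go through in musb_gadget.c, summarized as a comment (informational, not part of the patch):

	/*
	 * UN_MAPPED:   initial state; also the resting state of PIO-only
	 *              and DMA-incompatible requests, which is why
	 *              is_buffer_mapped() can gate every DMA path.
	 * MUSB_MAPPED: map_dma_buffer() called dma_map_single() because
	 *              the gadget left request.dma == DMA_ADDR_INVALID;
	 *              undone with dma_unmap_single().
	 * PRE_MAPPED:  the gadget supplied its own DMA address, so musb
	 *              only brackets the transfer with
	 *              dma_sync_single_for_device()/_for_cpu().
	 */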
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -609,7 +609,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
 	/* Set RXMAXP with the FIFO size of the endpoint
 	 * to disable double buffer mode.
 	 */
-	if (musb->hwvers < MUSB_HWVERS_2000)
+	if (musb->double_buffer_not_ok)
 		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
 	else
 		musb_writew(ep->regs, MUSB_RXMAXP,
@@ -784,14 +784,13 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
 	/* protocol/endpoint/interval/NAKlimit */
 	if (epnum) {
 		musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
-		if (can_bulk_split(musb, qh->type))
+		if (musb->double_buffer_not_ok)
 			musb_writew(epio, MUSB_TXMAXP,
-				packet_sz
-				| ((hw_ep->max_packet_sz_tx /
-				packet_sz) - 1) << 11);
+				hw_ep->max_packet_sz_tx);
 		else
 			musb_writew(epio, MUSB_TXMAXP,
-				packet_sz);
+				qh->maxpacket |
+				((qh->hb_mult - 1) << 11));
 		musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
 	} else {
 		musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -94,24 +94,33 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
 {
 	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW),
-		((u16)((u32) dma_addr & 0xFFFF)));
+		dma_addr);
 	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH),
-		((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
+		(dma_addr >> 16));
 }
 
 static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
 {
-	return musb_readl(mbase,
+	u32 count = musb_readw(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
+
+	count = count << 16;
+
+	count |= musb_readw(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
+
+	return count;
 }
 
 static inline void musb_write_hsdma_count(void __iomem *mbase,
 					u8 bchannel, u32 len)
 {
-	musb_writel(mbase,
+	musb_writew(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),len);
+	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
-		len);
+		(len >> 16));
 }
 
 #endif /* CONFIG_BLACKFIN */
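The split/reassemble arithmetic restored above is easy to sanity-check outside the kernel. The following standalone snippet (illustrative, not part of the commit) round-trips a 32-bit count through two 16-bit halves the way the COUNT_HIGH/COUNT_LOW register pair does:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t len = 0x0001f400;		/* arbitrary transfer count */
		uint16_t lo  = (uint16_t)len;		/* COUNT_LOW:  0xf400 */
		uint16_t hi  = (uint16_t)(len >> 16);	/* COUNT_HIGH: 0x0001 */

		/* read path: high half shifted up, then OR in the low half */
		uint32_t count = ((uint32_t)hi << 16) | lo;

		assert(count == len);
		return 0;
	}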