Commit c65bfa62 authored by Mian Yousaf Kaukab, committed by Felipe Balbi

usb: musb: maintain three states for buffer mappings instead of two

If DMA buffers are mapped by a higher layer, the single boolean musb_request.mapped
cannot describe their state precisely: it is still possible to call
dma_sync_single_for_device() from musb_g_giveback() even though txstate()/rxstate()
has already called unmap_dma_buffer() before falling back to PIO mode. Track three
states instead (UN_MAPPED, PRE_MAPPED by the gadget driver, MUSB_MAPPED by this
driver) so that unmap_dma_buffer() becomes a no-op on an already-unmapped request.

Moreover, the check for musb_ep->dma is moved inside map_dma_buffer(), so the
now-redundant checks at its call sites are removed. Where possible, is_dma_capable()
checks are merged with the buffer map state check through the new is_buffer_mapped()
macro.

Signed-off-by: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
parent 06624818
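
For readers who want the new scheme in isolation, here is a minimal user-space sketch of the three-state lifecycle this patch introduces. The demo_request struct, the map_buffer()/unmap_buffer() helpers, and the printf() stand-ins for the dma_* calls are invented for illustration only; the real driver code follows in the diff below. The point: with a plain boolean, the giveback path cannot tell "pre-mapped" apart from "already unmapped", while an explicit UN_MAPPED state makes the second unmap call a no-op.

/*
 * Minimal user-space sketch (not driver code) of the three buffer map
 * states.  demo_request, map_buffer(), unmap_buffer() and the printf()
 * stand-ins for dma_map_single()/dma_unmap_single()/dma_sync_single_*()
 * are illustrative only.
 */
#include <stdio.h>
#include <stdbool.h>

enum buffer_map_state { UN_MAPPED = 0, PRE_MAPPED, MUSB_MAPPED };

struct demo_request {
	bool dma_valid;			/* stands in for dma != DMA_ADDR_INVALID */
	enum buffer_map_state map_state;
};

static void map_buffer(struct demo_request *req)
{
	if (!req->dma_valid) {
		printf("dma_map_single()\n");	/* musb maps it itself */
		req->dma_valid = true;
		req->map_state = MUSB_MAPPED;
	} else {
		/* higher layer already mapped it; only sync ownership */
		printf("dma_sync_single_for_device()\n");
		req->map_state = PRE_MAPPED;
	}
}

static void unmap_buffer(struct demo_request *req)
{
	if (req->map_state == UN_MAPPED)
		return;			/* a repeated call is now a no-op */
	if (req->map_state == MUSB_MAPPED) {
		printf("dma_unmap_single()\n");
		req->dma_valid = false;
	} else {			/* PRE_MAPPED */
		printf("dma_sync_single_for_cpu()\n");
	}
	req->map_state = UN_MAPPED;
}

int main(void)
{
	/* a buffer pre-mapped by the gadget driver, then a PIO fallback */
	struct demo_request req = { .dma_valid = true, .map_state = UN_MAPPED };

	map_buffer(&req);	/* queue: sync for device, PRE_MAPPED */
	unmap_buffer(&req);	/* txstate() falls back to PIO: sync for cpu */
	unmap_buffer(&req);	/* giveback: no second sync this time */
	return 0;
}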
drivers/usb/musb/musb_gadget.c:

@@ -92,11 +92,19 @@
 
 /* ----------------------------------------------------------------------- */
 
+#define is_buffer_mapped(req) (is_dma_capable() && \
+		(req->map_state != UN_MAPPED))
+
 /* Maps the buffer to dma  */
 
 static inline void map_dma_buffer(struct musb_request *request,
-			struct musb *musb)
+			struct musb *musb, struct musb_ep *musb_ep)
 {
+	request->map_state = UN_MAPPED;
+
+	if (!is_dma_capable() || !musb_ep->dma)
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		request->request.dma = dma_map_single(
 				musb->controller,
@@ -105,7 +113,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 				request->tx
 					? DMA_TO_DEVICE
 					: DMA_FROM_DEVICE);
-		request->mapped = 1;
+		request->map_state = MUSB_MAPPED;
 	} else {
 		dma_sync_single_for_device(musb->controller,
 			request->request.dma,
@@ -113,7 +121,7 @@ static inline void map_dma_buffer(struct musb_request *request,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-		request->mapped = 0;
+		request->map_state = PRE_MAPPED;
 	}
 }
 
@@ -121,11 +129,14 @@ static inline void map_dma_buffer(struct musb_request *request,
 static inline void unmap_dma_buffer(struct musb_request *request,
 				struct musb *musb)
 {
+	if (!is_buffer_mapped(request))
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		DBG(20, "not unmapping a never mapped buffer\n");
 		return;
 	}
-	if (request->mapped) {
+	if (request->map_state == MUSB_MAPPED) {
 		dma_unmap_single(musb->controller,
 			request->request.dma,
 			request->request.length,
@@ -133,16 +144,15 @@ static inline void unmap_dma_buffer(struct musb_request *request,
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
 		request->request.dma = DMA_ADDR_INVALID;
-		request->mapped = 0;
-	} else {
+	} else { /* PRE_MAPPED */
 		dma_sync_single_for_cpu(musb->controller,
 			request->request.dma,
 			request->request.length,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
 	}
+	request->map_state = UN_MAPPED;
 }
 
 /*
@@ -172,8 +182,7 @@ __acquires(ep->musb->lock)
 
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable() && ep->dma)
-		unmap_dma_buffer(req, musb);
+	unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p, %d/%d\n",
 				ep->end_point.name, request,
@@ -335,7 +344,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 			csr);
 
 #ifndef CONFIG_MUSB_PIO_ONLY
-	if (is_dma_capable() && musb_ep->dma) {
+	if (is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		size_t request_size;
 
@@ -436,8 +445,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
 		 * Unmap the dma buffer back to cpu if dma channel
 		 * programming fails
 		 */
-		if (is_dma_capable() && musb_ep->dma)
-			unmap_dma_buffer(req, musb);
+		unmap_dma_buffer(req, musb);
 
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
@@ -627,7 +635,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 		return;
 	}
 
-	if (is_cppi_enabled() && musb_ep->dma) {
+	if (is_cppi_enabled() && is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		struct dma_channel	*channel = musb_ep->dma;
 
@@ -658,7 +666,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 	len = musb_readw(epio, MUSB_RXCOUNT);
 	if (request->actual < request->length) {
 #ifdef CONFIG_USB_INVENTRA_DMA
-		if (is_dma_capable() && musb_ep->dma) {
+		if (is_buffer_mapped(req)) {
 			struct dma_controller	*c;
 			struct dma_channel	*channel;
 			int			use_dma = 0;
@@ -742,7 +750,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			fifo_count = min_t(unsigned, len, fifo_count);
 
 #ifdef CONFIG_USB_TUSB_OMAP_DMA
-			if (tusb_dma_omap() && musb_ep->dma) {
+			if (tusb_dma_omap() && is_buffer_mapped(req)) {
 				struct dma_controller *c = musb->dma_controller;
 				struct dma_channel *channel = musb_ep->dma;
 				u32 dma_addr = request->dma + request->actual;
@@ -762,7 +770,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			 * programming fails. This buffer is mapped if the
 			 * channel allocation is successful
 			 */
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				unmap_dma_buffer(req, musb);
 
 				/*
@@ -1222,10 +1230,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma)
-		map_dma_buffer(request, musb);
-	else
-		request->mapped = 0;
+	map_dma_buffer(request, musb, musb_ep);
 
 	spin_lock_irqsave(&musb->lock, lockflags);
drivers/usb/musb/musb_gadget.h:

@@ -35,13 +35,19 @@
 #ifndef __MUSB_GADGET_H
 #define __MUSB_GADGET_H
 
+enum buffer_map_state {
+	UN_MAPPED = 0,
+	PRE_MAPPED,
+	MUSB_MAPPED
+};
+
 struct musb_request {
 	struct usb_request	request;
 	struct musb_ep		*ep;
 	struct musb		*musb;
 	u8			tx;		/* endpoint direction */
 	u8			epnum;
-	u8			mapped;
+	enum buffer_map_state	map_state;
 };
 
 static inline struct musb_request *to_musb_request(struct usb_request *req)