Commit 0344606b authored by Neil Zhang, committed by Felipe Balbi

usb: gadget: mv_udc: avoid sleeping on spinlock

build_dtd() can be called while holding a spinlock, but GFP_KERNEL allows
dma_pool_alloc() to sleep, so we need to use GFP_ATOMIC instead of
GFP_KERNEL. Since allocating with GFP_ATOMIC can fail, also add error
handling for the out-of-memory case.
Signed-off-by: Alexey Khoroshilov <khoroshilov@ispras.ru>
Signed-off-by: Neil Zhang <zhangwm@marvell.com>
Signed-off-by: Felipe Balbi <balbi@ti.com>
parent a07bc24e
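
For readers unfamiliar with this bug class, a minimal sketch of the pattern
the patch fixes, using hypothetical names rather than the driver's actual
code: holding a spinlock puts the CPU in atomic context, where a GFP_KERNEL
allocation (which may sleep to reclaim memory) is illegal; GFP_ATOMIC never
sleeps, but it can fail, so the NULL return must be handled.

/* Sketch only -- hypothetical helper, not part of this patch. */
#include <linux/dmapool.h>
#include <linux/spinlock.h>

static void *alloc_descriptor(struct dma_pool *pool, spinlock_t *lock,
			      dma_addr_t *dma)
{
	unsigned long flags;
	void *desc;

	spin_lock_irqsave(lock, flags);

	/*
	 * BUG (old code): dma_pool_alloc(pool, GFP_KERNEL, dma) may sleep
	 * to reclaim memory, and sleeping while holding a spinlock is
	 * illegal ("BUG: scheduling while atomic").
	 *
	 * OK (new code): GFP_ATOMIC never sleeps, but it fails more easily
	 * under memory pressure, so callers must cope with NULL -- which is
	 * exactly the error handling this patch adds.
	 */
	desc = dma_pool_alloc(pool, GFP_ATOMIC, dma);

	spin_unlock_irqrestore(lock, flags);
	return desc;	/* may be NULL; caller must unwind */
}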
@@ -373,7 +373,7 @@ static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
 	 * Be careful that no _GFP_HIGHMEM is set,
 	 * or we can not use dma_to_virt
 	 */
-	dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);
+	dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
 	if (dtd == NULL)
 		return dtd;
 
@@ -706,6 +706,7 @@ mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 	struct mv_req *req = container_of(_req, struct mv_req, req);
 	struct mv_udc *udc = ep->udc;
 	unsigned long flags;
+	int retval;
 
 	/* catch various bogus parameters */
 	if (!_req || !req->req.complete || !req->req.buf
@@ -753,15 +754,17 @@ mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 
 	/* build dtds and push them to device queue */
 	if (!req_to_dtd(req)) {
-		int retval;
 		retval = queue_dtd(ep, req);
 		if (retval) {
 			spin_unlock_irqrestore(&udc->lock, flags);
-			return retval;
+			dev_err(&udc->dev->dev, "Failed to queue dtd\n");
+			goto err_unmap_dma;
 		}
 	} else {
 		spin_unlock_irqrestore(&udc->lock, flags);
-		return -ENOMEM;
+		dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
+		retval = -ENOMEM;
+		goto err_unmap_dma;
 	}
 
 	/* Update ep0 state */
@@ -773,6 +776,22 @@ mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 	spin_unlock_irqrestore(&udc->lock, flags);
 
 	return 0;
+
+err_unmap_dma:
+	if (req->mapped) {
+		dma_unmap_single(ep->udc->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			((ep_dir(ep) == EP_DIR_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE));
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+	} else
+		dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			((ep_dir(ep) == EP_DIR_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE));
+
+	return retval;
 }
 
 static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
@@ -1497,15 +1516,17 @@ udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
 	}
 
 	/* prime the data phase */
-	if (!req_to_dtd(req))
+	if (!req_to_dtd(req)) {
 		retval = queue_dtd(ep, req);
-	else{	/* no mem */
+		if (retval) {
+			dev_err(&udc->dev->dev,
+				"Failed to queue dtd when prime status\n");
+			goto out;
+		}
+	} else{	/* no mem */
 		retval = -ENOMEM;
-		goto out;
-	}
-
-	if (retval) {
-		dev_err(&udc->dev->dev, "response error on GET_STATUS request\n");
+		dev_err(&udc->dev->dev,
+			"Failed to dma_pool_alloc when prime status\n");
 		goto out;
 	}
 
@@ -1513,6 +1534,15 @@ udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
 
 	return 0;
 out:
+	if (req->mapped) {
+		dma_unmap_single(ep->udc->gadget.dev.parent,
+			req->req.dma, req->req.length,
+			((ep_dir(ep) == EP_DIR_IN) ?
+				DMA_TO_DEVICE : DMA_FROM_DEVICE));
+		req->req.dma = DMA_ADDR_INVALID;
+		req->mapped = 0;
+	}
+
 	return retval;
 }
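
The err_unmap_dma and out blocks added above share one idea: once the
request buffer has been DMA-mapped, every later failure path must undo the
mapping before returning, or the mapping leaks. A generic sketch of that
unwind pattern, with hypothetical names (struct my_req, hw_queue()) and
assuming the buffer was mapped with dma_map_single():

/* Sketch only -- hypothetical driver code, not part of this patch. */
#include <linux/dma-mapping.h>

struct my_req {
	void		*buf;	/* CPU buffer to stream to the device */
	size_t		len;
	dma_addr_t	dma;	/* filled in by dma_map_single() */
};

/* hw_queue() stands in for the real "hand request to hardware" step. */
static int hw_queue(struct my_req *req);

static int queue_mapped_request(struct device *dev, struct my_req *req,
				enum dma_data_direction dir)
{
	int retval;

	req->dma = dma_map_single(dev, req->buf, req->len, dir);
	if (dma_mapping_error(dev, req->dma))
		return -ENOMEM;

	retval = hw_queue(req);
	if (retval)
		goto err_unmap_dma;

	return 0;

err_unmap_dma:
	/*
	 * Error after mapping: release the mapping before returning,
	 * the same unwind mv_ep_queue() gains in this patch.
	 */
	dma_unmap_single(dev, req->dma, req->len, dir);
	return retval;
}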