Commit 4a5d82ca authored by Yoshihiro Shimoda, committed by Kleber Sacilotto de Souza

usb: renesas_usbhs: add a workaround for a race condition of workqueue

BugLink: https://bugs.launchpad.net/bugs/1838467

commit b2357839 upstream.

Commit 6e4b74e4 ("usb: renesas: fix scheduling in atomic context bug") fixed a
scheduling-while-atomic bug by deferring the DMA setup to a workqueue for the
shdmac dmaengine driver. However, this opened a potential race between a
pending work item and usbhsg_ep_free_request() in gadget mode: if
usbhsg_ep_free_request() runs while the work is still queued, the work_struct
is freed before the work handler executes, and process_one_work() later
dereferences the freed memory, causing a kernel panic.
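
To illustrate the window, a minimal hypothetical sketch of the pattern (not
the driver's actual code; in the driver the work_struct is embedded in
struct usbhs_pkt, which belongs to the request being freed):

	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);	/* work item is now pending */

	/* gadget side frees the request (usbhsg_ep_free_request()) before
	 * the work runs, which also frees the embedded work_struct ... */
	kfree(pkt);

	/* ... so when the workqueue finally dequeues it, process_one_work()
	 * dereferences freed memory and the kernel panics */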

The fix would be easy if we could call cancel_work_sync() somewhere before the
request is freed. However, usbhsg_ep_free_request() is called in atomic
context (e.g. the f_ncm driver frees the request via gether_disconnect()), so
a sleeping cancel_work_sync() cannot be used there.
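
In process context the obvious fix would look roughly like this (hypothetical
sketch only; it cannot be used here because this path must not sleep):

	/* would close the race, but cancel_work_sync() may sleep and
	 * usbhsg_ep_free_request() is called in atomic context */
	cancel_work_sync(&pkt->work);	/* wait for pending/running work */
	kfree(pkt);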

For now, almost all users have the "USB-DMAC", and its DMAengine driver can be
used in atomic context. So, this patch works around the race condition by
calling the DMAengine APIs directly, without the workqueue, when a USB-DMAC is
present.

This leaves a TODO for the shdmac environment (SH7724), but since that
platform does not have SMP, the race condition is unlikely to be hit there.

Fixes: ab330cf3 ("usb: renesas_usbhs: add support for USB-DMAC")
Cc: <stable@vger.kernel.org> # v4.1+
Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Connor Kuehl <connor.kuehl@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
parent c45c996d
@@ -819,9 +819,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
 }
 
 static void usbhsf_dma_complete(void *arg);
-static void xfer_work(struct work_struct *work)
+static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
 {
-	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
 	struct usbhs_pipe *pipe = pkt->pipe;
 	struct usbhs_fifo *fifo;
 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
@@ -829,12 +828,10 @@ static void xfer_work(struct work_struct *work)
 	struct dma_chan *chan;
 	struct device *dev = usbhs_priv_to_dev(priv);
 	enum dma_transfer_direction dir;
-	unsigned long flags;
 
-	usbhs_lock(priv, flags);
 	fifo = usbhs_pipe_to_fifo(pipe);
 	if (!fifo)
-		goto xfer_work_end;
+		return;
 
 	chan = usbhsf_dma_chan_get(fifo, pkt);
 	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
@@ -843,7 +840,7 @@ static void xfer_work(struct work_struct *work)
 					pkt->trans, dir,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc)
-		goto xfer_work_end;
+		return;
 
 	desc->callback	= usbhsf_dma_complete;
 	desc->callback_param	= pipe;
@@ -851,7 +848,7 @@ static void xfer_work(struct work_struct *work)
 	pkt->cookie = dmaengine_submit(desc);
 	if (pkt->cookie < 0) {
 		dev_err(dev, "Failed to submit dma descriptor\n");
-		goto xfer_work_end;
+		return;
 	}
 
 	dev_dbg(dev, "  %s %d (%d/ %d)\n",
@@ -862,8 +859,17 @@ static void xfer_work(struct work_struct *work)
 	dma_async_issue_pending(chan);
 	usbhsf_dma_start(pipe, fifo);
 	usbhs_pipe_enable(pipe);
+}
+
+static void xfer_work(struct work_struct *work)
+{
+	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
+	struct usbhs_pipe *pipe = pkt->pipe;
+	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+	unsigned long flags;
 
-xfer_work_end:
+	usbhs_lock(priv, flags);
+	usbhsf_dma_xfer_preparing(pkt);
 	usbhs_unlock(priv, flags);
 }
 
@@ -916,8 +922,13 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
 	pkt->trans = len;
 
 	usbhsf_tx_irq_ctrl(pipe, 0);
-	INIT_WORK(&pkt->work, xfer_work);
-	schedule_work(&pkt->work);
+	/* FIXME: Workaound for usb dmac that driver can be used in atomic */
+	if (usbhs_get_dparam(priv, has_usb_dmac)) {
+		usbhsf_dma_xfer_preparing(pkt);
+	} else {
+		INIT_WORK(&pkt->work, xfer_work);
+		schedule_work(&pkt->work);
+	}
 
 	return 0;
 
@@ -1023,8 +1034,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
 
 	pkt->trans = pkt->length;
 
-	INIT_WORK(&pkt->work, xfer_work);
-	schedule_work(&pkt->work);
+	usbhsf_dma_xfer_preparing(pkt);
 
 	return 0;