Commit 68d94a84 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'dmaengine-fix-5.0-rc6' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 - Fix in at_xdmac for wrongful channel state
 - Fix for imx driver for wrong callback invocation
 - Fix to bcm driver for interrupt race & transaction abort.
 - Fix in dmatest to abort in mapping error

* tag 'dmaengine-fix-5.0-rc6' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: dmatest: Abort test in case of mapping error
  dmaengine: bcm2835: Fix abort of transactions
  dmaengine: bcm2835: Fix interrupt race on RT
  dmaengine: imx-dma: fix wrong callback invoke
  dmaengine: at_xdmac: Fix wrongfull report of a channel as in use
parents aadaa806 6454368a
...@@ -203,6 +203,7 @@ struct at_xdmac_chan { ...@@ -203,6 +203,7 @@ struct at_xdmac_chan {
u32 save_cim; u32 save_cim;
u32 save_cnda; u32 save_cnda;
u32 save_cndc; u32 save_cndc;
u32 irq_status;
unsigned long status; unsigned long status;
struct tasklet_struct tasklet; struct tasklet_struct tasklet;
struct dma_slave_config sconfig; struct dma_slave_config sconfig;
...@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data) ...@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
struct at_xdmac_desc *desc; struct at_xdmac_desc *desc;
u32 error_mask; u32 error_mask;
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
__func__, atchan->status); __func__, atchan->irq_status);
error_mask = AT_XDMAC_CIS_RBEIS error_mask = AT_XDMAC_CIS_RBEIS
| AT_XDMAC_CIS_WBEIS | AT_XDMAC_CIS_WBEIS
...@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data) ...@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
if (at_xdmac_chan_is_cyclic(atchan)) { if (at_xdmac_chan_is_cyclic(atchan)) {
at_xdmac_handle_cyclic(atchan); at_xdmac_handle_cyclic(atchan);
} else if ((atchan->status & AT_XDMAC_CIS_LIS) } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
|| (atchan->status & error_mask)) { || (atchan->irq_status & error_mask)) {
struct dma_async_tx_descriptor *txd; struct dma_async_tx_descriptor *txd;
if (atchan->status & AT_XDMAC_CIS_RBEIS) if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
dev_err(chan2dev(&atchan->chan), "read bus error!!!"); dev_err(chan2dev(&atchan->chan), "read bus error!!!");
if (atchan->status & AT_XDMAC_CIS_WBEIS) if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
dev_err(chan2dev(&atchan->chan), "write bus error!!!"); dev_err(chan2dev(&atchan->chan), "write bus error!!!");
if (atchan->status & AT_XDMAC_CIS_ROIS) if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
spin_lock(&atchan->lock); spin_lock(&atchan->lock);
...@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) ...@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
atchan = &atxdmac->chan[i]; atchan = &atxdmac->chan[i];
chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
atchan->status = chan_status & chan_imr; atchan->irq_status = chan_status & chan_imr;
dev_vdbg(atxdmac->dma.dev, dev_vdbg(atxdmac->dma.dev,
"%s: chan%d: imr=0x%x, status=0x%x\n", "%s: chan%d: imr=0x%x, status=0x%x\n",
__func__, i, chan_imr, chan_status); __func__, i, chan_imr, chan_status);
...@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) ...@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
at_xdmac_chan_read(atchan, AT_XDMAC_CDA), at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
tasklet_schedule(&atchan->tasklet); tasklet_schedule(&atchan->tasklet);
......
...@@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg( ...@@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
} }
} }
static int bcm2835_dma_abort(void __iomem *chan_base) static int bcm2835_dma_abort(struct bcm2835_chan *c)
{ {
unsigned long cs; void __iomem *chan_base = c->chan_base;
long int timeout = 10000; long int timeout = 10000;
cs = readl(chan_base + BCM2835_DMA_CS); /*
if (!(cs & BCM2835_DMA_ACTIVE)) * A zero control block address means the channel is idle.
* (The ACTIVE flag in the CS register is not a reliable indicator.)
*/
if (!readl(chan_base + BCM2835_DMA_ADDR))
return 0; return 0;
/* Write 0 to the active bit - Pause the DMA */ /* Write 0 to the active bit - Pause the DMA */
writel(0, chan_base + BCM2835_DMA_CS); writel(0, chan_base + BCM2835_DMA_CS);
/* Wait for any current AXI transfer to complete */ /* Wait for any current AXI transfer to complete */
while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) { while ((readl(chan_base + BCM2835_DMA_CS) &
BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
cpu_relax(); cpu_relax();
cs = readl(chan_base + BCM2835_DMA_CS);
}
/* We'll un-pause when we set of our next DMA */ /* Peripheral might be stuck and fail to signal AXI write responses */
if (!timeout) if (!timeout)
return -ETIMEDOUT; dev_err(c->vc.chan.device->dev,
"failed to complete outstanding writes\n");
if (!(cs & BCM2835_DMA_ACTIVE))
return 0;
/* Terminate the control block chain */
writel(0, chan_base + BCM2835_DMA_NEXTCB);
/* Abort the whole DMA */
writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
chan_base + BCM2835_DMA_CS);
writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
return 0; return 0;
} }
...@@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) ...@@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
spin_lock_irqsave(&c->vc.lock, flags); spin_lock_irqsave(&c->vc.lock, flags);
/* Acknowledge interrupt */ /*
writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); * Clear the INT flag to receive further interrupts. Keep the channel
* active in case the descriptor is cyclic or in case the client has
* already terminated the descriptor and issued a new one. (May happen
* if this IRQ handler is threaded.) If the channel is finished, it
* will remain idle despite the ACTIVE flag being set.
*/
writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
c->chan_base + BCM2835_DMA_CS);
d = c->desc; d = c->desc;
...@@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) ...@@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
if (d->cyclic) { if (d->cyclic) {
/* call the cyclic callback */ /* call the cyclic callback */
vchan_cyclic_callback(&d->vd); vchan_cyclic_callback(&d->vd);
} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
/* Keep the DMA engine running */
writel(BCM2835_DMA_ACTIVE,
c->chan_base + BCM2835_DMA_CS);
} else {
vchan_cookie_complete(&c->desc->vd); vchan_cookie_complete(&c->desc->vd);
bcm2835_dma_start_desc(c); bcm2835_dma_start_desc(c);
} }
...@@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) ...@@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
unsigned long flags; unsigned long flags;
int timeout = 10000;
LIST_HEAD(head); LIST_HEAD(head);
spin_lock_irqsave(&c->vc.lock, flags); spin_lock_irqsave(&c->vc.lock, flags);
...@@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) ...@@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
list_del_init(&c->node); list_del_init(&c->node);
spin_unlock(&d->lock); spin_unlock(&d->lock);
/* /* stop DMA activity */
* Stop DMA activity: we assume the callback will not be called
* after bcm_dma_abort() returns (even if it does, it will see
* c->desc is NULL and exit.)
*/
if (c->desc) { if (c->desc) {
vchan_terminate_vdesc(&c->desc->vd); vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL; c->desc = NULL;
bcm2835_dma_abort(c->chan_base); bcm2835_dma_abort(c);
/* Wait for stopping */
while (--timeout) {
if (!(readl(c->chan_base + BCM2835_DMA_CS) &
BCM2835_DMA_ACTIVE))
break;
cpu_relax();
}
if (!timeout)
dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
} }
vchan_get_all_descriptors(&c->vc, &head); vchan_get_all_descriptors(&c->vc, &head);
......
...@@ -711,11 +711,9 @@ static int dmatest_func(void *data) ...@@ -711,11 +711,9 @@ static int dmatest_func(void *data)
srcs[i] = um->addr[i] + src_off; srcs[i] = um->addr[i] + src_off;
ret = dma_mapping_error(dev->dev, um->addr[i]); ret = dma_mapping_error(dev->dev, um->addr[i]);
if (ret) { if (ret) {
dmaengine_unmap_put(um);
result("src mapping error", total_tests, result("src mapping error", total_tests,
src_off, dst_off, len, ret); src_off, dst_off, len, ret);
failed_tests++; goto error_unmap_continue;
continue;
} }
um->to_cnt++; um->to_cnt++;
} }
...@@ -730,11 +728,9 @@ static int dmatest_func(void *data) ...@@ -730,11 +728,9 @@ static int dmatest_func(void *data)
DMA_BIDIRECTIONAL); DMA_BIDIRECTIONAL);
ret = dma_mapping_error(dev->dev, dsts[i]); ret = dma_mapping_error(dev->dev, dsts[i]);
if (ret) { if (ret) {
dmaengine_unmap_put(um);
result("dst mapping error", total_tests, result("dst mapping error", total_tests,
src_off, dst_off, len, ret); src_off, dst_off, len, ret);
failed_tests++; goto error_unmap_continue;
continue;
} }
um->bidi_cnt++; um->bidi_cnt++;
} }
...@@ -762,12 +758,10 @@ static int dmatest_func(void *data) ...@@ -762,12 +758,10 @@ static int dmatest_func(void *data)
} }
if (!tx) { if (!tx) {
dmaengine_unmap_put(um);
result("prep error", total_tests, src_off, result("prep error", total_tests, src_off,
dst_off, len, ret); dst_off, len, ret);
msleep(100); msleep(100);
failed_tests++; goto error_unmap_continue;
continue;
} }
done->done = false; done->done = false;
...@@ -776,12 +770,10 @@ static int dmatest_func(void *data) ...@@ -776,12 +770,10 @@ static int dmatest_func(void *data)
cookie = tx->tx_submit(tx); cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) { if (dma_submit_error(cookie)) {
dmaengine_unmap_put(um);
result("submit error", total_tests, src_off, result("submit error", total_tests, src_off,
dst_off, len, ret); dst_off, len, ret);
msleep(100); msleep(100);
failed_tests++; goto error_unmap_continue;
continue;
} }
dma_async_issue_pending(chan); dma_async_issue_pending(chan);
...@@ -790,22 +782,20 @@ static int dmatest_func(void *data) ...@@ -790,22 +782,20 @@ static int dmatest_func(void *data)
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
dmaengine_unmap_put(um);
if (!done->done) { if (!done->done) {
result("test timed out", total_tests, src_off, dst_off, result("test timed out", total_tests, src_off, dst_off,
len, 0); len, 0);
failed_tests++; goto error_unmap_continue;
continue;
} else if (status != DMA_COMPLETE) { } else if (status != DMA_COMPLETE) {
result(status == DMA_ERROR ? result(status == DMA_ERROR ?
"completion error status" : "completion error status" :
"completion busy status", total_tests, src_off, "completion busy status", total_tests, src_off,
dst_off, len, ret); dst_off, len, ret);
failed_tests++; goto error_unmap_continue;
continue;
} }
dmaengine_unmap_put(um);
if (params->noverify) { if (params->noverify) {
verbose_result("test passed", total_tests, src_off, verbose_result("test passed", total_tests, src_off,
dst_off, len, 0); dst_off, len, 0);
...@@ -846,6 +836,12 @@ static int dmatest_func(void *data) ...@@ -846,6 +836,12 @@ static int dmatest_func(void *data)
verbose_result("test passed", total_tests, src_off, verbose_result("test passed", total_tests, src_off,
dst_off, len, 0); dst_off, len, 0);
} }
continue;
error_unmap_continue:
dmaengine_unmap_put(um);
failed_tests++;
} }
ktime = ktime_sub(ktime_get(), ktime); ktime = ktime_sub(ktime_get(), ktime);
ktime = ktime_sub(ktime, comparetime); ktime = ktime_sub(ktime, comparetime);
......
...@@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data) ...@@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data)
{ {
struct imxdma_channel *imxdmac = (void *)data; struct imxdma_channel *imxdmac = (void *)data;
struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_engine *imxdma = imxdmac->imxdma;
struct imxdma_desc *desc; struct imxdma_desc *desc, *next_desc;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&imxdma->lock, flags); spin_lock_irqsave(&imxdma->lock, flags);
...@@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data) ...@@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data)
list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
if (!list_empty(&imxdmac->ld_queue)) { if (!list_empty(&imxdmac->ld_queue)) {
desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, next_desc = list_first_entry(&imxdmac->ld_queue,
node); struct imxdma_desc, node);
list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
if (imxdma_xfer_desc(desc) < 0) if (imxdma_xfer_desc(next_desc) < 0)
dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
__func__, imxdmac->channel); __func__, imxdmac->channel);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment