Commit fc514460 authored by Lars-Peter Clausen, committed by Vinod Koul

dma: pl330: Fix cyclic transfers

Allocate a descriptor for each period of a cyclic transfer, not just the first.
Also, since the callback needs to be called for each finished period, make sure to
initialize the callback and callback_param fields of each descriptor in a cyclic
transfer.

Cc: stable@vger.kernel.org
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 27abb2ff
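
For context, a minimal sketch of the consumer side this fix matters for: a dmaengine client that sets up a cyclic transfer and expects its completion callback once per period. This is illustrative only and not part of the commit; the pl330_user_* names and the DMA_MEM_TO_DEV setup are assumptions, while dmaengine_prep_dma_cyclic(), dmaengine_submit() and dma_async_issue_pending() are the standard dmaengine API.

/*
 * Illustrative dmaengine consumer for a cyclic transfer (e.g. an audio
 * ring buffer). Hypothetical example, not part of this commit; the
 * channel is assumed to be already configured via dmaengine_slave_config().
 */
#include <linux/dmaengine.h>

static void pl330_user_period_done(void *param)
{
	/* With this fix every per-period descriptor carries the callback,
	 * so this runs once per completed period, not just for the first. */
}

static int pl330_user_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* buf_len must be a whole multiple of period_len; the reworked
	 * pl330_prep_dma_cyclic() below now rejects anything else. */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = pl330_user_period_done;
	desc->callback_param = NULL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);

	return 0;
}

The first hunk below, in pl330_tx_submit(), is what copies callback and callback_param from the submitted descriptor into each per-period descriptor so the sketch above behaves as described.
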
drivers/dma/pl330.c

@@ -2505,6 +2505,10 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 	/* Assign cookies to all nodes */
 	while (!list_empty(&last->node)) {
 		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
+		if (pch->cyclic) {
+			desc->txd.callback = last->txd.callback;
+			desc->txd.callback_param = last->txd.callback_param;
+		}
 
 		dma_cookie_assign(&desc->txd);
 
@@ -2688,45 +2692,82 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 		size_t period_len, enum dma_transfer_direction direction,
 		unsigned long flags, void *context)
 {
-	struct dma_pl330_desc *desc;
+	struct dma_pl330_desc *desc = NULL, *first = NULL;
 	struct dma_pl330_chan *pch = to_pchan(chan);
+	struct dma_pl330_dmac *pdmac = pch->dmac;
+	unsigned int i;
 	dma_addr_t dst;
 	dma_addr_t src;
 
-	desc = pl330_get_desc(pch);
-	if (!desc) {
-		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
-			__func__, __LINE__);
+	if (len % period_len != 0)
 		return NULL;
-	}
 
-	switch (direction) {
-	case DMA_MEM_TO_DEV:
-		desc->rqcfg.src_inc = 1;
-		desc->rqcfg.dst_inc = 0;
-		desc->req.rqtype = MEMTODEV;
-		src = dma_addr;
-		dst = pch->fifo_addr;
-		break;
-	case DMA_DEV_TO_MEM:
-		desc->rqcfg.src_inc = 0;
-		desc->rqcfg.dst_inc = 1;
-		desc->req.rqtype = DEVTOMEM;
-		src = pch->fifo_addr;
-		dst = dma_addr;
-		break;
-	default:
+	if (!is_slave_direction(direction)) {
 		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
 			__func__, __LINE__);
 		return NULL;
 	}
 
-	desc->rqcfg.brst_size = pch->burst_sz;
-	desc->rqcfg.brst_len = 1;
+	for (i = 0; i < len / period_len; i++) {
+		desc = pl330_get_desc(pch);
+		if (!desc) {
+			dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+				__func__, __LINE__);
 
-	pch->cyclic = true;
+			if (!first)
+				return NULL;
+
+			spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+			while (!list_empty(&first->node)) {
+				desc = list_entry(first->node.next,
+						struct dma_pl330_desc, node);
+				list_move_tail(&desc->node, &pdmac->desc_pool);
+			}
+
+			list_move_tail(&first->node, &pdmac->desc_pool);
 
-	fill_px(&desc->px, dst, src, period_len);
+			spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+			return NULL;
+		}
+
+		switch (direction) {
+		case DMA_MEM_TO_DEV:
+			desc->rqcfg.src_inc = 1;
+			desc->rqcfg.dst_inc = 0;
+			desc->req.rqtype = MEMTODEV;
+			src = dma_addr;
+			dst = pch->fifo_addr;
+			break;
+		case DMA_DEV_TO_MEM:
+			desc->rqcfg.src_inc = 0;
+			desc->rqcfg.dst_inc = 1;
+			desc->req.rqtype = DEVTOMEM;
+			src = pch->fifo_addr;
+			dst = dma_addr;
+			break;
+		default:
+			break;
+		}
+
+		desc->rqcfg.brst_size = pch->burst_sz;
+		desc->rqcfg.brst_len = 1;
+		fill_px(&desc->px, dst, src, period_len);
+
+		if (!first)
+			first = desc;
+		else
+			list_add_tail(&desc->node, &first->node);
+
+		dma_addr += period_len;
+	}
+
+	if (!desc)
+		return NULL;
+
+	pch->cyclic = true;
+
+	desc->txd.flags = flags;
 
 	return &desc->txd;
 }