Commit 4f46f8ac authored by Guennadi Liakhovetski, committed by Paul Mundt

dmaengine: shdma: restore partial transfer calculation

The recent shdma driver split has mistakenly removed support for partial
DMA transfer size calculation on forced termination. This patch restores
it.
Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Vinod Koul <vinod.koul@linux.intel.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent ac694dbd
@@ -483,6 +483,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
 	new->mark = DESC_PREPARED;
 	new->async_tx.flags = flags;
 	new->direction = direction;
+	new->partial = 0;
 	*len -= copy_size;
 	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
@@ -644,6 +645,14 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	case DMA_TERMINATE_ALL:
 		spin_lock_irqsave(&schan->chan_lock, flags);
 		ops->halt_channel(schan);
+
+		if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+			/* Record partial transfer */
+			struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+						struct shdma_desc, node);
+			desc->partial = ops->get_partial(schan, desc);
+		}
+
 		spin_unlock_irqrestore(&schan->chan_lock, flags);
 		shdma_chan_ld_cleanup(schan, true);
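For context, a minimal sketch of how a client sitting on top of shdma could consume the value recorded here. The helper name abort_and_get_partial() is hypothetical and not part of this patch; the container_of() step relies only on struct shdma_desc embedding its dma_async_tx_descriptor (see the header hunk further down), and the read must happen before the terminated descriptor is recycled back onto the free list by the cleanup that follows termination.

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

/* Hypothetical helper: abort a running transfer and report how many
 * bytes the controller actually moved before it was halted. */
static size_t abort_and_get_partial(struct dma_chan *chan,
				    struct dma_async_tx_descriptor *tx)
{
	/* struct shdma_desc embeds async_tx, so walk back to the
	 * containing shdma descriptor */
	struct shdma_desc *sdesc = container_of(tx, struct shdma_desc,
						async_tx);

	/* DMA_TERMINATE_ALL halts the channel; with this patch the
	 * core stores the partial byte count into the descriptor
	 * before cleanup runs */
	dmaengine_terminate_all(chan);

	return sdesc->partial;
}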
@@ -381,6 +381,17 @@ static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
 	return true;
 }
 
+static size_t sh_dmae_get_partial(struct shdma_chan *schan,
+				  struct shdma_desc *sdesc)
+{
+	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+						    shdma_chan);
+	struct sh_dmae_desc *sh_desc = container_of(sdesc,
+					struct sh_dmae_desc, shdma_desc);
+	return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
+		sh_chan->xmit_shift;
+}
+
 /* Called from error IRQ or NMI */
 static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 {
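As a sanity check on the arithmetic in sh_dmae_get_partial(): hw.tcr holds the transfer count programmed at submit time, the live TCR register holds what is still outstanding, and xmit_shift scales transfer units to bytes. A standalone sketch with made-up numbers, assuming both counts are expressed in transfer-size units:

#include <stdio.h>

int main(void)
{
	unsigned int xmit_shift = 2;   /* log2(bytes per transfer unit), assumed */
	unsigned int tcr_start  = 256; /* count programmed at submit time, assumed */
	unsigned int tcr_now    = 96;  /* remaining count read back at abort, assumed */

	/* Completed units, scaled to bytes exactly as get_partial() does */
	size_t partial = (size_t)(tcr_start - tcr_now) << xmit_shift;

	printf("%zu bytes actually transferred\n", partial); /* prints 640 */
	return 0;
}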
@@ -632,6 +643,7 @@ static const struct shdma_ops sh_dmae_shdma_ops = {
 	.start_xfer = sh_dmae_start_xfer,
 	.embedded_desc = sh_dmae_embedded_desc,
 	.chan_irq = sh_dmae_chan_irq,
+	.get_partial = sh_dmae_get_partial,
 };
 
 static int __devinit sh_dmae_probe(struct platform_device *pdev)
@@ -50,6 +50,7 @@ struct shdma_desc {
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
 	enum dma_transfer_direction direction;
+	size_t partial;
 	dma_cookie_t cookie;
 	int chunks;
 	int mark;
@@ -98,6 +99,7 @@ struct shdma_ops {
 	void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
 	struct shdma_desc *(*embedded_desc)(void *, int);
 	bool (*chan_irq)(struct shdma_chan *, int);
+	size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
 };
 
 struct shdma_dev {
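The callback is optional (the core checks ops->get_partial before calling it), so other shdma glue drivers can opt in incrementally. A hedged sketch of what an implementation for a hypothetical controller might look like; every foo_* name, type, and register offset below is invented, and only the shdma_ops hook plus the container_of() pattern come from this patch:

/* Hypothetical glue-driver types, mirroring the sh_dmae ones above */
struct foo_dmae_chan {
	struct shdma_chan shdma_chan;	/* embeds the core channel */
	void __iomem *base;
};

struct foo_dmae_desc {
	struct shdma_desc shdma_desc;	/* embeds the core descriptor */
	size_t bytes_programmed;	/* length written to hardware */
};

/* Hypothetical: the controller exposes a down-counting byte register */
static u32 foo_dmae_read_count(struct foo_dmae_chan *chan)
{
	return ioread32(chan->base + 0x10);	/* offset is made up */
}

static size_t foo_dmae_get_partial(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct foo_dmae_chan *chan = container_of(schan, struct foo_dmae_chan,
						  shdma_chan);
	struct foo_dmae_desc *desc = container_of(sdesc, struct foo_dmae_desc,
						  shdma_desc);

	/* programmed length minus what the hardware says is left */
	return desc->bytes_programmed - foo_dmae_read_count(chan);
}

static const struct shdma_ops foo_dmae_shdma_ops = {
	/* ... the other mandatory callbacks ... */
	.get_partial = foo_dmae_get_partial,
};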