Commit d80f381f authored by Maxime Ripard, committed by Vinod Koul

dmaengine: fsl-edma: Split device_control

Split the device_control callback of the Freescale EDMA driver to make use
of the newly introduced callbacks, which will eventually be used to retrieve
slave capabilities.
Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 2258b675
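
For context, here is a minimal sketch of how a slave DMA client reaches the new per-operation callbacks through the standard dmaengine wrappers (dmaengine_slave_config, dmaengine_pause, dmaengine_resume, dmaengine_terminate_all) once the framework dispatches to device_config/device_pause/device_resume/device_terminate_all instead of the old device_control(cmd, arg) multiplexer. The function example_configure_pause_resume() and the my_dev_fifo_addr parameter are hypothetical names used only for illustration.

/*
 * Illustrative sketch: a slave DMA client driving the operations that
 * fsl-edma now implements as separate callbacks.  The dmaengine_*
 * wrappers dispatch to device_config, device_pause, device_resume and
 * device_terminate_all once the framework conversion is complete.
 * example_configure_pause_resume() and my_dev_fifo_addr are
 * hypothetical names used only for this example.
 */
#include <linux/dmaengine.h>

static int example_configure_pause_resume(struct dma_chan *chan,
					   dma_addr_t my_dev_fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= my_dev_fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};
	int ret;

	/* device_config -> fsl_edma_slave_config() */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* ... prepare, submit and issue descriptors here ... */

	/* device_pause -> fsl_edma_pause() */
	ret = dmaengine_pause(chan);
	if (ret)
		return ret;

	/* device_resume -> fsl_edma_resume() */
	ret = dmaengine_resume(chan);
	if (ret)
		return ret;

	/* device_terminate_all -> fsl_edma_terminate_all() */
	return dmaengine_terminate_all(chan);
}
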
drivers/dma/fsl-edma.c

@@ -289,62 +289,69 @@ static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
 	kfree(fsl_desc);
 }
 
-static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+static int fsl_edma_terminate_all(struct dma_chan *chan)
 {
 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-	struct dma_slave_config *cfg = (void *)arg;
 	unsigned long flags;
 	LIST_HEAD(head);
 
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	fsl_edma_disable_request(fsl_chan);
+	fsl_chan->edesc = NULL;
+	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+	return 0;
+}
+
+static int fsl_edma_pause(struct dma_chan *chan)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	if (fsl_chan->edesc) {
 		fsl_edma_disable_request(fsl_chan);
-		fsl_chan->edesc = NULL;
-		vchan_get_all_descriptors(&fsl_chan->vchan, &head);
-		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-		vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
-		return 0;
-	case DMA_SLAVE_CONFIG:
-		fsl_chan->fsc.dir = cfg->direction;
-		if (cfg->direction == DMA_DEV_TO_MEM) {
-			fsl_chan->fsc.dev_addr = cfg->src_addr;
-			fsl_chan->fsc.addr_width = cfg->src_addr_width;
-			fsl_chan->fsc.burst = cfg->src_maxburst;
-			fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
-		} else if (cfg->direction == DMA_MEM_TO_DEV) {
-			fsl_chan->fsc.dev_addr = cfg->dst_addr;
-			fsl_chan->fsc.addr_width = cfg->dst_addr_width;
-			fsl_chan->fsc.burst = cfg->dst_maxburst;
-			fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
-		} else {
-			return -EINVAL;
-		}
-		return 0;
-	case DMA_PAUSE:
-		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-		if (fsl_chan->edesc) {
-			fsl_edma_disable_request(fsl_chan);
-			fsl_chan->status = DMA_PAUSED;
-		}
-		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-		return 0;
-	case DMA_RESUME:
-		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-		if (fsl_chan->edesc) {
-			fsl_edma_enable_request(fsl_chan);
-			fsl_chan->status = DMA_IN_PROGRESS;
-		}
-		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-		return 0;
-	default:
-		return -ENXIO;
+		fsl_chan->status = DMA_PAUSED;
 	}
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+	return 0;
+}
+
+static int fsl_edma_resume(struct dma_chan *chan)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+	if (fsl_chan->edesc) {
+		fsl_edma_enable_request(fsl_chan);
+		fsl_chan->status = DMA_IN_PROGRESS;
+	}
+	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+	return 0;
+}
+
+static int fsl_edma_slave_config(struct dma_chan *chan,
+		struct dma_slave_config *cfg)
+{
+	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+	fsl_chan->fsc.dir = cfg->direction;
+	if (cfg->direction == DMA_DEV_TO_MEM) {
+		fsl_chan->fsc.dev_addr = cfg->src_addr;
+		fsl_chan->fsc.addr_width = cfg->src_addr_width;
+		fsl_chan->fsc.burst = cfg->src_maxburst;
+		fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
+	} else if (cfg->direction == DMA_MEM_TO_DEV) {
+		fsl_chan->fsc.dev_addr = cfg->dst_addr;
+		fsl_chan->fsc.addr_width = cfg->dst_addr_width;
+		fsl_chan->fsc.burst = cfg->dst_maxburst;
+		fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
+	} else {
+		return -EINVAL;
+	}
+	return 0;
 }
 
 static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
@@ -917,7 +924,10 @@ static int fsl_edma_probe(struct platform_device *pdev)
 	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
 	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
 	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
-	fsl_edma->dma_dev.device_control = fsl_edma_control;
+	fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
+	fsl_edma->dma_dev.device_pause = fsl_edma_pause;
+	fsl_edma->dma_dev.device_resume = fsl_edma_resume;
+	fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
 	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
 	fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps;
 ...