Commit a575d0b4 authored by Nicholas Graumann, committed by Vinod Koul

dmaengine: xilinx_dma: Introduce xilinx_dma_get_residue

Introduce a function that can calculate residues for IPs that support it:
AXI DMA and CDMA.
Signed-off-by: Nicholas Graumann <nick.graumann@gmail.com>
Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
Link: https://lore.kernel.org/r/1571150904-3988-5-git-send-email-radhey.shyam.pandey@xilinx.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 95f68c62
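For readers unfamiliar with the arithmetic in this patch: on AXI DMA and CDMA, each hardware segment descriptor holds the programmed transfer length in the low bits of its control word and the number of bytes actually transferred in the low bits of its status word, so the per-segment residue is (control - status) masked to the buffer-length field. Below is a minimal standalone C sketch of that computation; the mask width and the segment values are hypothetical illustrations, not taken from the driver.

/*
 * Standalone sketch (not driver code) of the per-segment residue
 * arithmetic used in this patch. Assumes the low bits of 'control'
 * hold the programmed length and the low bits of 'status' hold the
 * bytes transferred; 'max_buffer_len' masks off flag bits kept in
 * the upper bits. All values here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct seg_hw {
	uint32_t control;	/* flags in high bits, length in low bits */
	uint32_t status;	/* flags in high bits, transferred count in low bits */
};

int main(void)
{
	const uint32_t max_buffer_len = 0x03ffffff;	/* hypothetical 26-bit length field */
	const struct seg_hw segs[] = {
		{ .control = 0x1000, .status = 0x1000 },	/* fully transferred */
		{ .control = 0x2000, .status = 0x0800 },	/* partially done */
		{ .control = 0x1000, .status = 0x0000 },	/* not started */
	};
	uint32_t residue = 0;
	size_t i;

	/* Sum the untransferred bytes of every segment in the descriptor. */
	for (i = 0; i < sizeof(segs) / sizeof(segs[0]); i++)
		residue += (segs[i].control - segs[i].status) &
			   max_buffer_len;

	printf("residue = %u bytes\n", residue);	/* 0 + 6144 + 4096 = 10240 */
	return 0;
}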
@@ -784,6 +784,44 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
 	}
 }
 
+/**
+ * xilinx_dma_get_residue - Compute residue for a given descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ *
+ * Return: The number of residue bytes for the descriptor.
+ */
+static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+				  struct xilinx_dma_tx_descriptor *desc)
+{
+	struct xilinx_cdma_tx_segment *cdma_seg;
+	struct xilinx_axidma_tx_segment *axidma_seg;
+	struct xilinx_cdma_desc_hw *cdma_hw;
+	struct xilinx_axidma_desc_hw *axidma_hw;
+	struct list_head *entry;
+	u32 residue = 0;
+
+	list_for_each(entry, &desc->segments) {
+		if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+			cdma_seg = list_entry(entry,
+					      struct xilinx_cdma_tx_segment,
+					      node);
+			cdma_hw = &cdma_seg->hw;
+			residue += (cdma_hw->control - cdma_hw->status) &
+				   chan->xdev->max_buffer_len;
+		} else {
+			axidma_seg = list_entry(entry,
+						struct xilinx_axidma_tx_segment,
+						node);
+			axidma_hw = &axidma_seg->hw;
+			residue += (axidma_hw->control - axidma_hw->status) &
+				   chan->xdev->max_buffer_len;
+		}
+	}
+
+	return residue;
+}
+
 /**
  * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
  * @chan: Driver specific dma channel
@@ -993,8 +1031,6 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 {
 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 	struct xilinx_dma_tx_descriptor *desc;
-	struct xilinx_axidma_tx_segment *segment;
-	struct xilinx_axidma_desc_hw *hw;
 	enum dma_status ret;
 	unsigned long flags;
 	u32 residue = 0;
@@ -1003,22 +1039,20 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
 
-	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-		spin_lock_irqsave(&chan->lock, flags);
+	spin_lock_irqsave(&chan->lock, flags);
 
-		desc = list_last_entry(&chan->active_list,
-				       struct xilinx_dma_tx_descriptor, node);
-		if (chan->has_sg) {
-			list_for_each_entry(segment, &desc->segments, node) {
-				hw = &segment->hw;
-				residue += (hw->control - hw->status) &
-					   chan->xdev->max_buffer_len;
-			}
-		}
-		spin_unlock_irqrestore(&chan->lock, flags);
+	desc = list_last_entry(&chan->active_list,
+			       struct xilinx_dma_tx_descriptor, node);
+	/*
+	 * VDMA and simple mode do not support residue reporting, so the
+	 * residue field will always be 0.
+	 */
+	if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+		residue = xilinx_dma_get_residue(chan, desc);
 
-		dma_set_residue(txstate, residue);
-	}
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	dma_set_residue(txstate, residue);
 
 	return ret;
 }
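For context, this is how a dmaengine client would consume the residue that xilinx_dma_tx_status() now reports. A sketch using the generic dmaengine client API, not part of this patch; chan and cookie are assumed to come from an earlier prep/submit.

/*
 * Sketch (not part of this patch): reading the residue of an
 * in-flight transaction through the standard dmaengine client API.
 */
#include <linux/dmaengine.h>

static unsigned int example_bytes_left(struct dma_chan *chan,
				       dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_COMPLETE)
		return 0;

	/* Valid at segment granularity on AXI DMA and CDMA channels. */
	return state.residue;
}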
@@ -2705,12 +2739,15 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 			xilinx_dma_prep_dma_cyclic;
 		xdev->common.device_prep_interleaved_dma =
 			xilinx_dma_prep_interleaved;
-		/* Residue calculation is supported by only AXI DMA */
+		/* Residue calculation is supported by only AXI DMA and CDMA */
 		xdev->common.residue_granularity =
 			DMA_RESIDUE_GRANULARITY_SEGMENT;
 	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
 		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+		/* Residue calculation is supported by only AXI DMA and CDMA */
+		xdev->common.residue_granularity =
+			DMA_RESIDUE_GRANULARITY_SEGMENT;
 	} else {
 		xdev->common.device_prep_interleaved_dma =
 			xilinx_vdma_dma_prep_interleaved;