Commit 92d794df authored by Kedareswara rao Appana, committed by Vinod Koul

dmaengine: vdma: Add support for cyclic dma mode

This patch adds support for AXI DMA cyclic DMA mode.
In cyclic mode, the DMA engine fetches and processes the same
set of BDs (buffer descriptors) repeatedly, without interruption,
until it is stopped or reset.
Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 1a695a90
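
For readers unfamiliar with the dmaengine cyclic API, the sketch below shows how a client driver could exercise this mode once the patch is applied. It is an illustration only, not part of the patch: the channel name "tx", the function names, the 4 KiB period size, and the callback are assumptions made for the example.

    /* Hypothetical dmaengine client -- a minimal sketch of cyclic mode usage. */
    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/err.h>

    static void period_done(void *arg)
    {
            /* Invoked once per completed period while the BD ring keeps cycling. */
    }

    static int start_cyclic_tx(struct device *dev)
    {
            size_t period_len = 4096, buf_len = 8 * 4096;   /* illustrative sizes */
            struct dma_async_tx_descriptor *txd;
            struct dma_chan *chan;
            dma_addr_t dma_buf;
            void *buf;

            chan = dma_request_chan(dev, "tx");             /* assumed channel name */
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            buf = dma_alloc_coherent(dev, buf_len, &dma_buf, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;         /* error unwinding omitted for brevity */

            /* The driver builds one BD chain covering all periods and loops it. */
            txd = dmaengine_prep_dma_cyclic(chan, dma_buf, buf_len, period_len,
                                            DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
            if (!txd)
                    return -EINVAL;

            txd->callback = period_done;
            txd->callback_param = NULL;
            dmaengine_submit(txd);
            dma_async_issue_pending(chan);

            /* Later, dmaengine_terminate_all(chan) stops the cyclic transfer. */
            return 0;
    }

A cyclic transfer never completes on its own, which is why xilinx_dma_tx_submit() in this patch rejects further submissions with -EBUSY while chan->cyclic is set, and why xilinx_dma_terminate_all() resets the channel and clears XILINX_DMA_CR_CYCLIC_BD_EN_MASK in DMACR.
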
@@ -157,6 +157,7 @@
 /* AXI DMA Specific Masks/Bit fields */
 #define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
+#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
 #define XILINX_DMA_CR_COALESCE_SHIFT	16
 #define XILINX_DMA_BD_SOP		BIT(27)
 #define XILINX_DMA_BD_EOP		BIT(26)
@@ -278,11 +279,13 @@ struct xilinx_cdma_tx_segment {
  * @async_tx: Async transaction descriptor
  * @segments: TX segments list
  * @node: Node in the channel descriptors list
+ * @cyclic: Check for cyclic transfers.
  */
 struct xilinx_dma_tx_descriptor {
 	struct dma_async_tx_descriptor async_tx;
 	struct list_head segments;
 	struct list_head node;
+	bool cyclic;
 };
 
 /**
@@ -302,6 +305,7 @@ struct xilinx_dma_tx_descriptor {
  * @direction: Transfer direction
  * @num_frms: Number of frames
  * @has_sg: Support scatter transfers
+ * @cyclic: Check for cyclic transfers.
  * @genlock: Support genlock mode
  * @err: Channel has errors
  * @tasklet: Cleanup work after irq
@@ -312,6 +316,7 @@ struct xilinx_dma_tx_descriptor {
  * @desc_submitcount: Descriptor h/w submitted count
  * @residue: Residue for AXI DMA
  * @seg_v: Statically allocated segments base
+ * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
  * @start_transfer: Differentiate b/w DMA IP's transfer
  */
 struct xilinx_dma_chan {
@@ -330,6 +335,7 @@ struct xilinx_dma_chan {
 	enum dma_transfer_direction direction;
 	int num_frms;
 	bool has_sg;
+	bool cyclic;
 	bool genlock;
 	bool err;
 	struct tasklet_struct tasklet;
@@ -340,6 +346,7 @@ struct xilinx_dma_chan {
 	u32 desc_submitcount;
 	u32 residue;
 	struct xilinx_axidma_tx_segment *seg_v;
+	struct xilinx_axidma_tx_segment *cyclic_seg_v;
 	void (*start_transfer)(struct xilinx_dma_chan *chan);
 };
@@ -660,12 +667,36 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
 	dev_dbg(chan->dev, "Free all channel resources.\n");
 
 	xilinx_dma_free_descriptors(chan);
-	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
 		xilinx_dma_free_tx_segment(chan, chan->seg_v);
+	}
+
 	dma_pool_destroy(chan->desc_pool);
 	chan->desc_pool = NULL;
 }
 
+/**
+ * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ * @flags: flags for spin lock
+ */
+static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
+					  struct xilinx_dma_tx_descriptor *desc,
+					  unsigned long *flags)
+{
+	dma_async_tx_callback callback;
+	void *callback_param;
+
+	callback = desc->async_tx.callback;
+	callback_param = desc->async_tx.callback_param;
+	if (callback) {
+		spin_unlock_irqrestore(&chan->lock, *flags);
+		callback(callback_param);
+		spin_lock_irqsave(&chan->lock, *flags);
+	}
+}
+
 /**
  * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
  * @chan: Driver specific DMA channel
@@ -681,6 +712,11 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
 		dma_async_tx_callback callback;
 		void *callback_param;
 
+		if (desc->cyclic) {
+			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
+			break;
+		}
+
 		/* Remove from the list of running transactions */
 		list_del(&desc->node);
@@ -757,7 +793,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 		return -ENOMEM;
 	}
 
-	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		/*
 		 * For AXI DMA case after submitting a pending_list, keep
 		 * an extra segment allocated so that the "next descriptor"
@@ -768,6 +804,15 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 		 */
 		chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
 
+		/*
+		 * For cyclic DMA mode we need to program the tail Descriptor
+		 * register with a value which is not a part of the BD chain
+		 * so allocating a desc segment during channel allocation for
+		 * programming tail descriptor.
+		 */
+		chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
+	}
+
 	dma_cookie_init(dchan);
 
 	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
@@ -1157,8 +1202,12 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 
 	/* Start the transfer */
 	if (chan->has_sg) {
-		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-			       tail_segment->phys);
+		if (chan->cyclic)
+			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+				       chan->cyclic_seg_v->phys);
+		else
+			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+				       tail_segment->phys);
 	} else {
 		struct xilinx_axidma_tx_segment *segment;
 		struct xilinx_axidma_desc_hw *hw;
@@ -1209,7 +1258,8 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
 
 	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
 		list_del(&desc->node);
-		dma_cookie_complete(&desc->async_tx);
+		if (!desc->cyclic)
+			dma_cookie_complete(&desc->async_tx);
 		list_add_tail(&desc->node, &chan->done_list);
 	}
 }
@@ -1397,6 +1447,11 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	unsigned long flags;
 	int err;
 
+	if (chan->cyclic) {
+		xilinx_dma_free_tx_descriptor(chan, desc);
+		return -EBUSY;
+	}
+
 	if (chan->err) {
 		/*
 		 * If reset fails, need to hard reset the system.
@@ -1414,6 +1469,9 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	/* Put this transaction onto the tail of the pending queue */
 	append_desc_queue(chan, desc);
 
+	if (desc->cyclic)
+		chan->cyclic = true;
+
 	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return cookie;
@@ -1668,6 +1726,102 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 	return NULL;
 }
 
+/**
+ * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
+ * @dchan: DMA channel
+ * @buf_addr: Physical address of the buffer
+ * @buf_len: Total length of the cyclic buffer
+ * @period_len: Length of an individual cyclic period
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
+	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_tx_descriptor *desc;
+	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
+	size_t copy, sg_used;
+	unsigned int num_periods;
+	int i;
+	u32 reg;
+
+	num_periods = buf_len / period_len;
+
+	if (!is_slave_direction(direction))
+		return NULL;
+
+	/* Allocate a transaction descriptor. */
+	desc = xilinx_dma_alloc_tx_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	chan->direction = direction;
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+	for (i = 0; i < num_periods; ++i) {
+		sg_used = 0;
+
+		while (sg_used < period_len) {
+			struct xilinx_axidma_desc_hw *hw;
+
+			/* Get a free segment */
+			segment = xilinx_axidma_alloc_tx_segment(chan);
+			if (!segment)
+				goto error;
+
+			/*
+			 * Calculate the maximum number of bytes to transfer,
+			 * making sure it is less than the hw limit
+			 */
+			copy = min_t(size_t, period_len - sg_used,
+				     XILINX_DMA_MAX_TRANS_LEN);
+			hw = &segment->hw;
+			hw->buf_addr = buf_addr + sg_used + (period_len * i);
+			hw->control = copy;
+
+			if (prev)
+				prev->hw.next_desc = segment->phys;
+
+			prev = segment;
+			sg_used += copy;
+
+			/*
+			 * Insert the segment into the descriptor segments
+			 * list.
+			 */
+			list_add_tail(&segment->node, &desc->segments);
+		}
+	}
+
+	head_segment = list_first_entry(&desc->segments,
+					struct xilinx_axidma_tx_segment, node);
+	desc->async_tx.phys = head_segment->phys;
+
+	desc->cyclic = true;
+	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+
+	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
+	if (direction == DMA_MEM_TO_DEV) {
+		segment->hw.control |= XILINX_DMA_BD_SOP;
+		segment = list_last_entry(&desc->segments,
+					  struct xilinx_axidma_tx_segment,
+					  node);
+		segment->hw.control |= XILINX_DMA_BD_EOP;
+		segment->hw.next_desc = (u32) head_segment->phys;
+	}
+
+	return &desc->async_tx;
+
+error:
+	xilinx_dma_free_tx_descriptor(chan, desc);
+	return NULL;
+}
+
 /**
  * xilinx_dma_terminate_all - Halt the channel and free descriptors
  * @chan: Driver specific DMA Channel pointer
@@ -1675,6 +1829,10 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 {
 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	u32 reg;
+
+	if (chan->cyclic)
+		xilinx_dma_chan_reset(chan);
 
 	/* Halt the DMA engine */
 	xilinx_dma_halt(chan);
@@ -1682,6 +1840,13 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 	/* Remove and free all of the descriptors in the lists */
 	xilinx_dma_free_descriptors(chan);
 
+	if (chan->cyclic) {
+		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
+		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+		chan->cyclic = false;
+	}
+
 	return 0;
 }
@@ -2218,7 +2383,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	xdev->common.device_tx_status = xilinx_dma_tx_status;
 	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
 		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
+		xdev->common.device_prep_dma_cyclic =
+					  xilinx_dma_prep_dma_cyclic;
 		/* Residue calculation is supported by only AXI DMA */
 		xdev->common.residue_granularity =
 					  DMA_RESIDUE_GRANULARITY_SEGMENT;
...