Commit feb59d77 authored by Vinod Koul

Merge branch 'topic/xilinx' into for-linus

parents 42cb6e07 c2be36ac
@@ -37,10 +37,11 @@ Required properties:
 Required properties for VDMA:
 - xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
 
-Optional properties:
-- xlnx,include-sg: Tells configured for Scatter-mode in
-  the hardware.
 Optional properties for AXI DMA:
+- xlnx,sg-length-width: Should be set to the width in bits of the length
+  register as configured in h/w. Takes values {8...26}. If the property
+  is missing or invalid then the default value 23 is used. This is the
+  maximum value that is supported by all IP versions.
 - xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware.
 Optional properties for VDMA:
 - xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
......
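The size limit that xlnx,sg-length-width implies is easy to compute: a w-bit length register caps a single transfer at 2^w - 1 bytes, i.e. GENMASK(w - 1, 0). A minimal standalone sketch (plain C, not part of the patch) printing that mapping:

	#include <stdio.h>

	int main(void)
	{
		/* Valid widths per the binding: 8..26; the default is 23. */
		for (unsigned int w = 8; w <= 26; w++) {
			/* Equivalent to the kernel's GENMASK(w - 1, 0) */
			unsigned long max_len = (1UL << w) - 1;

			printf("xlnx,sg-length-width = <%u>  ->  max %lu bytes%s\n",
			       w, max_len, w == 23 ? " (default)" : "");
		}
		return 0;
	}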
@@ -86,6 +86,7 @@
 #define XILINX_DMA_DMASR_DMA_DEC_ERR	BIT(6)
 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR	BIT(5)
 #define XILINX_DMA_DMASR_DMA_INT_ERR	BIT(4)
+#define XILINX_DMA_DMASR_SG_MASK	BIT(3)
 #define XILINX_DMA_DMASR_IDLE		BIT(1)
 #define XILINX_DMA_DMASR_HALTED		BIT(0)
 #define XILINX_DMA_DMASR_DELAY_MASK	GENMASK(31, 24)
@@ -161,7 +162,9 @@
 #define XILINX_DMA_REG_BTT		0x28
 
 /* AXI DMA Specific Masks/Bit fields */
-#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
+#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
+#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
+#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
 #define XILINX_DMA_CR_COALESCE_SHIFT	16
@@ -412,7 +415,6 @@ struct xilinx_dma_config {
  * @dev: Device Structure
  * @common: DMA device structure
  * @chan: Driver specific DMA channel
- * @has_sg: Specifies whether Scatter-Gather is present or not
  * @mcdma: Specifies whether Multi-Channel is present or not
  * @flush_on_fsync: Flush on frame sync
  * @ext_addr: Indicates 64 bit addressing is supported by dma device
@@ -425,13 +427,13 @@ struct xilinx_dma_config {
  * @rxs_clk: DMA s2mm stream clock
  * @nr_channels: Number of channels DMA device supports
  * @chan_id: DMA channel identifier
+ * @max_buffer_len: Max buffer length
  */
 struct xilinx_dma_device {
 	void __iomem *regs;
 	struct device *dev;
 	struct dma_device common;
 	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
-	bool has_sg;
 	bool mcdma;
 	u32 flush_on_fsync;
 	bool ext_addr;
@@ -444,6 +446,7 @@ struct xilinx_dma_device {
 	struct clk *rxs_clk;
 	u32 nr_channels;
 	u32 chan_id;
+	u32 max_buffer_len;
 };
 
 /* Macros */
@@ -959,6 +962,34 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 	return 0;
 }
 
+/**
+ * xilinx_dma_calc_copysize - Calculate the amount of data to copy
+ * @chan: Driver specific DMA channel
+ * @size: Total data that needs to be copied
+ * @done: Amount of data that has been already copied
+ *
+ * Return: Amount of data that has to be copied
+ */
+static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
+				    int size, int done)
+{
+	size_t copy;
+
+	copy = min_t(size_t, size - done,
+		     chan->xdev->max_buffer_len);
+
+	if ((copy + done < size) &&
+	    chan->xdev->common.copy_align) {
+		/*
+		 * If this is not the last descriptor, make sure
+		 * the next one will be properly aligned
+		 */
+		copy = rounddown(copy,
+				 (1 << chan->xdev->common.copy_align));
+	}
+	return copy;
+}
+
 /**
  * xilinx_dma_tx_status - Get DMA transaction status
  * @dchan: DMA channel
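As a self-contained sketch of how this helper chunks a transfer, with illustrative values only (a 23-bit length register and copy_align = 3, i.e. 8-byte alignment; the real values come from the IP configuration):

	#include <stdio.h>

	#define MAX_BUFFER_LEN	0x7FFFFFu	/* GENMASK(22, 0): 23-bit length register */
	#define COPY_ALIGN	3		/* 1 << 3 = 8-byte alignment (assumed) */

	/* Mirrors xilinx_dma_calc_copysize() without the driver structures */
	static unsigned int calc_copysize(unsigned int size, unsigned int done)
	{
		unsigned int copy = size - done;

		if (copy > MAX_BUFFER_LEN)
			copy = MAX_BUFFER_LEN;

		/* Not the last chunk: round down so the next one stays aligned */
		if (copy + done < size)
			copy -= copy % (1u << COPY_ALIGN);

		return copy;
	}

	int main(void)
	{
		unsigned int size = 0x1000000, done = 0;	/* 16 MiB transfer */

		while (done < size) {
			unsigned int copy = calc_copysize(size, done);

			printf("chunk at 0x%07x: 0x%06x bytes\n", done, copy);
			done += copy;
		}
		return 0;
	}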
@@ -992,7 +1023,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 		list_for_each_entry(segment, &desc->segments, node) {
 			hw = &segment->hw;
 			residue += (hw->control - hw->status) &
-				   XILINX_DMA_MAX_TRANS_LEN;
+				   chan->xdev->max_buffer_len;
 		}
 	}
 
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -1070,7 +1101,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	struct xilinx_vdma_config *config = &chan->config;
 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
 	u32 reg, j;
-	struct xilinx_vdma_tx_segment *tail_segment;
+	struct xilinx_vdma_tx_segment *segment, *last = NULL;
+	int i = 0;
 
 	/* This function was invoked with lock held */
 	if (chan->err)
@@ -1087,17 +1119,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_desc = list_last_entry(&chan->pending_list,
 				    struct xilinx_dma_tx_descriptor, node);
 
-	tail_segment = list_last_entry(&tail_desc->segments,
-				       struct xilinx_vdma_tx_segment, node);
-
-	/*
-	 * If hardware is idle, then all descriptors on the running lists are
-	 * done, start new transfers
-	 */
-	if (chan->has_sg)
-		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-			       desc->async_tx.phys);
-
 	/* Configure the hardware using info in the config structure */
 	if (chan->has_vflip) {
 		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
@@ -1114,15 +1135,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	else
 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
 
-	/*
-	 * With SG, start with circular mode, so that BDs can be fetched.
-	 * In direct register mode, if not parking, enable circular mode
-	 */
-	if (chan->has_sg || !config->park)
-		reg |= XILINX_DMA_DMACR_CIRC_EN;
-
+	/* If not parking, enable circular mode */
 	if (config->park)
 		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+	else
+		reg |= XILINX_DMA_DMACR_CIRC_EN;
 
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
@@ -1144,15 +1161,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		return;
 
 	/* Start the transfer */
-	if (chan->has_sg) {
-		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-			       tail_segment->phys);
-		list_splice_tail_init(&chan->pending_list, &chan->active_list);
-		chan->desc_pendingcount = 0;
-	} else {
-		struct xilinx_vdma_tx_segment *segment, *last = NULL;
-		int i = 0;
-
 	if (chan->desc_submitcount < chan->num_frms)
 		i = chan->desc_submitcount;
@@ -1185,7 +1193,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	list_add_tail(&desc->node, &chan->active_list);
 	if (chan->desc_submitcount == chan->num_frms)
 		chan->desc_submitcount = 0;
-	}
 
 	chan->idle = false;
 }
@@ -1254,7 +1261,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
+			       hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1357,7 +1364,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
+			       hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1718,7 +1725,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 	struct xilinx_cdma_tx_segment *segment;
 	struct xilinx_cdma_desc_hw *hw;
 
-	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+	if (!len || len > chan->xdev->max_buffer_len)
 		return NULL;
 
 	desc = xilinx_dma_alloc_tx_descriptor(chan);
@@ -1808,8 +1815,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 			 * Calculate the maximum number of bytes to transfer,
 			 * making sure it is less than the hw limit
 			 */
-			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
-				     XILINX_DMA_MAX_TRANS_LEN);
+			copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
+							sg_used);
 			hw = &segment->hw;
 
 			/* Fill in the descriptor */
@@ -1913,8 +1920,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
 			 * Calculate the maximum number of bytes to transfer,
 			 * making sure it is less than the hw limit
 			 */
-			copy = min_t(size_t, period_len - sg_used,
-				     XILINX_DMA_MAX_TRANS_LEN);
+			copy = xilinx_dma_calc_copysize(chan, period_len,
+							sg_used);
 			hw = &segment->hw;
 			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
 					  period_len * i);
@@ -2389,7 +2396,6 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	chan->dev = xdev->dev;
 	chan->xdev = xdev;
-	chan->has_sg = xdev->has_sg;
 	chan->desc_pendingcount = 0x0;
 	chan->ext_addr = xdev->ext_addr;
 	/* This variable ensures that descriptors are not
@@ -2489,6 +2495,15 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->stop_transfer = xilinx_dma_stop_transfer;
 	}
 
+	/* check if SG is enabled (only for AXIDMA and CDMA) */
+	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
+		if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+		    XILINX_DMA_DMASR_SG_MASK)
+			chan->has_sg = true;
+		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
+			chan->has_sg ? "enabled" : "disabled");
+	}
+
 	/* Initialize the tasklet */
 	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
 		     (unsigned long)chan);
@@ -2596,7 +2611,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	struct xilinx_dma_device *xdev;
 	struct device_node *child, *np = pdev->dev.of_node;
 	struct resource *io;
-	u32 num_frames, addr_width;
+	u32 num_frames, addr_width, len_width;
 	int i, err;
 
 	/* Allocate and initialize the DMA engine structure */
@@ -2627,9 +2642,24 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 		return PTR_ERR(xdev->regs);
 
 	/* Retrieve the DMA engine properties from the device tree */
-	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
-	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+		if (!of_property_read_u32(node, "xlnx,sg-length-width",
+					  &len_width)) {
+			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
+			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
+				dev_warn(xdev->dev,
+					 "invalid xlnx,sg-length-width property value. Using default width\n");
+			} else {
+				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
+					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
+				xdev->max_buffer_len =
+					GENMASK(len_width - 1, 0);
+			}
+		}
+	}
 
 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 		err = of_property_read_u32(node, "xlnx,num-fstores",
......
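The probe-time validation above can also be exercised in isolation; a hedged stand-in in plain C (dev_warn swapped for stderr prints, macro names mirroring the patch):

	#include <stdio.h>

	#define TRANS_LEN_MIN		8	/* XILINX_DMA_MAX_TRANS_LEN_MIN */
	#define TRANS_LEN_MAX		23	/* XILINX_DMA_MAX_TRANS_LEN_MAX */
	#define TRANS_LEN_MAX_V2	26	/* XILINX_DMA_V2_MAX_TRANS_LEN_MAX */

	/* Returns the max_buffer_len a given xlnx,sg-length-width would yield */
	static unsigned long max_buffer_len(unsigned int len_width)
	{
		if (len_width < TRANS_LEN_MIN || len_width > TRANS_LEN_MAX_V2) {
			fprintf(stderr, "invalid width %u, using default %u\n",
				len_width, TRANS_LEN_MAX);
			len_width = TRANS_LEN_MAX;
		} else if (len_width > TRANS_LEN_MAX) {
			fprintf(stderr, "width %u needs an IP with >23-bit length support\n",
				len_width);
		}
		return (1UL << len_width) - 1;	/* GENMASK(len_width - 1, 0) */
	}

	int main(void)
	{
		printf("0x%lx\n", max_buffer_len(26));	/* 0x3ffffff */
		printf("0x%lx\n", max_buffer_len(30));	/* invalid: falls back to 0x7fffff */
		return 0;
	}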