Commit 055128ee authored by Linus Torvalds

Merge tag 'dmaengine-5.2-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:

 - Updates to stm32 dma residue calculations

 - Interleave dma capability to axi-dmac and support for ZynqMP arch

 - Rework of channel assignment for rcar dma

 - Debugfs for pl330 driver

 - Support for Tegra186/Tegra194, refactoring for new chips and support
   for pause/resume

 - Updates to axi-dmac, bcm2835, fsl-edma, idma64, imx-sdma, rcar-dmac,
   stm32-dma etc

 - dev_get_drvdata() updates on few drivers

* tag 'dmaengine-5.2-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (34 commits)
  dmaengine: tegra210-adma: restore channel status
  dmaengine: tegra210-dma: free dma controller in remove()
  dmaengine: tegra210-adma: add pause/resume support
  dmaengine: tegra210-adma: add support for Tegra186/Tegra194
  Documentation: DT: Add compatibility binding for Tegra186
  dmaengine: tegra210-adma: prepare for supporting newer Tegra chips
  dmaengine: at_xdmac: remove a stray bottom half unlock
  dmaengine: fsl-edma: Adjust indentation
  dmaengine: fsl-edma: Fix typo in Vybrid name
  dmaengine: stm32-dma: fix residue calculation in stm32-dma
  dmaengine: nbpfaxi: Use dev_get_drvdata()
  dmaengine: bcm-sba-raid: Use dev_get_drvdata()
  dmaengine: stm32-dma: Fix unsigned variable compared with zero
  dmaengine: stm32-dma: use platform_get_irq()
  dmaengine: rcar-dmac: Update copyright information
  dmaengine: imx-sdma: Only check ratio on parts that support 1:1
  dmaengine: xgene-dma: fix spelling mistake "descripto" -> "descriptor"
  dmaengine: idma64: Move driver name to the header
  dmaengine: bcm2835: Drop duplicate capability setting.
  dmaengine: pl330: _stop: clear interrupt status
  ...
parents ddab5337 f33e7bb3
@@ -18,7 +18,6 @@ Required properties for adi,channels sub-node:
 Required channel sub-node properties:
 - reg: Which channel this node refers to.
-- adi,length-width: Width of the DMA transfer length register.
 - adi,source-bus-width,
   adi,destination-bus-width: Width of the source or destination bus in bits.
 - adi,source-bus-type,
@@ -28,7 +27,8 @@ Required channel sub-node properties:
 	1 (AXI_DMAC_TYPE_AXI_STREAM): Streaming AXI interface
 	2 (AXI_DMAC_TYPE_AXI_FIFO): FIFO interface
-Optional channel properties:
+Deprecated optional channel properties:
+- adi,length-width: Width of the DMA transfer length register.
 - adi,cyclic: Must be set if the channel supports hardware cyclic DMA
   transfers.
 - adi,2d: Must be set if the channel supports hardware 2D DMA transfers.
...
@@ -4,7 +4,9 @@ The Tegra Audio DMA controller that is used for transferring data
 between system memory and the Audio Processing Engine (APE).

 Required properties:
-- compatible: Must be "nvidia,tegra210-adma".
+- compatible: Should contain one of the following:
+  - "nvidia,tegra210-adma": for Tegra210
+  - "nvidia,tegra186-adma": for Tegra186 and Tegra194
 - reg: Should contain DMA registers location and length. This should be
   a single entry that includes all of the per-channel registers in one
   contiguous bank.
...
@@ -99,7 +99,7 @@ config AT_XDMAC

 config AXI_DMAC
 	tristate "Analog Devices AXI-DMAC DMA support"
-	depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_SOCFPGA || COMPILE_TEST
+	depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
...
@@ -254,6 +254,7 @@ enum pl08x_dma_chan_state {
  * @slave: whether this channel is a device (slave) or for memcpy
  * @signal: the physical DMA request signal which this channel is using
  * @mux_use: count of descriptors using this DMA request signal setting
+ * @waiting_at: time in jiffies when this channel moved to waiting state
  */
 struct pl08x_dma_chan {
 	struct virt_dma_chan vc;
@@ -267,6 +268,7 @@ struct pl08x_dma_chan {
 	bool slave;
 	int signal;
 	unsigned mux_use;
+	unsigned long waiting_at;
 };

 /**
@@ -875,6 +877,7 @@ static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
 	if (!ch) {
 		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
 		plchan->state = PL08X_CHAN_WAITING;
+		plchan->waiting_at = jiffies;
 		return;
 	}
@@ -913,22 +916,29 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_dma_chan *p, *next;
+	unsigned long waiting_at;

 retry:
 	next = NULL;
+	waiting_at = jiffies;

-	/* Find a waiting virtual channel for the next transfer. */
+	/*
+	 * Find a waiting virtual channel for the next transfer.
+	 * To be fair, time when each channel reached waiting state is compared
+	 * to select channel that is waiting for the longest time.
+	 */
 	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
-		if (p->state == PL08X_CHAN_WAITING) {
+		if (p->state == PL08X_CHAN_WAITING &&
+		    p->waiting_at <= waiting_at) {
 			next = p;
-			break;
+			waiting_at = p->waiting_at;
 		}

 	if (!next && pl08x->has_slave) {
 		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
-			if (p->state == PL08X_CHAN_WAITING) {
+			if (p->state == PL08X_CHAN_WAITING &&
+			    p->waiting_at <= waiting_at) {
 				next = p;
-				break;
+				waiting_at = p->waiting_at;
 			}
 	}
...
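The rewritten scan above no longer takes the first waiting channel it finds but the one with the oldest waiting_at timestamp. A minimal stand-alone sketch of that oldest-first selection (an editor's illustration with invented names, not part of this commit):

	#include <stdio.h>

	/* Hypothetical stand-in for a virtual channel with a waiting timestamp. */
	struct vchan {
		const char *name;
		int waiting;              /* non-zero if in the WAITING state */
		unsigned long waiting_at; /* "jiffies" when it entered WAITING */
	};

	/*
	 * Pick the WAITING channel with the smallest timestamp, i.e. the one
	 * that has waited longest, the same scan pl08x_phy_free() now performs
	 * over the memcpy and slave channel lists.
	 */
	static struct vchan *pick_next(struct vchan *chans, int n, unsigned long now)
	{
		struct vchan *next = NULL;
		unsigned long oldest = now;
		int i;

		for (i = 0; i < n; i++)
			if (chans[i].waiting && chans[i].waiting_at <= oldest) {
				next = &chans[i];
				oldest = chans[i].waiting_at;
			}
		return next;
	}

	int main(void)
	{
		struct vchan chans[] = {
			{ "ch0", 1, 1050 }, { "ch1", 1, 1000 }, { "ch2", 0, 900 },
		};

		/* ch1 wins: it entered the waiting state first. */
		printf("%s\n", pick_next(chans, 3, 1100)->name);
		return 0;
	}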
@@ -308,6 +308,11 @@ static inline int at_xdmac_csize(u32 maxburst)
 	return csize;
 };

+static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
+{
+	return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
+}
+
 static inline u8 at_xdmac_get_dwidth(u32 cfg)
 {
 	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
@@ -389,7 +394,13 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
 			 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
 	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
-	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
+	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
+	/*
+	 * Request Overflow Error is only for peripheral synchronized transfers
+	 */
+	if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
+		reg |= AT_XDMAC_CIE_ROIE;
+
 	/*
 	 * There is no end of list when doing cyclic dma, we need to get
 	 * an interrupt after each periods.
@@ -1575,6 +1586,46 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
 		dmaengine_desc_get_callback_invoke(txd, NULL);
 }

+static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
+{
+	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+	struct at_xdmac_desc *bad_desc;
+
+	/*
+	 * The descriptor currently at the head of the active list is
+	 * broken. Since we don't have any way to report errors, we'll
+	 * just have to scream loudly and try to continue with other
+	 * descriptors queued (if any).
+	 */
+	if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
+		dev_err(chan2dev(&atchan->chan), "read bus error!!!");
+	if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
+		dev_err(chan2dev(&atchan->chan), "write bus error!!!");
+	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
+		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+
+	spin_lock_bh(&atchan->lock);
+
+	/* Channel must be disabled first as it's not done automatically */
+	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+		cpu_relax();
+
+	bad_desc = list_first_entry(&atchan->xfers_list,
+				    struct at_xdmac_desc,
+				    xfer_node);
+
+	spin_unlock_bh(&atchan->lock);
+
+	/* Print bad descriptor's details if needed */
+	dev_dbg(chan2dev(&atchan->chan),
+		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+		__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
+		bad_desc->lld.mbr_ubc);
+
+	/* Then continue with usual descriptor management */
+}
+
 static void at_xdmac_tasklet(unsigned long data)
 {
 	struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
@@ -1594,19 +1645,19 @@ static void at_xdmac_tasklet(unsigned long data)
 	    || (atchan->irq_status & error_mask)) {
 		struct dma_async_tx_descriptor *txd;

-		if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
-			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
-		if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
-			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
-		if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
-			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+		if (atchan->irq_status & error_mask)
+			at_xdmac_handle_error(atchan);

 		spin_lock(&atchan->lock);
 		desc = list_first_entry(&atchan->xfers_list,
 					struct at_xdmac_desc,
 					xfer_node);
 		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
-		BUG_ON(!desc->active_xfer);
+		if (!desc->active_xfer) {
+			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
+			spin_unlock(&atchan->lock);
+			return;
+		}

 		txd = &desc->tx_dma_desc;
...
@@ -1459,8 +1459,7 @@ static void sba_receive_message(struct mbox_client *cl, void *msg)
 static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
 {
-	struct platform_device *pdev = to_platform_device(file->private);
-	struct sba_device *sba = platform_get_drvdata(pdev);
+	struct sba_device *sba = dev_get_drvdata(file->private);

 	/* Write stats in file */
 	sba_write_stats_in_seqfile(sba, file);
...
@@ -891,7 +891,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
 	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
 	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
-	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
 	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
 	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
 	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
...
@@ -166,7 +166,7 @@ static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
 static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
 {
-	if (len == 0 || len > chan->max_length)
+	if (len == 0)
 		return false;
 	if ((len & chan->align_mask) != 0) /* Not aligned */
 		return false;
@@ -379,6 +379,49 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
 	return desc;
 }

+static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
+	enum dma_transfer_direction direction, dma_addr_t addr,
+	unsigned int num_periods, unsigned int period_len,
+	struct axi_dmac_sg *sg)
+{
+	unsigned int num_segments, i;
+	unsigned int segment_size;
+	unsigned int len;
+
+	/* Split into multiple equally sized segments if necessary */
+	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
+	segment_size = DIV_ROUND_UP(period_len, num_segments);
+	/* Take care of alignment */
+	segment_size = ((segment_size - 1) | chan->align_mask) + 1;
+
+	for (i = 0; i < num_periods; i++) {
+		len = period_len;
+
+		while (len > segment_size) {
+			if (direction == DMA_DEV_TO_MEM)
+				sg->dest_addr = addr;
+			else
+				sg->src_addr = addr;
+			sg->x_len = segment_size;
+			sg->y_len = 1;
+			sg++;
+			addr += segment_size;
+			len -= segment_size;
+		}
+
+		if (direction == DMA_DEV_TO_MEM)
+			sg->dest_addr = addr;
+		else
+			sg->src_addr = addr;
+		sg->x_len = len;
+		sg->y_len = 1;
+		sg++;
+		addr += len;
+	}
+
+	return sg;
+}
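axi_dmac_fill_linear_sg() first balances the period across the fewest segments that fit within max_length, then rounds the segment size up to the bus alignment, letting the final segment absorb the remainder. A minimal user-space sketch of that arithmetic (an editor's illustration, not part of this commit; the byte counts are invented):

	#include <stdio.h>

	/*
	 * Round x up to the next multiple of (align_mask + 1); align_mask is an
	 * alignment (power of two) minus one, like the driver's chan->align_mask.
	 */
	static unsigned int align_up(unsigned int x, unsigned int align_mask)
	{
		return ((x - 1) | align_mask) + 1;
	}

	int main(void)
	{
		unsigned int period_len = 70002;  /* bytes to transfer per period */
		unsigned int max_length = 65535;  /* hardware limit per segment */
		unsigned int align_mask = 3;      /* 4-byte bus alignment */
		unsigned int num_segments, segment_size, len;

		num_segments = (period_len + max_length - 1) / max_length;     /* 2 */
		segment_size = (period_len + num_segments - 1) / num_segments; /* 35001 */
		segment_size = align_up(segment_size, align_mask);             /* 35004 */

		/* Emit segments the same way axi_dmac_fill_linear_sg() does */
		for (len = period_len; len > segment_size; len -= segment_size)
			printf("segment of %u bytes\n", segment_size);
		printf("final segment of %u bytes\n", len); /* 35004 + 34998 = 70002 */
		return 0;
	}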
 static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
 	struct dma_chan *c, struct scatterlist *sgl,
 	unsigned int sg_len, enum dma_transfer_direction direction,
@@ -386,16 +429,24 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
 {
 	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
 	struct axi_dmac_desc *desc;
+	struct axi_dmac_sg *dsg;
 	struct scatterlist *sg;
+	unsigned int num_sgs;
 	unsigned int i;

 	if (direction != chan->direction)
 		return NULL;

-	desc = axi_dmac_alloc_desc(sg_len);
+	num_sgs = 0;
+	for_each_sg(sgl, sg, sg_len, i)
+		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
+
+	desc = axi_dmac_alloc_desc(num_sgs);
 	if (!desc)
 		return NULL;

+	dsg = desc->sg;
+
 	for_each_sg(sgl, sg, sg_len, i) {
 		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
 		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
@@ -403,12 +454,8 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
 			return NULL;
 		}

-		if (direction == DMA_DEV_TO_MEM)
-			desc->sg[i].dest_addr = sg_dma_address(sg);
-		else
-			desc->sg[i].src_addr = sg_dma_address(sg);
-		desc->sg[i].x_len = sg_dma_len(sg);
-		desc->sg[i].y_len = 1;
+		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
+			sg_dma_len(sg), dsg);
 	}

 	desc->cyclic = false;
@@ -423,7 +470,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
 {
 	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
 	struct axi_dmac_desc *desc;
-	unsigned int num_periods, i;
+	unsigned int num_periods, num_segments;

 	if (direction != chan->direction)
 		return NULL;
@@ -436,20 +483,14 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
 		return NULL;

 	num_periods = buf_len / period_len;
+	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

-	desc = axi_dmac_alloc_desc(num_periods);
+	desc = axi_dmac_alloc_desc(num_periods * num_segments);
 	if (!desc)
 		return NULL;

-	for (i = 0; i < num_periods; i++) {
-		if (direction == DMA_DEV_TO_MEM)
-			desc->sg[i].dest_addr = buf_addr;
-		else
-			desc->sg[i].src_addr = buf_addr;
-		desc->sg[i].x_len = period_len;
-		desc->sg[i].y_len = 1;
-		buf_addr += period_len;
-	}
+	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
+		period_len, desc->sg);

 	desc->cyclic = true;
@@ -485,7 +526,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
 	if (chan->hw_2d) {
 		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
-		    !axi_dmac_check_len(chan, xt->numf))
+		    xt->numf == 0)
 			return NULL;
 		if (xt->sgl[0].size + dst_icg > chan->max_length ||
 		    xt->sgl[0].size + src_icg > chan->max_length)
@@ -577,15 +618,6 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
 		return ret;
 	chan->dest_width = val / 8;

-	ret = of_property_read_u32(of_chan, "adi,length-width", &val);
-	if (ret)
-		return ret;
-
-	if (val >= 32)
-		chan->max_length = UINT_MAX;
-	else
-		chan->max_length = (1ULL << val) - 1;
-
 	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;

 	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
@@ -597,12 +629,27 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
 	else
 		chan->direction = DMA_DEV_TO_DEV;

-	chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
-	chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");
-
 	return 0;
 }

+static void axi_dmac_detect_caps(struct axi_dmac *dmac)
+{
+	struct axi_dmac_chan *chan = &dmac->chan;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
+	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
+		chan->hw_cyclic = true;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
+	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
+		chan->hw_2d = true;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
+	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
+	if (chan->max_length != UINT_MAX)
+		chan->max_length++;
+}
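axi_dmac_detect_caps() replaces the deprecated devicetree properties with write-then-read-back probing: a flag only sticks if the hardware build implements it, and writing all-ones to X_LENGTH reads back a mask of the implemented length bits. A sketch of the max_length arithmetic, assuming a core with a 24-bit length register and that the register holds length minus one, as the driver's increment implies (an editor's illustration, not part of this commit):

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		/*
		 * Assume a synthesized core whose X_LENGTH register implements
		 * only 24 bits: writing 0xffffffff reads back 0x00ffffff.
		 */
		unsigned int readback = 0x00ffffff;
		unsigned int max_length = readback;

		/*
		 * The length register holds "bytes - 1", so the usable maximum
		 * is the read-back mask plus one, unless all 32 bits stuck and
		 * incrementing would wrap; that is the driver's UINT_MAX check.
		 */
		if (max_length != UINT_MAX)
			max_length++;

		printf("max segment length: %u bytes\n", max_length); /* 16777216 */
		return 0;
	}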
 static int axi_dmac_probe(struct platform_device *pdev)
 {
 	struct device_node *of_channels, *of_chan;
@@ -647,11 +694,12 @@ static int axi_dmac_probe(struct platform_device *pdev)
 	of_node_put(of_channels);

 	pdev->dev.dma_parms = &dmac->dma_parms;
-	dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

 	dma_dev = &dmac->dma_dev;
 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
 	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
 	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
 	dma_dev->device_tx_status = dma_cookie_status;
 	dma_dev->device_issue_pending = axi_dmac_issue_pending;
@@ -675,6 +723,8 @@ static int axi_dmac_probe(struct platform_device *pdev)
 	if (ret < 0)
 		return ret;

+	axi_dmac_detect_caps(dmac);
+
 	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

 	ret = dma_async_device_register(dma_dev);
...
@@ -136,7 +136,7 @@ struct fsl_edma_desc {
 };

 enum edma_version {
-	v1, /* 32ch, Vybdir, mpc57x, etc */
+	v1, /* 32ch, Vybrid, mpc57x, etc */
 	v2, /* 64ch Coldfire */
 };
...
@@ -19,10 +19,9 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>

-#include "idma64.h"
+#include <linux/dma/idma64.h>

-/* Platform driver name */
-#define DRV_NAME		"idma64"
+#include "idma64.h"

 /* For now we support only two channels */
 #define IDMA64_NR_CHAN		2
@@ -592,7 +591,7 @@ static int idma64_probe(struct idma64_chip *chip)
 	idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

-	idma64->dma.dev = chip->dev;
+	idma64->dma.dev = chip->sysdev;

 	dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
@@ -632,6 +631,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
 {
 	struct idma64_chip *chip;
 	struct device *dev = &pdev->dev;
+	struct device *sysdev = dev->parent;
 	struct resource *mem;
 	int ret;
@@ -648,11 +648,12 @@ static int idma64_platform_probe(struct platform_device *pdev)
 	if (IS_ERR(chip->regs))
 		return PTR_ERR(chip->regs);

-	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
 	if (ret)
 		return ret;

 	chip->dev = dev;
+	chip->sysdev = sysdev;

 	ret = idma64_probe(chip);
 	if (ret)
@@ -697,7 +698,7 @@ static struct platform_driver idma64_platform_driver = {
 	.probe		= idma64_platform_probe,
 	.remove		= idma64_platform_remove,
 	.driver = {
-		.name	= DRV_NAME,
+		.name	= LPSS_IDMA64_DRIVER_NAME,
 		.pm	= &idma64_dev_pm_ops,
 	},
 };
@@ -707,4 +708,4 @@ module_platform_driver(idma64_platform_driver);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("iDMA64 core driver");
 MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:" LPSS_IDMA64_DRIVER_NAME);
@@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
 /**
  * struct idma64_chip - representation of iDMA 64-bit controller hardware
  * @dev:		struct device of the DMA controller
+ * @sysdev:		struct device of the physical device that does DMA
  * @irq:		irq line
  * @regs:		memory mapped I/O space
  * @idma64:		struct idma64 that is filed by idma64_probe()
  */
 struct idma64_chip {
 	struct device	*dev;
+	struct device	*sysdev;
 	int		irq;
 	void __iomem	*regs;
 	struct idma64	*idma64;
...
@@ -419,6 +419,7 @@ struct sdma_driver_data {
 	int chnenbl0;
 	int num_events;
 	struct sdma_script_start_addrs	*script_addrs;
+	bool check_ratio;
 };

 struct sdma_engine {
@@ -557,6 +558,13 @@ static struct sdma_driver_data sdma_imx7d = {
 	.script_addrs = &sdma_script_imx7d,
 };

+static struct sdma_driver_data sdma_imx8mq = {
+	.chnenbl0 = SDMA_CHNENBL0_IMX35,
+	.num_events = 48,
+	.script_addrs = &sdma_script_imx7d,
+	.check_ratio = 1,
+};
+
 static const struct platform_device_id sdma_devtypes[] = {
 	{
 		.name = "imx25-sdma",
@@ -579,6 +587,9 @@ static const struct platform_device_id sdma_devtypes[] = {
 	}, {
 		.name = "imx7d-sdma",
 		.driver_data = (unsigned long)&sdma_imx7d,
+	}, {
+		.name = "imx8mq-sdma",
+		.driver_data = (unsigned long)&sdma_imx8mq,
 	}, {
 		/* sentinel */
 	}
@@ -593,6 +604,7 @@ static const struct of_device_id sdma_dt_ids[] = {
 	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
 	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
 	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
+	{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -1852,7 +1864,8 @@ static int sdma_init(struct sdma_engine *sdma)
 	if (ret)
 		goto disable_clk_ipg;

-	if (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg))
+	if (sdma->drvdata->check_ratio &&
+	    (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
 		sdma->clk_ratio = 1;

 	/* Be sure SDMA has not started yet */
...
@@ -1491,14 +1491,14 @@ MODULE_DEVICE_TABLE(platform, nbpf_ids);
 #ifdef CONFIG_PM
 static int nbpf_runtime_suspend(struct device *dev)
 {
-	struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+	struct nbpf_device *nbpf = dev_get_drvdata(dev);
 	clk_disable_unprepare(nbpf->clk);
 	return 0;
 }

 static int nbpf_runtime_resume(struct device *dev)
 {
-	struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+	struct nbpf_device *nbpf = dev_get_drvdata(dev);
 	return clk_prepare_enable(nbpf->clk);
 }
 #endif
...
@@ -11,6 +11,7 @@
  * (at your option) any later version.
  */

+#include <linux/debugfs.h>
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/init.h>
@@ -966,6 +967,7 @@ static void _stop(struct pl330_thread *thrd)
 {
 	void __iomem *regs = thrd->dmac->base;
 	u8 insn[6] = {0, 0, 0, 0, 0, 0};
+	u32 inten = readl(regs + INTEN);

 	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
 		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
@@ -978,10 +980,13 @@ static void _stop(struct pl330_thread *thrd)

 	_emit_KILL(0, insn);

-	/* Stop generating interrupts for SEV */
-	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
-
 	_execute_DBGINSN(thrd, insn, is_manager(thrd));
+
+	/* clear the event */
+	if (inten & (1 << thrd->ev))
+		writel(1 << thrd->ev, regs + INTCLR);
+	/* Stop generating interrupts for SEV */
+	writel(inten & ~(1 << thrd->ev), regs + INTEN);
 }

 /* Start doing req 'idx' of thread 'thrd' */
@@ -2896,6 +2901,55 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
 	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
 	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)

+#ifdef CONFIG_DEBUG_FS
+static int pl330_debugfs_show(struct seq_file *s, void *data)
+{
+	struct pl330_dmac *pl330 = s->private;
+	int chans, pchs, ch, pr;
+
+	chans = pl330->pcfg.num_chan;
+	pchs = pl330->num_peripherals;
+
+	seq_puts(s, "PL330 physical channels:\n");
+	seq_puts(s, "THREAD:\t\tCHANNEL:\n");
+	seq_puts(s, "--------\t-----\n");
+
+	for (ch = 0; ch < chans; ch++) {
+		struct pl330_thread *thrd = &pl330->channels[ch];
+		int found = -1;
+
+		for (pr = 0; pr < pchs; pr++) {
+			struct dma_pl330_chan *pch = &pl330->peripherals[pr];
+
+			if (!pch->thread || thrd->id != pch->thread->id)
+				continue;
+
+			found = pr;
+		}
+
+		seq_printf(s, "%d\t\t", thrd->id);
+		if (found == -1)
+			seq_puts(s, "--\n");
+		else
+			seq_printf(s, "%d\n", found);
+	}
+
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pl330_debugfs);
+
+static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
+{
+	debugfs_create_file(dev_name(pl330->ddma.dev),
+			    S_IFREG | 0444, NULL, pl330,
+			    &pl330_debugfs_fops);
+}
+#else
+static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
+{
+}
+#endif
+
 /*
  * Runtime PM callbacks are provided by amba/bus.c driver.
  *
@@ -3082,6 +3136,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		dev_err(&adev->dev, "unable to set the seg size\n");

+	init_pl330_debugfs(pl330);
 	dev_info(&adev->dev,
 		"Loaded driver for PL330 DMAC-%x\n", adev->periphid);
 	dev_info(&adev->dev,
...
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Renesas R-Car Gen2 DMA Controller Driver
+ * Renesas R-Car Gen2/Gen3 DMA Controller Driver
  *
- * Copyright (C) 2014 Renesas Electronics Inc.
+ * Copyright (C) 2014-2019 Renesas Electronics Inc.
  *
  * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  */
...
@@ -1042,33 +1042,97 @@ static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
 	return ndtr << width;
 }

+/**
+ * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
+ * @chan: dma channel
+ *
+ * This function called when IRQ are disable, checks that the hardware has not
+ * switched on the next transfer in double buffer mode. The test is done by
+ * comparing the next_sg memory address with the hardware related register
+ * (based on CT bit value).
+ *
+ * Returns true if expected current transfer is still running or double
+ * buffer mode is not activated.
+ */
+static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
+{
+	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+	struct stm32_dma_sg_req *sg_req;
+	u32 dma_scr, dma_smar, id;
+
+	id = chan->id;
+	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+	if (!(dma_scr & STM32_DMA_SCR_DBM))
+		return true;
+
+	sg_req = &chan->desc->sg_req[chan->next_sg];
+
+	if (dma_scr & STM32_DMA_SCR_CT) {
+		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
+		return (dma_smar == sg_req->chan_reg.dma_sm0ar);
+	}
+
+	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
+
+	return (dma_smar == sg_req->chan_reg.dma_sm1ar);
+}
+
 static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
 				     struct stm32_dma_desc *desc,
 				     u32 next_sg)
 {
 	u32 modulo, burst_size;
-	u32 residue = 0;
+	u32 residue;
+	u32 n_sg = next_sg;
+	struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
 	int i;

 	/*
-	 * In cyclic mode, for the last period, residue = remaining bytes from
-	 * NDTR
+	 * Calculate the residue means compute the descriptors
+	 * information:
+	 * - the sg_req currently transferred
+	 * - the Hardware remaining position in this sg (NDTR bits field).
+	 *
+	 * A race condition may occur if DMA is running in cyclic or double
+	 * buffer mode, since the DMA register are automatically reloaded at end
+	 * of period transfer. The hardware may have switched to the next
+	 * transfer (CT bit updated) just before the position (SxNDTR reg) is
+	 * read.
+	 * In this case the SxNDTR reg could (or not) correspond to the new
+	 * transfer position, and not the expected one.
+	 * The strategy implemented in the stm32 driver is to:
+	 *  - read the SxNDTR register
+	 *  - crosscheck that hardware is still in current transfer.
+	 * In case of switch, we can assume that the DMA is at the beginning of
+	 * the next transfer. So we approximate the residue in consequence, by
+	 * pointing on the beginning of next transfer.
+	 *
+	 * This race condition doesn't apply for none cyclic mode, as double
+	 * buffer is not used. In such situation registers are updated by the
+	 * software.
 	 */
-	if (chan->desc->cyclic && next_sg == 0) {
-		residue = stm32_dma_get_remaining_bytes(chan);
-		goto end;
+
+	residue = stm32_dma_get_remaining_bytes(chan);
+
+	if (!stm32_dma_is_current_sg(chan)) {
+		n_sg++;
+		if (n_sg == chan->desc->num_sgs)
+			n_sg = 0;
+		residue = sg_req->len;
 	}

 	/*
-	 * For all other periods in cyclic mode, and in sg mode,
-	 * residue = remaining bytes from NDTR + remaining periods/sg to be
-	 * transferred
+	 * In cyclic mode, for the last period, residue = remaining bytes
+	 * from NDTR,
+	 * else for all other periods in cyclic mode, and in sg mode,
+	 * residue = remaining bytes from NDTR + remaining
+	 * periods/sg to be transferred
 	 */
-	for (i = next_sg; i < desc->num_sgs; i++)
-		residue += desc->sg_req[i].len;
-	residue += stm32_dma_get_remaining_bytes(chan);
+	if (!chan->desc->cyclic || n_sg != 0)
+		for (i = n_sg; i < desc->num_sgs; i++)
+			residue += desc->sg_req[i].len;

-end:
 	if (!chan->mem_burst)
 		return residue;
@@ -1302,13 +1366,16 @@ static int stm32_dma_probe(struct platform_device *pdev)
 	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
 		chan = &dmadev->chan[i];

-		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
-		if (!res) {
-			ret = -EINVAL;
-			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
+		chan->irq = platform_get_irq(pdev, i);
+		ret = platform_get_irq(pdev, i);
+		if (ret < 0) {
+			if (ret != -EPROBE_DEFER)
+				dev_err(&pdev->dev,
+					"No irq resource for chan %d\n", i);
 			goto err_unregister;
 		}
-		chan->irq = res->start;
+		chan->irq = ret;
+
 		ret = devm_request_irq(&pdev->dev, chan->irq,
 				       stm32_dma_chan_irq, 0,
 				       dev_name(chan2dev(chan)), chan);
...
@@ -22,7 +22,6 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/of_irq.h>
-#include <linux/pm_clock.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
@@ -31,35 +30,33 @@
 #define ADMA_CH_CMD					0x00
 #define ADMA_CH_STATUS					0x0c
 #define ADMA_CH_STATUS_XFER_EN				BIT(0)
+#define ADMA_CH_STATUS_XFER_PAUSED			BIT(1)

 #define ADMA_CH_INT_STATUS				0x10
 #define ADMA_CH_INT_STATUS_XFER_DONE			BIT(0)

 #define ADMA_CH_INT_CLEAR				0x1c
 #define ADMA_CH_CTRL					0x24
-#define ADMA_CH_CTRL_TX_REQ(val)			(((val) & 0xf) << 28)
-#define ADMA_CH_CTRL_TX_REQ_MAX				10
-#define ADMA_CH_CTRL_RX_REQ(val)			(((val) & 0xf) << 24)
-#define ADMA_CH_CTRL_RX_REQ_MAX				10
 #define ADMA_CH_CTRL_DIR(val)				(((val) & 0xf) << 12)
 #define ADMA_CH_CTRL_DIR_AHUB2MEM			2
 #define ADMA_CH_CTRL_DIR_MEM2AHUB			4
 #define ADMA_CH_CTRL_MODE_CONTINUOUS			(2 << 8)
 #define ADMA_CH_CTRL_FLOWCTRL_EN			BIT(1)
+#define ADMA_CH_CTRL_XFER_PAUSE_SHIFT			0

 #define ADMA_CH_CONFIG					0x28
 #define ADMA_CH_CONFIG_SRC_BUF(val)			(((val) & 0x7) << 28)
 #define ADMA_CH_CONFIG_TRG_BUF(val)			(((val) & 0x7) << 24)
-#define ADMA_CH_CONFIG_BURST_SIZE(val)			(((val) & 0x7) << 20)
-#define ADMA_CH_CONFIG_BURST_16				5
+#define ADMA_CH_CONFIG_BURST_SIZE_SHIFT			20
+#define ADMA_CH_CONFIG_MAX_BURST_SIZE			16
 #define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val)		((val) & 0xf)
 #define ADMA_CH_CONFIG_MAX_BUFS				8

 #define ADMA_CH_FIFO_CTRL				0x2c
 #define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val)		(((val) & 0xf) << 24)
 #define ADMA_CH_FIFO_CTRL_STARV_THRES(val)		(((val) & 0xf) << 16)
-#define ADMA_CH_FIFO_CTRL_TX_SIZE(val)			(((val) & 0xf) << 8)
-#define ADMA_CH_FIFO_CTRL_RX_SIZE(val)			((val) & 0xf)
+#define ADMA_CH_FIFO_CTRL_TX_FIFO_SIZE_SHIFT		8
+#define ADMA_CH_FIFO_CTRL_RX_FIFO_SIZE_SHIFT		0

 #define ADMA_CH_LOWER_SRC_ADDR				0x34
 #define ADMA_CH_LOWER_TRG_ADDR				0x3c
@@ -69,25 +66,41 @@
 #define ADMA_CH_XFER_STATUS				0x54
 #define ADMA_CH_XFER_STATUS_COUNT_MASK			0xffff

-#define ADMA_GLOBAL_CMD					0xc00
-#define ADMA_GLOBAL_SOFT_RESET				0xc04
-#define ADMA_GLOBAL_INT_CLEAR				0xc20
-#define ADMA_GLOBAL_CTRL				0xc24
+#define ADMA_GLOBAL_CMD					0x00
+#define ADMA_GLOBAL_SOFT_RESET				0x04

-#define ADMA_CH_REG_OFFSET(a)				(a * 0x80)
+#define TEGRA_ADMA_BURST_COMPLETE_TIME			20

 #define ADMA_CH_FIFO_CTRL_DEFAULT	(ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \
-					 ADMA_CH_FIFO_CTRL_STARV_THRES(1) | \
-					 ADMA_CH_FIFO_CTRL_TX_SIZE(3)     | \
-					 ADMA_CH_FIFO_CTRL_RX_SIZE(3))
+					 ADMA_CH_FIFO_CTRL_STARV_THRES(1))
+
+#define ADMA_CH_REG_FIELD_VAL(val, mask, shift)	(((val) & mask) << shift)

 struct tegra_adma;

 /*
  * struct tegra_adma_chip_data - Tegra chip specific data
+ * @global_reg_offset: Register offset of DMA global register.
+ * @global_int_clear: Register offset of DMA global interrupt clear.
+ * @ch_req_tx_shift: Register offset for AHUB transmit channel select.
+ * @ch_req_rx_shift: Register offset for AHUB receive channel select.
+ * @ch_base_offset: Reister offset of DMA channel registers.
+ * @ch_req_mask: Mask for Tx or Rx channel select.
+ * @ch_req_max: Maximum number of Tx or Rx channels available.
+ * @ch_reg_size: Size of DMA channel register space.
  * @nr_channels: Number of DMA channels available.
  */
 struct tegra_adma_chip_data {
-	int		nr_channels;
+	unsigned int (*adma_get_burst_config)(unsigned int burst_size);
+	unsigned int global_reg_offset;
+	unsigned int global_int_clear;
+	unsigned int ch_req_tx_shift;
+	unsigned int ch_req_rx_shift;
+	unsigned int ch_base_offset;
+	unsigned int ch_req_mask;
+	unsigned int ch_req_max;
+	unsigned int ch_reg_size;
+	unsigned int nr_channels;
 };

 /*
@@ -99,6 +112,7 @@ struct tegra_adma_chan_regs {
 	unsigned int src_addr;
 	unsigned int trg_addr;
 	unsigned int fifo_ctrl;
+	unsigned int cmd;
 	unsigned int tc;
 };
@@ -128,6 +142,7 @@ struct tegra_adma_chan {
 	enum dma_transfer_direction	sreq_dir;
 	unsigned int			sreq_index;
 	bool				sreq_reserved;
+	struct tegra_adma_chan_regs	ch_regs;

 	/* Transfer count and position info */
 	unsigned int			tx_buf_count;
@@ -141,6 +156,7 @@ struct tegra_adma {
 	struct dma_device		dma_dev;
 	struct device			*dev;
 	void __iomem			*base_addr;
+	struct clk			*ahub_clk;
 	unsigned int			nr_channels;
 	unsigned long			rx_requests_reserved;
 	unsigned long			tx_requests_reserved;
@@ -148,18 +164,20 @@
 	/* Used to store global command register state when suspending */
 	unsigned int			global_cmd;

+	const struct tegra_adma_chip_data *cdata;
+
 	/* Last member of the structure */
 	struct tegra_adma_chan		channels[0];
 };

 static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
 {
-	writel(val, tdma->base_addr + reg);
+	writel(val, tdma->base_addr + tdma->cdata->global_reg_offset + reg);
 }

 static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
 {
-	return readl(tdma->base_addr + reg);
+	return readl(tdma->base_addr + tdma->cdata->global_reg_offset + reg);
 }

 static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
@@ -209,14 +227,16 @@ static int tegra_adma_init(struct tegra_adma *tdma)
 	int ret;

 	/* Clear any interrupts */
-	tdma_write(tdma, ADMA_GLOBAL_INT_CLEAR, 0x1);
+	tdma_write(tdma, tdma->cdata->global_int_clear, 0x1);

 	/* Assert soft reset */
 	tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);

 	/* Wait for reset to clear */
 	ret = readx_poll_timeout(readl,
-				 tdma->base_addr + ADMA_GLOBAL_SOFT_RESET,
+				 tdma->base_addr +
+				 tdma->cdata->global_reg_offset +
+				 ADMA_GLOBAL_SOFT_RESET,
 				 status, status == 0, 20, 10000);
 	if (ret)
 		return ret;
@@ -236,13 +256,13 @@ static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
 	if (tdc->sreq_reserved)
 		return tdc->sreq_dir == direction ? 0 : -EINVAL;

-	switch (direction) {
-	case DMA_MEM_TO_DEV:
-		if (sreq_index > ADMA_CH_CTRL_TX_REQ_MAX) {
-			dev_err(tdma->dev, "invalid DMA request\n");
-			return -EINVAL;
-		}
+	if (sreq_index > tdma->cdata->ch_req_max) {
+		dev_err(tdma->dev, "invalid DMA request\n");
+		return -EINVAL;
+	}

+	switch (direction) {
+	case DMA_MEM_TO_DEV:
 		if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) {
 			dev_err(tdma->dev, "DMA request reserved\n");
 			return -EINVAL;
@@ -250,11 +270,6 @@ static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
 		break;

 	case DMA_DEV_TO_MEM:
-		if (sreq_index > ADMA_CH_CTRL_RX_REQ_MAX) {
-			dev_err(tdma->dev, "invalid DMA request\n");
-			return -EINVAL;
-		}
-
 		if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) {
 			dev_err(tdma->dev, "DMA request reserved\n");
 			return -EINVAL;
@@ -428,6 +443,51 @@ static void tegra_adma_issue_pending(struct dma_chan *dc)
 	spin_unlock_irqrestore(&tdc->vc.lock, flags);
 }

+static bool tegra_adma_is_paused(struct tegra_adma_chan *tdc)
+{
+	u32 csts;
+
+	csts = tdma_ch_read(tdc, ADMA_CH_STATUS);
+	csts &= ADMA_CH_STATUS_XFER_PAUSED;
+
+	return csts ? true : false;
+}
+
+static int tegra_adma_pause(struct dma_chan *dc)
+{
+	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+	struct tegra_adma_desc *desc = tdc->desc;
+	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
+	int dcnt = 10;
+
+	ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
+	ch_regs->ctrl |= (1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT);
+	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
+
+	while (dcnt-- && !tegra_adma_is_paused(tdc))
+		udelay(TEGRA_ADMA_BURST_COMPLETE_TIME);
+
+	if (dcnt < 0) {
+		dev_err(tdc2dev(tdc), "unable to pause DMA channel\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int tegra_adma_resume(struct dma_chan *dc)
+{
+	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+	struct tegra_adma_desc *desc = tdc->desc;
+	struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
+
+	ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
+	ch_regs->ctrl &= ~(1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT);
+	tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
+
+	return 0;
+}
+
 static int tegra_adma_terminate_all(struct dma_chan *dc)
 {
 	struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
@@ -481,12 +541,29 @@ static enum dma_status tegra_adma_tx_status(struct dma_chan *dc,
 	return ret;
 }

+static unsigned int tegra210_adma_get_burst_config(unsigned int burst_size)
+{
+	if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE)
+		burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE;
+
+	return fls(burst_size) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT;
+}
+
+static unsigned int tegra186_adma_get_burst_config(unsigned int burst_size)
+{
+	if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE)
+		burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE;
+
+	return (burst_size - 1) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT;
+}
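The two callbacks above encode the same dmaengine maxburst value differently: Tegra210 wants a log2-style code (hence fls()), while Tegra186/Tegra194 want the burst size minus one. A user-space sketch comparing the encodings (an editor's illustration, not part of this commit; fls() is open-coded here because it is a kernel helper):

	#include <stdio.h>

	#define BURST_SIZE_SHIFT	20
	#define MAX_BURST_SIZE		16

	/* Open-coded equivalent of the kernel's fls(): 1-based index of the
	 * highest set bit, 0 for 0. */
	static unsigned int fls_u32(unsigned int x)
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	static unsigned int tegra210_burst(unsigned int burst)
	{
		if (!burst || burst > MAX_BURST_SIZE)
			burst = MAX_BURST_SIZE;
		return fls_u32(burst) << BURST_SIZE_SHIFT; /* 1,2,4,8,16 -> 1,2,3,4,5 */
	}

	static unsigned int tegra186_burst(unsigned int burst)
	{
		if (!burst || burst > MAX_BURST_SIZE)
			burst = MAX_BURST_SIZE;
		return (burst - 1) << BURST_SIZE_SHIFT; /* 1,2,4,8,16 -> 0,1,3,7,15 */
	}

	int main(void)
	{
		unsigned int b;

		for (b = 1; b <= 16; b *= 2)
			printf("burst %2u -> tegra210 code %u, tegra186 code %u\n", b,
			       tegra210_burst(b) >> BURST_SIZE_SHIFT,
			       tegra186_burst(b) >> BURST_SIZE_SHIFT);
		return 0;
	}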
static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
struct tegra_adma_desc *desc, struct tegra_adma_desc *desc,
dma_addr_t buf_addr, dma_addr_t buf_addr,
enum dma_transfer_direction direction) enum dma_transfer_direction direction)
{ {
struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs; struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
const struct tegra_adma_chip_data *cdata = tdc->tdma->cdata;
unsigned int burst_size, adma_dir; unsigned int burst_size, adma_dir;
if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS) if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS)
...@@ -495,17 +572,21 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, ...@@ -495,17 +572,21 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
switch (direction) { switch (direction) {
case DMA_MEM_TO_DEV: case DMA_MEM_TO_DEV:
adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB; adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB;
burst_size = fls(tdc->sconfig.dst_maxburst); burst_size = tdc->sconfig.dst_maxburst;
ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1); ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1);
ch_regs->ctrl = ADMA_CH_CTRL_TX_REQ(tdc->sreq_index); ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index,
cdata->ch_req_mask,
cdata->ch_req_tx_shift);
ch_regs->src_addr = buf_addr; ch_regs->src_addr = buf_addr;
break; break;
case DMA_DEV_TO_MEM: case DMA_DEV_TO_MEM:
adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM; adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM;
burst_size = fls(tdc->sconfig.src_maxburst); burst_size = tdc->sconfig.src_maxburst;
ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1); ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1);
ch_regs->ctrl = ADMA_CH_CTRL_RX_REQ(tdc->sreq_index); ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index,
cdata->ch_req_mask,
cdata->ch_req_rx_shift);
ch_regs->trg_addr = buf_addr; ch_regs->trg_addr = buf_addr;
break; break;
...@@ -514,13 +595,10 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, ...@@ -514,13 +595,10 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
return -EINVAL; return -EINVAL;
} }
if (!burst_size || burst_size > ADMA_CH_CONFIG_BURST_16)
burst_size = ADMA_CH_CONFIG_BURST_16;
ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) | ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
ADMA_CH_CTRL_MODE_CONTINUOUS | ADMA_CH_CTRL_MODE_CONTINUOUS |
ADMA_CH_CTRL_FLOWCTRL_EN; ADMA_CH_CTRL_FLOWCTRL_EN;
ch_regs->config |= ADMA_CH_CONFIG_BURST_SIZE(burst_size); ch_regs->config |= cdata->adma_get_burst_config(burst_size);
ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1); ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT; ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT;
ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK; ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
...@@ -635,32 +713,99 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, ...@@ -635,32 +713,99 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
static int tegra_adma_runtime_suspend(struct device *dev) static int tegra_adma_runtime_suspend(struct device *dev)
{ {
struct tegra_adma *tdma = dev_get_drvdata(dev); struct tegra_adma *tdma = dev_get_drvdata(dev);
struct tegra_adma_chan_regs *ch_reg;
struct tegra_adma_chan *tdc;
int i;
tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD); tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
if (!tdma->global_cmd)
goto clk_disable;
for (i = 0; i < tdma->nr_channels; i++) {
tdc = &tdma->channels[i];
ch_reg = &tdc->ch_regs;
ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
/* skip if channel is not active */
if (!ch_reg->cmd)
continue;
ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
}
clk_disable:
clk_disable_unprepare(tdma->ahub_clk);
return pm_clk_suspend(dev); return 0;
} }
static int tegra_adma_runtime_resume(struct device *dev) static int tegra_adma_runtime_resume(struct device *dev)
{ {
struct tegra_adma *tdma = dev_get_drvdata(dev); struct tegra_adma *tdma = dev_get_drvdata(dev);
int ret; struct tegra_adma_chan_regs *ch_reg;
struct tegra_adma_chan *tdc;
int ret, i;
ret = pm_clk_resume(dev); ret = clk_prepare_enable(tdma->ahub_clk);
if (ret) if (ret) {
dev_err(dev, "ahub clk_enable failed: %d\n", ret);
return ret; return ret;
}
tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd); tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
if (!tdma->global_cmd)
return 0;
for (i = 0; i < tdma->nr_channels; i++) {
tdc = &tdma->channels[i];
ch_reg = &tdc->ch_regs;
/* skip if channel was not active earlier */
if (!ch_reg->cmd)
continue;
tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
}
return 0; return 0;
} }
static const struct tegra_adma_chip_data tegra210_chip_data = { static const struct tegra_adma_chip_data tegra210_chip_data = {
.adma_get_burst_config = tegra210_adma_get_burst_config,
.global_reg_offset = 0xc00,
.global_int_clear = 0x20,
.ch_req_tx_shift = 28,
.ch_req_rx_shift = 24,
.ch_base_offset = 0,
.ch_req_mask = 0xf,
.ch_req_max = 10,
.ch_reg_size = 0x80,
.nr_channels = 22, .nr_channels = 22,
}; };
static const struct tegra_adma_chip_data tegra186_chip_data = {
.adma_get_burst_config = tegra186_adma_get_burst_config,
.global_reg_offset = 0,
.global_int_clear = 0x402c,
.ch_req_tx_shift = 27,
.ch_req_rx_shift = 22,
.ch_base_offset = 0x10000,
.ch_req_mask = 0x1f,
.ch_req_max = 20,
.ch_reg_size = 0x100,
.nr_channels = 32,
};
static const struct of_device_id tegra_adma_of_match[] = { static const struct of_device_id tegra_adma_of_match[] = {
{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data }, { .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
{ .compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data },
{ }, { },
}; };
MODULE_DEVICE_TABLE(of, tegra_adma_of_match); MODULE_DEVICE_TABLE(of, tegra_adma_of_match);
...@@ -685,6 +830,7 @@ static int tegra_adma_probe(struct platform_device *pdev) ...@@ -685,6 +830,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
return -ENOMEM; return -ENOMEM;
tdma->dev = &pdev->dev; tdma->dev = &pdev->dev;
tdma->cdata = cdata;
tdma->nr_channels = cdata->nr_channels; tdma->nr_channels = cdata->nr_channels;
platform_set_drvdata(pdev, tdma); platform_set_drvdata(pdev, tdma);
@@ -693,13 +839,11 @@ static int tegra_adma_probe(struct platform_device *pdev)
 	if (IS_ERR(tdma->base_addr))
 		return PTR_ERR(tdma->base_addr);

-	ret = pm_clk_create(&pdev->dev);
-	if (ret)
-		return ret;
-
-	ret = of_pm_clk_add_clk(&pdev->dev, "d_audio");
-	if (ret)
-		goto clk_destroy;
+	tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
+	if (IS_ERR(tdma->ahub_clk)) {
+		dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
+		return PTR_ERR(tdma->ahub_clk);
+	}

 	pm_runtime_enable(&pdev->dev);
@@ -715,7 +859,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
 	for (i = 0; i < tdma->nr_channels; i++) {
 		struct tegra_adma_chan *tdc = &tdma->channels[i];

-		tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);
+		tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
+				 + (cdata->ch_reg_size * i);

 		tdc->irq = of_irq_get(pdev->dev.of_node, i);
 		if (tdc->irq <= 0) {
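The reworked address computation replaces a fixed macro with the per-chip base and stride. Plugging in the tables above: on Tegra210 channel i lives at base + 0x80 * i, while on Tegra186 it lives at base + 0x10000 + 0x100 * i, so channel 3 sits at base + 0x180 and base + 0x10300 respectively. A self-contained restatement of that math, with a minimal stand-in for the chip-data fields it uses:

#include <linux/io.h>
#include <linux/types.h>

/* Minimal stand-in for the two fields used here; the full struct
 * lives in the driver. */
struct chip_layout {
	unsigned int ch_base_offset;
	unsigned int ch_reg_size;
};

/* Worked form of the hunk above: per-chip channel register window.
 * Tegra210: base + 0x00000 + 0x80 * i  (channel 3 -> base + 0x180)
 * Tegra186: base + 0x10000 + 0x100 * i (channel 3 -> base + 0x10300) */
static void __iomem *adma_chan_addr(void __iomem *base,
				    const struct chip_layout *c,
				    unsigned int i)
{
	return base + c->ch_base_offset + c->ch_reg_size * i;
}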
@@ -746,6 +891,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
 	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	tdma->dma_dev.device_pause = tegra_adma_pause;
+	tdma->dma_dev.device_resume = tegra_adma_resume;

 	ret = dma_async_device_register(&tdma->dma_dev);
 	if (ret < 0) {
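With device_pause and device_resume wired up, any dmaengine client on this controller can suspend a transfer in flight through the generic API; the ADMA audio clients are the obvious users. A hedged usage sketch from the client side, where 'chan' is assumed to be a channel requested from this controller:

#include <linux/dmaengine.h>

/* Sketch: pause and resume an in-flight transfer from a client driver. */
static int example_pause_resume(struct dma_chan *chan)
{
	int ret;

	ret = dmaengine_pause(chan);	/* dispatches to device_pause */
	if (ret)
		return ret;

	return dmaengine_resume(chan);	/* dispatches to device_resume */
}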
@@ -776,8 +923,6 @@ static int tegra_adma_probe(struct platform_device *pdev)
 	pm_runtime_put_sync(&pdev->dev);
 rpm_disable:
 	pm_runtime_disable(&pdev->dev);
-clk_destroy:
-	pm_clk_destroy(&pdev->dev);
 	return ret;
 }
@@ -787,6 +932,7 @@ static int tegra_adma_remove(struct platform_device *pdev)
 	struct tegra_adma *tdma = platform_get_drvdata(pdev);
 	int i;

+	of_dma_controller_free(pdev->dev.of_node);
 	dma_async_device_unregister(&tdma->dma_dev);

 	for (i = 0; i < tdma->nr_channels; ++i)
@@ -794,22 +940,15 @@ static int tegra_adma_remove(struct platform_device *pdev)
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
-	pm_clk_destroy(&pdev->dev);

 	return 0;
 }
-#ifdef CONFIG_PM_SLEEP
-static int tegra_adma_pm_suspend(struct device *dev)
-{
-	return pm_runtime_suspended(dev) == false;
-}
-#endif
-
 static const struct dev_pm_ops tegra_adma_dev_pm_ops = {
 	SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend,
 			   tegra_adma_runtime_resume, NULL)
-	SET_SYSTEM_SLEEP_PM_OPS(tegra_adma_pm_suspend, NULL)
+	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+				     pm_runtime_force_resume)
 };
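The dropped CONFIG_PM_SLEEP hook never saved anything: it merely returned nonzero (an error) whenever the device was still runtime-active. The replacement reuses the runtime PM path for system sleep: pm_runtime_force_suspend() invokes the driver's runtime-suspend callback at late suspend if the device is still active, and pm_runtime_force_resume() undoes it symmetrically. A generic sketch of the resulting shape, with hypothetical callbacks standing in for the driver's:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical callbacks standing in for the driver's runtime PM pair. */
static int example_runtime_suspend(struct device *dev)
{
	/* save controller state, gate clocks */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	/* ungate clocks, restore controller state */
	return 0;
}

/* One suspend/resume implementation serves both runtime PM and late
 * system sleep, via the force_suspend/force_resume helpers. */
static const struct dev_pm_ops example_pm_ops = {
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
};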
 static struct platform_driver tegra_admac_driver = {
...
@@ -703,7 +703,7 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 	INIT_LIST_HEAD(&ld_completed);

-	spin_lock_bh(&chan->lock);
+	spin_lock(&chan->lock);

 	/* Clean already completed and acked descriptors */
 	xgene_dma_clean_completed_descriptor(chan);
@@ -772,7 +772,7 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 	 */
 	xgene_chan_xfer_ld_pending(chan);

-	spin_unlock_bh(&chan->lock);
+	spin_unlock(&chan->lock);

 	/* Run the callback for each descriptor, in order */
 	list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
@@ -797,7 +797,7 @@ static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
 		return -ENOMEM;
 	}

-	chan_dbg(chan, "Allocate descripto pool\n");
+	chan_dbg(chan, "Allocate descriptor pool\n");

 	return 1;
 }
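Dropping the _bh suffix here is safe only because of where the function runs: the usual rationale (inferred from the calling context, not stated in the hunk) is that this cleanup path executes from the channel tasklet, i.e. softirq context, where bottom halves are already disabled on the local CPU, making the _bh form redundant. For contrast, a sketch of when each form applies:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Process context that races with a tasklet must block bottom halves
 * while holding the lock, or the tasklet could deadlock on this CPU. */
static void touch_from_process_context(void)
{
	spin_lock_bh(&example_lock);
	/* ... mutate state shared with the tasklet ... */
	spin_unlock_bh(&example_lock);
}

/* Inside the tasklet itself, bottom halves are already disabled, so
 * the plain variants (as in the hunk above) suffice. */
static void touch_from_tasklet(void)
{
	spin_lock(&example_lock);
	/* ... */
	spin_unlock(&example_lock);
}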
...
@@ -28,6 +28,8 @@
 #include <linux/seq_file.h>
 #include <linux/io-64-nonatomic-lo-hi.h>

+#include <linux/dma/idma64.h>
+
 #include "intel-lpss.h"

 #define LPSS_DEV_OFFSET		0x000
@@ -96,8 +98,6 @@ static const struct resource intel_lpss_idma64_resources[] = {
 	DEFINE_RES_IRQ(0),
 };

-#define LPSS_IDMA64_DRIVER_NAME		"idma64"
-
 /*
  * Cells needs to be ordered so that the iDMA is created first. This is
  * because we need to be sure the DMA is available when the host controller
...
@@ -1498,12 +1498,7 @@ static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
 static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
 {
-	struct device *dev = param;
-
-	if (dev != chan->device->dev->parent)
-		return false;
-
-	return true;
+	return param == chan->device->dev;
 }
 #endif /* CONFIG_PCI */
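Filter callbacks like this one are paired with dma_request_channel(): the core walks the registered channels and offers each to the filter together with the opaque param, which after this change is expected to be the dmaengine device itself rather than its parent. A usage sketch with a hypothetical caller; the real call site is collapsed in this view:

#include <linux/dmaengine.h>

/* Sketch: request a slave channel whose dmaengine device is 'dma_dev'. */
static struct dma_chan *example_request_chan(struct device *dma_dev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter receives 'dma_dev' as its 'param' argument. */
	return dma_request_channel(mask, pxa2xx_spi_idma_filter, dma_dev);
}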
...
@@ -365,7 +365,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param)
 static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
 {
-	return param == chan->device->dev->parent;
+	return param == chan->device->dev;
 }

 /*
@@ -434,7 +434,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
 		data->uart_16550_compatible = true;
 	}

-	/* Platforms with iDMA */
+	/* Platforms with iDMA 64-bit */
 	if (platform_get_resource_byname(to_platform_device(p->dev),
 					 IORESOURCE_MEM, "lpss_priv")) {
 		data->dma.rx_param = p->dev->parent;
...
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for the Intel integrated DMA 64-bit
+ *
+ * Copyright (C) 2019 Intel Corporation
+ */
+
+#ifndef __LINUX_DMA_IDMA64_H__
+#define __LINUX_DMA_IDMA64_H__
+
+/* Platform driver name */
+#define LPSS_IDMA64_DRIVER_NAME		"idma64"
+
+#endif /* __LINUX_DMA_IDMA64_H__ */
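Moving the name into a shared header lets the MFD parent that instantiates the iDMA64 platform device and anyone matching against it agree on a single string; intel-lpss is the real user, per the hunk above. A sketch of the producer side, assuming the standard mfd_cell API:

#include <linux/mfd/core.h>
#include <linux/dma/idma64.h>

/* Sketch: the cell that creates the idma64 platform device now takes
 * its name from the shared header instead of a file-local #define. */
static const struct mfd_cell example_idma64_cell = {
	.name = LPSS_IDMA64_DRIVER_NAME,
};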