Commit e964f1e0 authored by Linus Torvalds

Merge tag 'dmaengine-5.7-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:
 "Core:
   - Some code cleanup and optimization in core by Andy

   - Debugfs support for displaying dmaengine channels by Peter

  Drivers:
   - New driver for uniphier-xdmac controller

   - Updates to stm32 dma, mdma and dmamux drivers and PM support

   - More updates to idxd drivers

   - Bunch of changes in tegra-apb driver and cleaning up of pm
     functions

   - Bunch of spelling fixes and patches replacing zero-length arrays
     with flexible-array members

   - Shutdown hook for fsl-dpaa2-qdma driver

   - Support for interleaved transfers for ti-edma and virtualization
     support for the ti k3-udma driver

   - Support for reset and updates in xilinx_dma driver

   - Improvements and locking updates in the at_hdmac and at_xdmac
     drivers"

* tag 'dmaengine-5.7-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (89 commits)
  dt-bindings: dma: renesas,usb-dmac: add r8a77961 support
  dmaengine: uniphier-xdmac: Remove redundant error log for platform_get_irq
  dmaengine: tegra-apb: Improve DMA synchronization
  dmaengine: tegra-apb: Don't save/restore IRQ flags in interrupt handler
  dmaengine: tegra-apb: mark PM functions as __maybe_unused
  dmaengine: fix spelling mistake "exceds" -> "exceeds"
  dmaengine: sprd: Set request pending flag when DMA controller is active
  dmaengine: ppc4xx: Use scnprintf() for avoiding potential buffer overflow
  dmaengine: idxd: remove global token limit check
  dmaengine: idxd: reflect shadow copy of traffic class programming
  dmaengine: idxd: Merge definition of dsa_batch_desc into dsa_hw_desc
  dmaengine: Create debug directories for DMA devices
  dmaengine: ti: k3-udma: Implement custom dbg_summary_show for debugfs
  dmaengine: Add basic debugfs support
  dmaengine: fsl-dpaa2-qdma: remove set but not used variable 'dpaa2_qdma'
  dmaengine: ti: edma: fix null dereference because of a typo in pointer name
  dmaengine: fsl-dpaa2-qdma: Adding shutdown hook
  dmaengine: uniphier-xdmac: Add UniPhier external DMA controller driver
  dt-bindings: dmaengine: Add UniPhier external DMA controller bindings
  dmaengine: ti: k3-udma: Implement support for atype (for virtualization)
  ...
parents 5c8db3eb cea582b5
...
@@ -16,6 +16,7 @@ Required Properties:
 	      - "renesas,r8a7794-usb-dmac" (R-Car E2)
 	      - "renesas,r8a7795-usb-dmac" (R-Car H3)
 	      - "renesas,r8a7796-usb-dmac" (R-Car M3-W)
+	      - "renesas,r8a77961-usb-dmac" (R-Car M3-W+)
 	      - "renesas,r8a77965-usb-dmac" (R-Car M3-N)
 	      - "renesas,r8a77990-usb-dmac" (R-Car E3)
 	      - "renesas,r8a77995-usb-dmac" (R-Car D3)
...
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/socionext,uniphier-xdmac.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Socionext UniPhier external DMA controller

description: |
  This describes the devicetree bindings for an external DMA engine to perform
  memory-to-memory or peripheral-to-memory data transfer capable of supporting
  16 channels, implemented in Socionext UniPhier SoCs.

maintainers:
  - Kunihiko Hayashi <hayashi.kunihiko@socionext.com>

allOf:
  - $ref: "dma-controller.yaml#"

properties:
  compatible:
    const: socionext,uniphier-xdmac

  reg:
    items:
      - description: XDMAC base register region (offset and length)
      - description: XDMAC extension register region (offset and length)

  interrupts:
    maxItems: 1

  "#dma-cells":
    const: 2
    description: |
      DMA request from clients consists of 2 cells:
        1. Channel index
        2. Transfer request factor number, If no transfer factor, use 0.
           The number is SoC-specific, and this should be specified with
           relation to the device to use the DMA controller.

  dma-channels:
    minimum: 1
    maximum: 16

additionalProperties: false

required:
  - compatible
  - reg
  - interrupts
  - "#dma-cells"

examples:
  - |
    xdmac: dma-controller@5fc10000 {
        compatible = "socionext,uniphier-xdmac";
        reg = <0x5fc10000 0x1000>, <0x5fc20000 0x800>;
        interrupts = <0 188 4>;
        #dma-cells = <2>;
        dma-channels = <16>;
    };

...
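The binding itself does not show the consumer side, so here is a minimal sketch of a client node using the two cells described above; the node, channel index 4 and request factor 22 are invented illustrative values, not taken from any UniPhier datasheet:

    /* Hypothetical XDMAC client: first cell selects the channel,
     * second cell is the SoC-specific transfer request factor
     * (0 if there is none). Values are illustrative only. */
    some-peripheral@5a000000 {
        compatible = "vendor,some-peripheral";
        reg = <0x5a000000 0x800>;
        dmas = <&xdmac 4 22>;
        dma-names = "rx-tx";
    };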
...
@@ -180,7 +180,7 @@ edma1_tptc0: tptc@27b0000 {
 };

 edma1_tptc1: tptc@27b8000 {
-	compatible = "ti, k2g-edma3-tptc", "ti,edma3-tptc";
+	compatible = "ti,k2g-edma3-tptc", "ti,edma3-tptc";
 	reg = <0x027b8000 0x400>;
 	power-domains = <&k2g_pds 0x4f>;
 };
...
...
@@ -45,7 +45,8 @@ allOf:
 properties:
   "#dma-cells":
-    const: 1
+    minimum: 1
+    maximum: 2
     description: |
       The cell is the PSI-L thread ID of the remote (to UDMAP) end.
       Valid ranges for thread ID depends on the data movement direction:
@@ -55,6 +56,8 @@ properties:
       Please refer to the device documentation for the PSI-L thread map and also
       the PSI-L peripheral chapter for the correct thread ID.

+      When #dma-cells is 2, the second parameter is the channel ATYPE.
+
   compatible:
     enum:
       - ti,am654-navss-main-udmap
@@ -131,6 +134,20 @@ required:
   - ti,sci-rm-range-rchan
   - ti,sci-rm-range-rflow

+if:
+  properties:
+    "#dma-cells":
+      const: 2
+then:
+  properties:
+    ti,udma-atype:
+      description: ATYPE value which should be used by non slave channels
+      allOf:
+        - $ref: /schemas/types.yaml#/definitions/uint32
+
+  required:
+    - ti,udma-atype
+
 examples:
   - |+
     cbass_main {
...
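As a sketch of what the two-cell form enables: a consumer would pass the PSI-L thread ID as before, plus the channel ATYPE as the new second cell. The node and thread IDs below are hypothetical illustrative values, not from a real board file:

    /* Hypothetical consumer with #dma-cells = <2>: first cell is the
     * PSI-L thread ID, second cell the channel ATYPE. */
    mcasp0: mcasp@2b00000 {
        /* ... */
        dmas = <&main_udmap 0xc400 0>, <&main_udmap 0x4400 0>;
        dma-names = "tx", "rx";
    };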
...
@@ -616,8 +616,8 @@ config TXX9_DMAC
 	  integrated in chips such as the Toshiba TX4927/38/39.

 config TEGRA20_APB_DMA
-	bool "NVIDIA Tegra20 APB DMA support"
-	depends on ARCH_TEGRA
+	tristate "NVIDIA Tegra20 APB DMA support"
+	depends on ARCH_TEGRA || COMPILE_TEST
 	select DMA_ENGINE
 	help
 	  Support for the NVIDIA Tegra20 APB DMA controller driver. The
@@ -658,6 +658,17 @@ config UNIPHIER_MDMAC
 	  UniPhier platform. This DMA controller is used as the external
 	  DMA engine of the SD/eMMC controllers of the LD4, Pro4, sLD8 SoCs.

+config UNIPHIER_XDMAC
+	tristate "UniPhier XDMAC support"
+	depends on ARCH_UNIPHIER || COMPILE_TEST
+	depends on OF
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the XDMAC (external DMA controller) on the
+	  UniPhier platform. This DMA controller can transfer data from
+	  memory to memory, memory to peripheral and peripheral to memory.
+
 config XGENE_DMA
 	tristate "APM X-Gene DMA support"
 	depends on ARCH_XGENE || COMPILE_TEST
...
...
@@ -78,6 +78,7 @@ obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
 obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
+obj-$(CONFIG_UNIPHIER_XDMAC) += uniphier-xdmac.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx_dma.o
 obj-$(CONFIG_ST_FDMA) += st_fdma.o
...
...
@@ -243,7 +243,6 @@ enum atc_status {
  * @active_list: list of descriptors dmaengine is being running on
  * @queue: list of descriptors ready to be submitted to engine
  * @free_list: list of descriptors usable by the channel
- * @descs_allocated: records the actual size of the descriptor pool
  */
 struct at_dma_chan {
 	struct dma_chan		chan_common;
@@ -264,7 +263,6 @@ struct at_dma_chan {
 	struct list_head	active_list;
 	struct list_head	queue;
 	struct list_head	free_list;
-	unsigned int		descs_allocated;
 };

 #define	channel_readl(atchan, name) \
...
...
@@ -1543,9 +1543,6 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
 	struct at_xdmac_desc	*desc;
-	unsigned long		flags;
-
-	spin_lock_irqsave(&atchan->lock, flags);

 	/*
 	 * If channel is enabled, do nothing, advance_work will be triggered
@@ -1559,8 +1556,6 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 		if (!desc->active_xfer)
 			at_xdmac_start_xfer(atchan, desc);
 	}
-
-	spin_unlock_irqrestore(&atchan->lock, flags);
 }

 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1596,7 +1591,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
 	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
 		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");

-	spin_lock_bh(&atchan->lock);
+	spin_lock_irq(&atchan->lock);

 	/* Channel must be disabled first as it's not done automatically */
 	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
@@ -1607,7 +1602,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
 				    struct at_xdmac_desc,
 				    xfer_node);
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irq(&atchan->lock);

 	/* Print bad descriptor's details if needed */
 	dev_dbg(chan2dev(&atchan->chan),
@@ -1640,31 +1635,31 @@ static void at_xdmac_tasklet(unsigned long data)
 		if (atchan->irq_status & error_mask)
 			at_xdmac_handle_error(atchan);

-		spin_lock(&atchan->lock);
+		spin_lock_irq(&atchan->lock);
 		desc = list_first_entry(&atchan->xfers_list,
 					struct at_xdmac_desc,
 					xfer_node);
 		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
 		if (!desc->active_xfer) {
 			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
-			spin_unlock(&atchan->lock);
+			spin_unlock_irq(&atchan->lock);
 			return;
 		}

 		txd = &desc->tx_dma_desc;

 		at_xdmac_remove_xfer(atchan, desc);
-		spin_unlock(&atchan->lock);
+		spin_unlock_irq(&atchan->lock);

-		if (!at_xdmac_chan_is_cyclic(atchan)) {
-			dma_cookie_complete(txd);
-			if (txd->flags & DMA_PREP_INTERRUPT)
-				dmaengine_desc_get_callback_invoke(txd, NULL);
-		}
+		dma_cookie_complete(txd);
+		if (txd->flags & DMA_PREP_INTERRUPT)
+			dmaengine_desc_get_callback_invoke(txd, NULL);

 		dma_run_dependencies(txd);

+		spin_lock_irq(&atchan->lock);
 		at_xdmac_advance_work(atchan);
+		spin_unlock_irq(&atchan->lock);
 	}
 }
@@ -1725,11 +1720,15 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
 static void at_xdmac_issue_pending(struct dma_chan *chan)
 {
 	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+	unsigned long flags;

 	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);

-	if (!at_xdmac_chan_is_cyclic(atchan))
+	if (!at_xdmac_chan_is_cyclic(atchan)) {
+		spin_lock_irqsave(&atchan->lock, flags);
 		at_xdmac_advance_work(atchan);
+		spin_unlock_irqrestore(&atchan->lock, flags);
+	}

 	return;
 }
@@ -1822,26 +1821,21 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac_desc	*desc;
 	int			i;
-	unsigned long		flags;

-	spin_lock_irqsave(&atchan->lock, flags);

 	if (at_xdmac_chan_is_enabled(atchan)) {
 		dev_err(chan2dev(chan),
 			"can't allocate channel resources (channel enabled)\n");
-		i = -EIO;
-		goto spin_unlock;
+		return -EIO;
 	}

 	if (!list_empty(&atchan->free_descs_list)) {
 		dev_err(chan2dev(chan),
 			"can't allocate channel resources (channel not free from a previous use)\n");
-		i = -EIO;
-		goto spin_unlock;
+		return -EIO;
 	}

 	for (i = 0; i < init_nr_desc_per_channel; i++) {
-		desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
+		desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
 		if (!desc) {
 			dev_warn(chan2dev(chan),
 				 "only %d descriptors have been allocated\n", i);
@@ -1854,8 +1848,6 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)

 	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

-spin_unlock:
-	spin_unlock_irqrestore(&atchan->lock, flags);
 	return i;
 }
...
...
@@ -120,7 +120,7 @@ struct sba_request {
 	struct brcm_message msg;
 	struct dma_async_tx_descriptor tx;
 	/* SBA commands */
-	struct brcm_sba_command cmds[0];
+	struct brcm_sba_command cmds[];
 };

 enum sba_version {
...
...
@@ -58,6 +58,87 @@ static DEFINE_IDA(dma_ida);
 static LIST_HEAD(dma_device_list);
 static long dmaengine_ref_count;

+/* --- debugfs implementation --- */
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static struct dentry *rootdir;
+
+static void dmaengine_debug_register(struct dma_device *dma_dev)
+{
+	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
+						   rootdir);
+	if (IS_ERR(dma_dev->dbg_dev_root))
+		dma_dev->dbg_dev_root = NULL;
+}
+
+static void dmaengine_debug_unregister(struct dma_device *dma_dev)
+{
+	debugfs_remove_recursive(dma_dev->dbg_dev_root);
+	dma_dev->dbg_dev_root = NULL;
+}
+
+static void dmaengine_dbg_summary_show(struct seq_file *s,
+				       struct dma_device *dma_dev)
+{
+	struct dma_chan *chan;
+
+	list_for_each_entry(chan, &dma_dev->channels, device_node) {
+		if (chan->client_count) {
+			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
+				   chan->dbg_client_name ?: "in-use");
+
+			if (chan->router)
+				seq_printf(s, " (via router: %s)\n",
+					   dev_name(chan->router->dev));
+			else
+				seq_puts(s, "\n");
+		}
+	}
+}
+
+static int dmaengine_summary_show(struct seq_file *s, void *data)
+{
+	struct dma_device *dma_dev = NULL;
+
+	mutex_lock(&dma_list_mutex);
+	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
+		seq_printf(s, "dma%d (%s): number of channels: %u\n",
+			   dma_dev->dev_id, dev_name(dma_dev->dev),
+			   dma_dev->chancnt);
+
+		if (dma_dev->dbg_summary_show)
+			dma_dev->dbg_summary_show(s, dma_dev);
+		else
+			dmaengine_dbg_summary_show(s, dma_dev);
+
+		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
+			seq_puts(s, "\n");
+	}
+	mutex_unlock(&dma_list_mutex);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
+
+static void __init dmaengine_debugfs_init(void)
+{
+	rootdir = debugfs_create_dir("dmaengine", NULL);
+
+	/* /sys/kernel/debug/dmaengine/summary */
+	debugfs_create_file("summary", 0444, rootdir, NULL,
+			    &dmaengine_summary_fops);
+}
+#else
+static inline void dmaengine_debugfs_init(void) { }
+static inline int dmaengine_debug_register(struct dma_device *dma_dev)
+{
+	return 0;
+}
+
+static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
+#endif /* DEBUG_FS */
+
 /* --- sysfs implementation --- */

 #define DMA_SLAVE_NAME "slave"
@@ -760,6 +841,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 	return chan ? chan : ERR_PTR(-EPROBE_DEFER);

 found:
+#ifdef CONFIG_DEBUG_FS
+	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
+					  name);
+#endif
+
 	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
 	if (!chan->name)
 		return chan;
@@ -837,6 +923,11 @@ void dma_release_channel(struct dma_chan *chan)
 		chan->name = NULL;
 		chan->slave = NULL;
 	}
+#ifdef CONFIG_DEBUG_FS
+	kfree(chan->dbg_client_name);
+	chan->dbg_client_name = NULL;
+#endif
 	mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -1196,6 +1287,8 @@ int dma_async_device_register(struct dma_device *device)
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);

+	dmaengine_debug_register(device);
+
 	return 0;

 err_out:
@@ -1229,6 +1322,8 @@ void dma_async_device_unregister(struct dma_device *device)
 {
 	struct dma_chan *chan, *n;

+	dmaengine_debug_unregister(device);
+
 	list_for_each_entry_safe(chan, n, &device->channels, device_node)
 		__dma_async_device_channel_unregister(device, chan);

@@ -1559,6 +1654,11 @@ static int __init dma_bus_init(void)
 	if (err)
 		return err;

-	return class_register(&dma_devclass);
+	err = class_register(&dma_devclass);
+	if (!err)
+		dmaengine_debugfs_init();
+
+	return err;
 }
 arch_initcall(dma_bus_init);
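Putting the hunks above together: with debugfs mounted, reading /sys/kernel/debug/dmaengine/summary prints one block per registered DMA device, built from the seq_printf() formats in dmaengine_summary_show() and dmaengine_dbg_summary_show(). On a hypothetical two-device system (device and client names invented for illustration) the output would look roughly like:

    dma0 (285c0000.dma-controller): number of channels: 8
     dma0chan0    | ffa00000.spi:tx
     dma0chan1    | ffa00000.spi:rx

    dma1 (ff240000.dma-controller): number of channels: 4
     dma1chan0    | in-use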
...
@@ -182,4 +182,20 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);

+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+
+static inline struct dentry *
+dmaengine_get_debugfs_root(struct dma_device *dma_dev) {
+	return dma_dev->dbg_dev_root;
+}
+#else
+struct dentry;
+static inline struct dentry *
+dmaengine_get_debugfs_root(struct dma_device *dma_dev)
+{
+	return NULL;
+}
+#endif /* CONFIG_DEBUG_FS */
+
 #endif
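The diff does not show a driver actually consuming this helper, so here is a minimal sketch of how one could hook in; the foo_* names and the quirks field are invented, while dbg_summary_show, dbg_client_name and dmaengine_get_debugfs_root() come from this series (the k3-udma patch in the shortlog implements a real custom dbg_summary_show):

	/* Hypothetical driver-side callback replacing the generic
	 * per-channel listing in the "summary" file. */
	static void foo_dbg_summary_show(struct seq_file *s,
					 struct dma_device *dma_dev)
	{
		struct dma_chan *chan;

		/* One line per channel that currently has a client */
		list_for_each_entry(chan, &dma_dev->channels, device_node) {
			if (chan->client_count)
				seq_printf(s, " %-13s| %s\n",
					   dma_chan_name(chan),
					   chan->dbg_client_name ?: "in-use");
		}
	}

	/* In probe, after filling in the dma_device: */
	foo->ddev.dbg_summary_show = foo_dbg_summary_show;
	ret = dma_async_device_register(&foo->ddev);
	if (ret)
		return ret;

	/* Driver-private files can go under the per-device directory */
	debugfs_create_u32("quirks", 0444,
			   dmaengine_get_debugfs_root(&foo->ddev),
			   &foo->quirks);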
...
@@ -790,6 +790,20 @@ static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
 	return 0;
 }

+static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
+{
+	struct dpaa2_qdma_priv *priv;
+	struct device *dev;
+
+	dev = &ls_dev->dev;
+	priv = dev_get_drvdata(dev);
+
+	dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
+	dpaa2_dpdmai_dpio_unbind(priv);
+	dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+	dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
 static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
 	{
 		.vendor = FSL_MC_VENDOR_FREESCALE,
@@ -805,6 +819,7 @@ static struct fsl_mc_driver dpaa2_qdma_driver = {
 	},
 	.probe		= dpaa2_qdma_probe,
 	.remove		= dpaa2_qdma_remove,
+	.shutdown	= dpaa2_qdma_shutdown,
 	.match_id_table	= dpaa2_qdma_id_table
 };
...
...
@@ -159,6 +159,27 @@ int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
 	return 0;
 }

+/**
+ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
+ * @mc_io:	Pointer to MC portal's I/O object
+ * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:	Token of DPDMAI object
+ *
+ * Return:	'0' on Success; error code otherwise.
+ */
+int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+	struct fsl_mc_command cmd = { 0 };
+
+	/* prepare command */
+	cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
+					  cmd_flags, token);
+
+	/* send command to mc*/
+	return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_destroy);
+
 /**
  * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
  * @mc_io:	Pointer to MC portal's I/O object
...
...
@@ -18,6 +18,7 @@
 #define DPDMAI_CMDID_CLOSE		DPDMAI_CMDID_FORMAT(0x800)
 #define DPDMAI_CMDID_OPEN		DPDMAI_CMDID_FORMAT(0x80E)
 #define DPDMAI_CMDID_CREATE		DPDMAI_CMDID_FORMAT(0x90E)
+#define DPDMAI_CMDID_DESTROY		DPDMAI_CMDID_FORMAT(0x900)
 #define DPDMAI_CMDID_ENABLE		DPDMAI_CMDID_FORMAT(0x002)
 #define DPDMAI_CMDID_DISABLE		DPDMAI_CMDID_FORMAT(0x003)
@@ -160,6 +161,7 @@ struct dpdmai_rx_queue_attr {
 int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
 		int dpdmai_id, u16 *token);
 int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
 int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
 		  const struct dpdmai_cfg *cfg, u16 *token);
 int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
...
...
@@ -74,12 +74,10 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
 	struct idxd_device *idxd;
 	struct idxd_wq *wq;
 	struct device *dev;
-	struct idxd_cdev *idxd_cdev;

 	wq = inode_wq(inode);
 	idxd = wq->idxd;
 	dev = &idxd->pdev->dev;
-	idxd_cdev = &wq->idxd_cdev;

 	dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
@@ -139,6 +137,8 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
 	dev_dbg(&pdev->dev, "%s called\n", __func__);
 	rc = check_vma(wq, vma, __func__);
+	if (rc < 0)
+		return rc;

 	vma->vm_flags |= VM_DONTCOPY;
 	pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
...
...
@@ -584,11 +584,11 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
 		struct idxd_group *group = &idxd->groups[i];

 		if (group->tc_a == -1)
-			group->grpcfg.flags.tc_a = 0;
+			group->tc_a = group->grpcfg.flags.tc_a = 0;
 		else
 			group->grpcfg.flags.tc_a = group->tc_a;
 		if (group->tc_b == -1)
-			group->grpcfg.flags.tc_b = 1;
+			group->tc_b = group->grpcfg.flags.tc_b = 1;
 		else
 			group->grpcfg.flags.tc_b = group->tc_b;
 		group->grpcfg.flags.use_token_limit = group->use_token_limit;
...
...
@@ -419,7 +419,7 @@ static ssize_t engine_group_id_store(struct device *dev,
 	struct idxd_device *idxd = engine->idxd;
 	long id;
 	int rc;
-	struct idxd_group *prevg, *group;
+	struct idxd_group *prevg;

 	rc = kstrtol(buf, 10, &id);
 	if (rc < 0)
@@ -439,7 +439,6 @@ static ssize_t engine_group_id_store(struct device *dev,
 		return count;
 	}

-	group = &idxd->groups[id];
 	prevg = engine->group;

 	if (prevg)
@@ -513,9 +512,6 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
 	if (idxd->state == IDXD_DEV_ENABLED)
 		return -EPERM;

-	if (idxd->token_limit == 0)
-		return -EPERM;
-
 	if (val > idxd->max_tokens)
 		return -EINVAL;

@@ -561,8 +557,6 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
 	if (idxd->state == IDXD_DEV_ENABLED)
 		return -EPERM;

-	if (idxd->token_limit == 0)
-		return -EPERM;
 	if (val < 4 * group->num_engines ||
 	    val > group->tokens_reserved + idxd->nr_tokens)
 		return -EINVAL;
@@ -1180,6 +1174,16 @@ static ssize_t op_cap_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(op_cap);

+static ssize_t gen_cap_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct idxd_device *idxd =
+		container_of(dev, struct idxd_device, conf_dev);
+
+	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
+}
+static DEVICE_ATTR_RO(gen_cap);
+
 static ssize_t configurable_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
@@ -1317,6 +1321,7 @@ static struct attribute *idxd_device_attributes[] = {
 	&dev_attr_max_batch_size.attr,
 	&dev_attr_max_transfer_size.attr,
 	&dev_attr_op_cap.attr,
+	&dev_attr_gen_cap.attr,
 	&dev_attr_configurable.attr,
 	&dev_attr_clients.attr,
 	&dev_attr_state.attr,
...
...
@@ -102,7 +102,7 @@ struct ioat_dca_priv {
 	int			 max_requesters;
 	int			 requester_count;
 	u8			 tag_map[IOAT_TAG_MAP_LEN];
-	struct ioat_dca_slot	 req_slots[0];
+	struct ioat_dca_slot	 req_slots[];
 };

 static int ioat_dca_dev_managed(struct dca_provider *dca,
...
...
@@ -4303,7 +4303,7 @@ static ssize_t devices_show(struct device_driver *dev, char *buf)
 	for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
 		if (ppc440spe_adma_devices[i] == -1)
 			continue;
-		size += snprintf(buf + size, PAGE_SIZE - size,
+		size += scnprintf(buf + size, PAGE_SIZE - size,
 				"PPC440SP(E)-ADMA.%d: %s\n", i,
 				ppc_adma_errors[ppc440spe_adma_devices[i]]);
 	}
...
...
@@ -78,7 +78,7 @@ struct sa11x0_dma_desc {
 	bool			cyclic;

 	unsigned		sglen;
-	struct sa11x0_dma_sg	sg[0];
+	struct sa11x0_dma_sg	sg[];
 };

 struct sa11x0_dma_phy;
...
...
@@ -1219,7 +1219,7 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 	sg_len = buf_len / period_len;
 	if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
 		dev_err(chan->device->dev,
-			"chan%u: sg length %d exceds limit %d",
+			"chan%u: sg length %d exceeds limit %d",
 			rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
 		return NULL;
 	}
...
...
@@ -709,7 +709,7 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
 	BUG_ON(!schan->desc_num);

 	if (sg_len > SHDMA_MAX_SG_LEN) {
-		dev_err(schan->dev, "sg length %d exceds limit %d",
+		dev_err(schan->dev, "sg length %d exceeds limit %d",
 			sg_len, SHDMA_MAX_SG_LEN);
 		return NULL;
 	}
...
...
@@ -212,7 +212,7 @@ struct sprd_dma_dev {
 	struct clk		*ashb_clk;
 	int			irq;
 	u32			total_chns;
-	struct sprd_dma_chn	channels[0];
+	struct sprd_dma_chn	channels[];
 };

 static void sprd_dma_free_desc(struct virt_dma_desc *vd);
@@ -486,6 +486,28 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
 	return 0;
 }

+static void sprd_dma_set_pending(struct sprd_dma_chn *schan, bool enable)
+{
+	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
+	u32 reg, val, req_id;
+
+	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
+		return;
+
+	/* The DMA request id always starts from 0. */
+	req_id = schan->dev_id - 1;
+
+	if (req_id < 32) {
+		reg = SPRD_DMA_GLB_REQ_PEND0_EN;
+		val = BIT(req_id);
+	} else {
+		reg = SPRD_DMA_GLB_REQ_PEND1_EN;
+		val = BIT(req_id - 32);
+	}
+
+	sprd_dma_glb_update(sdev, reg, val, enable ? val : 0);
+}
+
 static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
 				    struct sprd_dma_desc *sdesc)
 {
@@ -532,6 +554,7 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
 	 */
 	sprd_dma_set_chn_config(schan, schan->cur_desc);
 	sprd_dma_set_uid(schan);
+	sprd_dma_set_pending(schan, true);
 	sprd_dma_enable_chn(schan);

 	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
@@ -543,6 +566,7 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
 static void sprd_dma_stop(struct sprd_dma_chn *schan)
 {
 	sprd_dma_stop_and_disable(schan);
+	sprd_dma_set_pending(schan, false);
 	sprd_dma_unset_uid(schan);
 	sprd_dma_clear_int(schan);
 	schan->cur_desc = NULL;
...
...
@@ -15,6 +15,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/init.h>
+#include <linux/iopoll.h>
 #include <linux/jiffies.h>
 #include <linux/list.h>
 #include <linux/module.h>
@@ -207,7 +208,6 @@ struct stm32_dma_device {
 	struct dma_device ddev;
 	void __iomem *base;
 	struct clk *clk;
-	struct reset_control *rst;
 	bool mem2mem;
 	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
 };
@@ -422,29 +422,19 @@ static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
 static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
 {
 	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
-	u32 dma_scr, id;
+	u32 dma_scr, id, reg;

 	id = chan->id;
-	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+	reg = STM32_DMA_SCR(id);
+	dma_scr = stm32_dma_read(dmadev, reg);

 	if (dma_scr & STM32_DMA_SCR_EN) {
 		dma_scr &= ~STM32_DMA_SCR_EN;
-		stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr);
-
-		do {
-			dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
-			dma_scr &= STM32_DMA_SCR_EN;
-			if (!dma_scr)
-				break;
-
-			if (time_after_eq(jiffies, timeout)) {
-				dev_err(chan2dev(chan), "%s: timeout!\n",
-					__func__);
-				return -EBUSY;
-			}
-			cond_resched();
-		} while (1);
+		stm32_dma_write(dmadev, reg, dma_scr);
+
+		return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
+				dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
+				10, 1000000);
 	}

 	return 0;
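The rewrite above replaces the open-coded jiffies loop with readl_relaxed_poll_timeout_atomic() from <linux/iopoll.h>, added to the includes in the first hunk of this file's diff. Roughly, that helper expands to a busy-wait of the following shape; this is a simplified sketch of the iopoll pattern for illustration, not the kernel's exact macro:

	/* Simplified sketch: poll *addr until cond is true or timeout_us
	 * elapses, delaying delay_us between reads; it never sleeps, so
	 * it is usable in atomic context. Returns 0 or -ETIMEDOUT. */
	#define poll_timeout_atomic_sketch(addr, val, cond, delay_us, timeout_us) \
	({ \
		u64 __elapsed = 0; \
		int __ret = 0; \
		for (;;) { \
			(val) = readl_relaxed(addr); \
			if (cond) \
				break; \
			if (__elapsed >= (timeout_us)) { \
				__ret = -ETIMEDOUT; \
				break; \
			} \
			udelay(delay_us); \
			__elapsed += (delay_us); \
		} \
		__ret; \
	})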
@@ -488,7 +478,9 @@ static int stm32_dma_terminate_all(struct dma_chan *c)

 	spin_lock_irqsave(&chan->vchan.lock, flags);

-	if (chan->busy) {
-		stm32_dma_stop(chan);
+	if (chan->desc) {
+		vchan_terminate_vdesc(&chan->desc->vdesc);
+		if (chan->busy)
+			stm32_dma_stop(chan);
 		chan->desc = NULL;
 	}
@@ -545,6 +537,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
 		if (!vdesc)
 			return;

+		list_del(&vdesc->node);
+
 		chan->desc = to_stm32_dma_desc(vdesc);
 		chan->next_sg = 0;
 	}
@@ -555,6 +549,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
 	sg_req = &chan->desc->sg_req[chan->next_sg];
 	reg = &sg_req->chan_reg;

+	reg->dma_scr &= ~STM32_DMA_SCR_EN;
 	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
 	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
 	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
@@ -622,7 +617,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
 	} else {
 		chan->busy = false;
 		if (chan->next_sg == chan->desc->num_sgs) {
-			list_del(&chan->desc->vdesc.node);
 			vchan_cookie_complete(&chan->desc->vdesc);
 			chan->desc = NULL;
 		}
@@ -1275,6 +1269,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
 	struct dma_device *dd;
 	const struct of_device_id *match;
 	struct resource *res;
+	struct reset_control *rst;
 	int i, ret;

 	match = of_match_device(stm32_dma_of_match, &pdev->dev);
@@ -1296,8 +1291,10 @@ static int stm32_dma_probe(struct platform_device *pdev)
 	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(dmadev->clk)) {
-		dev_err(&pdev->dev, "Error: Missing controller clock\n");
-		return PTR_ERR(dmadev->clk);
+		ret = PTR_ERR(dmadev->clk);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Can't get clock\n");
+		return ret;
 	}

 	ret = clk_prepare_enable(dmadev->clk);
@@ -1309,13 +1306,19 @@ static int stm32_dma_probe(struct platform_device *pdev)
 	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
 						"st,mem2mem");

-	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
-	if (!IS_ERR(dmadev->rst)) {
-		reset_control_assert(dmadev->rst);
+	rst = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(rst)) {
+		ret = PTR_ERR(rst);
+		if (ret == -EPROBE_DEFER)
+			goto clk_free;
+	} else {
+		reset_control_assert(rst);
 		udelay(2);
-		reset_control_deassert(dmadev->rst);
+		reset_control_deassert(rst);
 	}

+	dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
+
 	dma_cap_set(DMA_SLAVE, dd->cap_mask);
 	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
 	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
@@ -1336,7 +1339,9 @@ static int stm32_dma_probe(struct platform_device *pdev)
 		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
 	dd->max_burst = STM32_DMA_MAX_BURST;
+	dd->descriptor_reuse = true;
 	dd->dev = &pdev->dev;
 	INIT_LIST_HEAD(&dd->channels);
@@ -1427,7 +1432,39 @@ static int stm32_dma_runtime_resume(struct device *dev)
 }
 #endif

+#ifdef CONFIG_PM_SLEEP
+static int stm32_dma_suspend(struct device *dev)
+{
+	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
+	int id, ret, scr;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0)
+		return ret;
+
+	for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
+		scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+		if (scr & STM32_DMA_SCR_EN) {
+			dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
+			return -EBUSY;
+		}
+	}
+
+	pm_runtime_put_sync(dev);
+
+	pm_runtime_force_suspend(dev);
+
+	return 0;
+}
+
+static int stm32_dma_resume(struct device *dev)
+{
+	return pm_runtime_force_resume(dev);
+}
+#endif
+
 static const struct dev_pm_ops stm32_dma_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume)
 	SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
 			   stm32_dma_runtime_resume, NULL)
 };
@@ -1438,10 +1475,11 @@ static struct platform_driver stm32_dma_driver = {
 		.of_match_table = stm32_dma_of_match,
 		.pm = &stm32_dma_pm_ops,
 	},
+	.probe = stm32_dma_probe,
 };

 static int __init stm32_dma_init(void)
 {
-	return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe);
+	return platform_driver_register(&stm32_dma_driver);
 }
 subsys_initcall(stm32_dma_init);
...
@@ -35,12 +35,14 @@ struct stm32_dmamux {
 struct stm32_dmamux_data {
 	struct dma_router dmarouter;
 	struct clk *clk;
-	struct reset_control *rst;
 	void __iomem *iomem;
 	u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
 	u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
 	spinlock_t lock; /* Protects register access */
 	unsigned long *dma_inuse; /* Used DMA channel */
+	u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to backup CCR register
+						 * in suspend
+						 */
 	u32 dma_reqs[]; /* Number of DMA Request per DMA masters.
 			 *  [0] holds number of DMA Masters.
 			 *  To be kept at very end end of this structure
@@ -179,6 +181,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
 	struct stm32_dmamux_data *stm32_dmamux;
 	struct resource *res;
 	void __iomem *iomem;
+	struct reset_control *rst;
 	int i, count, ret;
 	u32 dma_req;
@@ -251,16 +254,26 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
 	stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(stm32_dmamux->clk)) {
 		ret = PTR_ERR(stm32_dmamux->clk);
-		if (ret == -EPROBE_DEFER)
-			dev_info(&pdev->dev, "Missing controller clock\n");
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Missing clock controller\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(stm32_dmamux->clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
 		return ret;
 	}

-	stm32_dmamux->rst = devm_reset_control_get(&pdev->dev, NULL);
-	if (!IS_ERR(stm32_dmamux->rst)) {
-		reset_control_assert(stm32_dmamux->rst);
+	rst = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(rst)) {
+		ret = PTR_ERR(rst);
+		if (ret == -EPROBE_DEFER)
+			goto err_clk;
+	} else {
+		reset_control_assert(rst);
 		udelay(2);
-		reset_control_deassert(stm32_dmamux->rst);
+		reset_control_deassert(rst);
 	}

 	stm32_dmamux->iomem = iomem;
@@ -271,14 +284,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);

-	if (!IS_ERR(stm32_dmamux->clk)) {
-		ret = clk_prepare_enable(stm32_dmamux->clk);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
-			return ret;
-		}
-	}
-
 	pm_runtime_get_noresume(&pdev->dev);

 	/* Reset the dmamux */
@@ -287,8 +292,17 @@ static int stm32_dmamux_probe(struct platform_device *pdev)

 	pm_runtime_put(&pdev->dev);

-	return of_dma_router_register(node, stm32_dmamux_route_allocate,
+	ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
 				     &stm32_dmamux->dmarouter);
+	if (ret)
+		goto err_clk;
+
+	return 0;
+
+err_clk:
+	clk_disable_unprepare(stm32_dmamux->clk);
+
+	return ret;
 }

 #ifdef CONFIG_PM
@@ -318,7 +332,54 @@ static int stm32_dmamux_runtime_resume(struct device *dev)
 }
 #endif

+#ifdef CONFIG_PM_SLEEP
+static int stm32_dmamux_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+	int i, ret;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < stm32_dmamux->dma_requests; i++)
+		stm32_dmamux->ccr[i] = stm32_dmamux_read(stm32_dmamux->iomem,
+							 STM32_DMAMUX_CCR(i));
+
+	pm_runtime_put_sync(dev);
+
+	pm_runtime_force_suspend(dev);
+
+	return 0;
+}
+
+static int stm32_dmamux_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
+	int i, ret;
+
+	ret = pm_runtime_force_resume(dev);
+	if (ret < 0)
+		return ret;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < stm32_dmamux->dma_requests; i++)
+		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i),
+				   stm32_dmamux->ccr[i]);
+
+	pm_runtime_put_sync(dev);
+
+	return 0;
+}
+#endif
+
 static const struct dev_pm_ops stm32_dmamux_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(stm32_dmamux_suspend, stm32_dmamux_resume)
 	SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
 			   stm32_dmamux_runtime_resume, NULL)
 };
...
...
@@ -273,7 +273,6 @@ struct stm32_mdma_device {
 	void __iomem *base;
 	struct clk *clk;
 	int irq;
-	struct reset_control *rst;
 	u32 nr_channels;
 	u32 nr_requests;
 	u32 nr_ahb_addr_masks;
@@ -1127,6 +1126,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
 		return;
 	}

+	list_del(&vdesc->node);
+
 	chan->desc = to_stm32_mdma_desc(vdesc);
 	hwdesc = chan->desc->node[0].hwdesc;
 	chan->curr_hwdesc = 0;
@@ -1242,7 +1243,9 @@ static int stm32_mdma_terminate_all(struct dma_chan *c)
 	LIST_HEAD(head);

 	spin_lock_irqsave(&chan->vchan.lock, flags);
-	if (chan->busy) {
-		stm32_mdma_stop(chan);
+	if (chan->desc) {
+		vchan_terminate_vdesc(&chan->desc->vdesc);
+		if (chan->busy)
+			stm32_mdma_stop(chan);
 		chan->desc = NULL;
 	}
@@ -1331,7 +1334,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
 static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
 {
-	list_del(&chan->desc->vdesc.node);
 	vchan_cookie_complete(&chan->desc->vdesc);
 	chan->desc = NULL;
 	chan->busy = false;
@@ -1532,6 +1534,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 	struct dma_device *dd;
 	struct device_node *of_node;
 	struct resource *res;
+	struct reset_control *rst;
 	u32 nr_channels, nr_requests;
 	int i, count, ret;
@@ -1579,8 +1582,8 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(dmadev->clk)) {
 		ret = PTR_ERR(dmadev->clk);
-		if (ret == -EPROBE_DEFER)
-			dev_info(&pdev->dev, "Missing controller clock\n");
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Missing clock controller\n");
 		return ret;
 	}
@@ -1590,11 +1593,15 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 		return ret;
 	}

-	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
-	if (!IS_ERR(dmadev->rst)) {
-		reset_control_assert(dmadev->rst);
+	rst = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(rst)) {
+		ret = PTR_ERR(rst);
+		if (ret == -EPROBE_DEFER)
+			goto err_clk;
+	} else {
+		reset_control_assert(rst);
 		udelay(2);
-		reset_control_deassert(dmadev->rst);
+		reset_control_deassert(rst);
 	}

 	dd = &dmadev->ddev;
@@ -1614,6 +1621,8 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 	dd->device_resume = stm32_mdma_resume;
 	dd->device_terminate_all = stm32_mdma_terminate_all;
 	dd->device_synchronize = stm32_mdma_synchronize;
+	dd->descriptor_reuse = true;
+
 	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
 		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
 		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
@@ -1637,25 +1646,27 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 	}

 	dmadev->irq = platform_get_irq(pdev, 0);
-	if (dmadev->irq < 0)
-		return dmadev->irq;
+	if (dmadev->irq < 0) {
+		ret = dmadev->irq;
+		goto err_clk;
+	}

 	ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
 			       0, dev_name(&pdev->dev), dmadev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to request IRQ\n");
-		return ret;
+		goto err_clk;
 	}

 	ret = dmaenginem_async_device_register(dd);
 	if (ret)
-		return ret;
+		goto err_clk;

 	ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev);
 	if (ret < 0) {
 		dev_err(&pdev->dev,
 			"STM32 MDMA DMA OF registration failed %d\n", ret);
-		goto err_unregister;
+		goto err_clk;
 	}

 	platform_set_drvdata(pdev, dmadev);
@@ -1668,7 +1679,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 	return 0;

-err_unregister:
+err_clk:
+	clk_disable_unprepare(dmadev->clk);
+
 	return ret;
 }
@@ -1697,7 +1710,40 @@ static int stm32_mdma_runtime_resume(struct device *dev)
 }
 #endif

+#ifdef CONFIG_PM_SLEEP
+static int stm32_mdma_pm_suspend(struct device *dev)
+{
+	struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
+	u32 ccr, id;
+	int ret;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0)
+		return ret;
+
+	for (id = 0; id < dmadev->nr_channels; id++) {
+		ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
+		if (ccr & STM32_MDMA_CCR_EN) {
+			dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
+			return -EBUSY;
+		}
+	}
+
+	pm_runtime_put_sync(dev);
+
+	pm_runtime_force_suspend(dev);
+
+	return 0;
+}
+
+static int stm32_mdma_pm_resume(struct device *dev)
+{
+	return pm_runtime_force_resume(dev);
+}
+#endif
+
 static const struct dev_pm_ops stm32_mdma_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(stm32_mdma_pm_suspend, stm32_mdma_pm_resume)
 	SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend,
 			   stm32_mdma_runtime_resume, NULL)
 };
...
...
@@ -697,11 +697,13 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
 		dest = sconfig->dst_addr;
 		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
 			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
-			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type);
+			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
+			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
 	} else {
 		src = sconfig->src_addr;
 		dest = buf;
 		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
 			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
 			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
 	}
...
...
@@ -164,7 +164,7 @@ struct tegra_adma {
 	const struct tegra_adma_chip_data *cdata;

 	/* Last member of the structure */
-	struct tegra_adma_chan		channels[0];
+	struct tegra_adma_chan		channels[];
 };

 static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
...
@@ -133,7 +133,6 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
 	const struct of_device_id *match;
 	struct device_node *dma_node;
 	struct ti_am335x_xbar_data *xbar;
-	struct resource *res;
 	void __iomem *iomem;
 	int i, ret;

@@ -173,8 +172,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
 		xbar->xbar_events = TI_AM335X_XBAR_LINES;
 	}

-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	iomem = devm_ioremap_resource(&pdev->dev, res);
+	iomem = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(iomem))
 		return PTR_ERR(iomem);

@@ -323,7 +321,6 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
 	struct device_node *dma_node;
 	struct ti_dra7_xbar_data *xbar;
 	struct property *prop;
-	struct resource *res;
 	u32 safe_val;
 	int sz;
 	void __iomem *iomem;

@@ -403,8 +400,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
 		kfree(rsv_events);
 	}

-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	iomem = devm_ioremap_resource(&pdev->dev, res);
+	iomem = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(iomem))
 		return PTR_ERR(iomem);
...
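devm_platform_ioremap_resource() folds the two calls it replaces into one helper; its mainline definition is essentially the following, so the two probe functions above lose a local variable with no behaviour change:

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}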
@@ -1275,6 +1275,81 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
 }

+static struct dma_async_tx_descriptor *
+edma_prep_dma_interleaved(struct dma_chan *chan,
+			  struct dma_interleaved_template *xt,
+			  unsigned long tx_flags)
+{
+	struct device *dev = chan->device->dev;
+	struct edma_chan *echan = to_edma_chan(chan);
+	struct edmacc_param *param;
+	struct edma_desc *edesc;
+	size_t src_icg, dst_icg;
+	int src_bidx, dst_bidx;
+
+	/* Slave mode is not supported */
+	if (is_slave_direction(xt->dir))
+		return NULL;
+
+	if (xt->frame_size != 1 || xt->numf == 0)
+		return NULL;
+
+	if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K)
+		return NULL;
+
+	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+	if (src_icg) {
+		src_bidx = src_icg + xt->sgl[0].size;
+	} else if (xt->src_inc) {
+		src_bidx = xt->sgl[0].size;
+	} else {
+		dev_err(dev, "%s: SRC constant addressing is not supported\n",
+			__func__);
+		return NULL;
+	}
+
+	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+	if (dst_icg) {
+		dst_bidx = dst_icg + xt->sgl[0].size;
+	} else if (xt->dst_inc) {
+		dst_bidx = xt->sgl[0].size;
+	} else {
+		dev_err(dev, "%s: DST constant addressing is not supported\n",
+			__func__);
+		return NULL;
+	}
+
+	if (src_bidx > SZ_64K || dst_bidx > SZ_64K)
+		return NULL;
+
+	edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
+	if (!edesc)
+		return NULL;
+
+	edesc->direction = DMA_MEM_TO_MEM;
+	edesc->echan = echan;
+	edesc->pset_nr = 1;
+
+	param = &edesc->pset[0].param;
+
+	param->src = xt->src_start;
+	param->dst = xt->dst_start;
+
+	param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
+	param->ccnt = 1;
+
+	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+	param->src_dst_cidx = 0;
+
+	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+	param->opt |= ITCCHEN;
+	/* Enable transfer complete interrupt if requested */
+	if (tx_flags & DMA_PREP_INTERRUPT)
+		param->opt |= TCINTEN;
+	else
+		edesc->polled = true;
+
+	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
+}
+
 static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	size_t period_len, enum dma_transfer_direction direction,

@@ -1917,7 +1992,9 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
 			 "Legacy memcpy is enabled, things might not work\n");

 		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
+		dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask);
 		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+		s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
 		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
 	}

@@ -1953,8 +2030,10 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
 		dma_cap_zero(m_ddev->cap_mask);
 		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
+		dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
 		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
+		m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
 		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
 		m_ddev->device_free_chan_resources = edma_free_chan_resources;
 		m_ddev->device_issue_pending = edma_issue_pending;
...
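A DMA client reaches the new callback through the generic interleaved API. A minimal sketch of a strided (2D) memory-to-memory copy; channel request and completion handling are omitted and all names are illustrative:

#include <linux/dmaengine.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static int submit_2d_copy(struct dma_chan *chan, dma_addr_t src,
			  dma_addr_t dst, size_t row_bytes, size_t stride,
			  size_t nr_rows)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_interleaved_template *xt;
	dma_cookie_t cookie;

	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;
	xt->numf = nr_rows;		/* number of frames (rows) */
	xt->frame_size = 1;		/* one chunk per frame, as eDMA requires */
	xt->sgl[0].size = row_bytes;
	xt->sgl[0].icg = stride - row_bytes;	/* gap between consecutive rows */
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = true;		/* honour the inter-chunk gap on both sides */
	xt->dst_sgl = true;

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);			/* the provider copied what it needs */
	if (!tx)
		return -EINVAL;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}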
@@ -32,6 +32,7 @@ struct k3_udma_glue_common {
 	bool epib;
 	u32 psdata_size;
 	u32 swdata_size;
+	u32 atype;
 };

 struct k3_udma_glue_tx_channel {

@@ -121,6 +122,15 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
 		return -ENOENT;

 	thread_id = dma_spec.args[0];
+	if (dma_spec.args_count == 2) {
+		if (dma_spec.args[1] > 2) {
+			dev_err(common->dev, "Invalid channel atype: %u\n",
+				dma_spec.args[1]);
+			ret = -EINVAL;
+			goto out_put_spec;
+		}
+		common->atype = dma_spec.args[1];
+	}

 	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
 		ret = -EINVAL;

@@ -202,7 +212,8 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
 			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
 			TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
 			TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
-			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID;
+			TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
+			TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

 	req.nav_id = tisci_rm->tisci_dev_id;
 	req.index = tx_chn->udma_tchan_id;
 	if (tx_chn->tx_pause_on_err)

@@ -216,6 +227,7 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
 		req.tx_supr_tdpkt = 1;
 	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
 	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
+	req.tx_atype = tx_chn->common.atype;

 	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
 }

@@ -502,7 +514,8 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
 			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
 			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
 			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
-			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
+			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
+			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

 	req.nav_id = tisci_rm->tisci_dev_id;
 	req.index = rx_chn->udma_rchan_id;

@@ -519,6 +532,7 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
 		req.flowid_cnt = rx_chn->flow_num;
 	}
 	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
+	req.rx_atype = rx_chn->common.atype;

 	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
 	if (ret)
...
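A note on the pattern visible in both hunks: TISCI channel configuration follows a request-plus-bitmask convention, where the firmware only consumes fields whose VALID bit is set in valid_params. That is why each new field (tx_atype/rx_atype here) arrives paired with a new VALID flag. The shape of such a call, trimmed to the new field only:

struct ti_sci_msg_rm_udmap_tx_ch_cfg req = { 0 };

req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
req.nav_id = tisci_rm->tisci_dev_id;
req.index = tx_chn->udma_tchan_id;
req.tx_atype = tx_chn->common.atype;	/* consumed only because the VALID bit is set */

ret = tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);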
@@ -149,6 +149,7 @@ struct udma_dev {
 	struct udma_chan *channels;
 	u32 psil_base;
+	u32 atype;
 };

 struct udma_desc {

@@ -192,6 +193,7 @@ struct udma_chan_config {
 	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
 	bool notdpkt; /* Suppress sending TDC packet */
 	int remote_thread_id;
+	u32 atype;
 	u32 src_thread;
 	u32 dst_thread;
 	enum psil_endpoint_type ep_type;

@@ -1569,7 +1571,8 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
-	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID)
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

 #define TISCI_RCHAN_VALID_PARAMS (			\
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\

@@ -1579,7 +1582,8 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
 	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
-	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID)
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
+	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
 {

@@ -1601,6 +1605,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
 	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
 	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
 	req_tx.txcq_qnum = tc_ring;
+	req_tx.tx_atype = ud->atype;

 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
 	if (ret) {

@@ -1614,6 +1619,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
 	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
 	req_rx.rxcq_qnum = tc_ring;
 	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
+	req_rx.rx_atype = ud->atype;

 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
 	if (ret)

@@ -1649,6 +1655,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
 	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
 	req_tx.tx_fetch_size = fetch_size >> 2;
 	req_tx.txcq_qnum = tc_ring;
+	req_tx.tx_atype = uc->config.atype;

 	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
 	if (ret)

@@ -1685,6 +1692,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
 	req_rx.rx_fetch_size = fetch_size >> 2;
 	req_rx.rxcq_qnum = rx_ring;
 	req_rx.rx_chan_type = mode;
+	req_rx.rx_atype = uc->config.atype;

 	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
 	if (ret) {
@@ -3063,13 +3071,18 @@ static void udma_free_chan_resources(struct dma_chan *chan)

 static struct platform_driver udma_driver;

+struct udma_filter_param {
+	int remote_thread_id;
+	u32 atype;
+};
+
 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
 {
 	struct udma_chan_config *ucc;
 	struct psil_endpoint_config *ep_config;
+	struct udma_filter_param *filter_param;
 	struct udma_chan *uc;
 	struct udma_dev *ud;
-	u32 *args;

 	if (chan->device->dev->driver != &udma_driver.driver)
 		return false;

@@ -3077,9 +3090,16 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
 	uc = to_udma_chan(chan);
 	ucc = &uc->config;
 	ud = uc->ud;
-	args = param;
+	filter_param = param;
+
+	if (filter_param->atype > 2) {
+		dev_err(ud->dev, "Invalid channel atype: %u\n",
+			filter_param->atype);
+		return false;
+	}

-	ucc->remote_thread_id = args[0];
+	ucc->remote_thread_id = filter_param->remote_thread_id;
+	ucc->atype = filter_param->atype;

 	if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
 		ucc->dir = DMA_MEM_TO_DEV;

@@ -3092,6 +3112,7 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
 			ucc->remote_thread_id);
 		ucc->dir = DMA_MEM_TO_MEM;
 		ucc->remote_thread_id = -1;
+		ucc->atype = 0;
 		return false;
 	}

@@ -3130,13 +3151,20 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
 {
 	struct udma_dev *ud = ofdma->of_dma_data;
 	dma_cap_mask_t mask = ud->ddev.cap_mask;
+	struct udma_filter_param filter_param;
 	struct dma_chan *chan;

-	if (dma_spec->args_count != 1)
+	if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
 		return NULL;

-	chan = __dma_request_channel(&mask, udma_dma_filter_fn,
-				     &dma_spec->args[0], ofdma->of_node);
+	filter_param.remote_thread_id = dma_spec->args[0];
+	if (dma_spec->args_count == 2)
+		filter_param.atype = dma_spec->args[1];
+	else
+		filter_param.atype = 0;
+
+	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
+				     ofdma->of_node);
 	if (!chan) {
 		dev_err(ud->dev, "get channel fail in %s.\n", __func__);
 		return ERR_PTR(-EINVAL);

@@ -3473,6 +3501,66 @@ static int udma_setup_rx_flush(struct udma_dev *ud)
 	return 0;
 }

+#ifdef CONFIG_DEBUG_FS
+static void udma_dbg_summary_show_chan(struct seq_file *s,
+				       struct dma_chan *chan)
+{
+	struct udma_chan *uc = to_udma_chan(chan);
+	struct udma_chan_config *ucc = &uc->config;
+
+	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
+		   chan->dbg_client_name ?: "in-use");
+	seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
+
+	switch (uc->config.dir) {
+	case DMA_MEM_TO_MEM:
+		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
+			   ucc->src_thread, ucc->dst_thread);
+		break;
+	case DMA_DEV_TO_MEM:
+		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
+			   ucc->src_thread, ucc->dst_thread);
+		break;
+	case DMA_MEM_TO_DEV:
+		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
+			   ucc->src_thread, ucc->dst_thread);
+		break;
+	default:
+		seq_printf(s, ")\n");
+		return;
+	}
+
+	if (ucc->ep_type == PSIL_EP_NATIVE) {
+		seq_printf(s, "PSI-L Native");
+		if (ucc->metadata_size) {
+			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
+			if (ucc->psd_size)
+				seq_printf(s, " PSDsize:%u", ucc->psd_size);
+			seq_printf(s, " ]");
+		}
+	} else {
+		seq_printf(s, "PDMA");
+		if (ucc->enable_acc32 || ucc->enable_burst)
+			seq_printf(s, "[%s%s ]",
+				   ucc->enable_acc32 ? " ACC32" : "",
+				   ucc->enable_burst ? " BURST" : "");
+	}
+
+	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
+}
+
+static void udma_dbg_summary_show(struct seq_file *s,
+				  struct dma_device *dma_dev)
+{
+	struct dma_chan *chan;
+
+	list_for_each_entry(chan, &dma_dev->channels, device_node) {
+		if (chan->client_count)
+			udma_dbg_summary_show_chan(s, chan);
+	}
+}
+#endif /* CONFIG_DEBUG_FS */
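With this hook wired up (see the probe hunk below), the core's new debugfs summary delegates the per-channel lines to the driver. Going by the seq_printf() calls above, a busy PDMA channel would print roughly the following (values illustrative):

 dma0chan0    | 2b00000.mcasp:tx (MEM_TO_DEV, tchan1 [0x1001 -> 0xc400], PDMA[ ACC32 BURST ], TR mode)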
 #define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
 				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
 				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \

@@ -3519,6 +3607,12 @@ static int udma_probe(struct platform_device *pdev)
 		return ret;
 	}

+	ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
+	if (!ret && ud->atype > 2) {
+		dev_err(dev, "Invalid atype: %u\n", ud->atype);
+		return -EINVAL;
+	}
+
 	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
 	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

@@ -3553,6 +3647,9 @@ static int udma_probe(struct platform_device *pdev)
 	ud->ddev.device_resume = udma_resume;
 	ud->ddev.device_terminate_all = udma_terminate_all;
 	ud->ddev.device_synchronize = udma_synchronize;
+#ifdef CONFIG_DEBUG_FS
+	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
+#endif

 	ud->ddev.device_free_chan_resources = udma_free_chan_resources;
 	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
...
@@ -124,7 +124,7 @@ struct omap_desc {
 	uint32_t csdp;		/* CSDP value */

 	unsigned sglen;
-	struct omap_sg sg[0];
+	struct omap_sg sg[];
 };

 enum {
...

@@ -68,7 +68,7 @@ struct uniphier_mdmac_device {
 	struct dma_device ddev;
 	struct clk *clk;
 	void __iomem *reg_base;
-	struct uniphier_mdmac_chan channels[0];
+	struct uniphier_mdmac_chan channels[];
 };

 static struct uniphier_mdmac_chan *
...
@@ -125,7 +125,9 @@
 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)

 /* HW specific definitions */
-#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20
+#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE	0x20
+#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x2
+#define XILINX_CDMA_MAX_CHANS_PER_DEVICE	0x1

 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
 		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \

@@ -468,6 +470,7 @@ struct xilinx_dma_config {
			 struct clk **tx_clk, struct clk **txs_clk,
			 struct clk **rx_clk, struct clk **rxs_clk);
 	irqreturn_t (*irq_handler)(int irq, void *data);
+	const int max_channels;
 };

 /**
@@ -485,16 +488,15 @@ struct xilinx_dma_config {
  * @txs_clk: DMA mm2s stream clock
  * @rx_clk: DMA s2mm clock
  * @rxs_clk: DMA s2mm stream clock
- * @nr_channels: Number of channels DMA device supports
- * @chan_id: DMA channel identifier
+ * @s2mm_chan_id: DMA s2mm channel identifier
+ * @mm2s_chan_id: DMA mm2s channel identifier
  * @max_buffer_len: Max buffer length
- * @s2mm_index: S2MM channel index
  */
 struct xilinx_dma_device {
 	void __iomem *regs;
 	struct device *dev;
 	struct dma_device common;
-	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
+	struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
 	u32 flush_on_fsync;
 	bool ext_addr;
 	struct platform_device *pdev;

@@ -504,10 +506,9 @@ struct xilinx_dma_device {
 	struct clk *txs_clk;
 	struct clk *rx_clk;
 	struct clk *rxs_clk;
-	u32 nr_channels;
-	u32 chan_id;
+	u32 s2mm_chan_id;
+	u32 mm2s_chan_id;
 	u32 max_buffer_len;
-	u32 s2mm_index;
 };

 /* Macros */

@@ -1745,7 +1746,7 @@ static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
 		return IRQ_NONE;

 	if (chan->direction == DMA_DEV_TO_MEM)
-		chan_offset = chan->xdev->s2mm_index;
+		chan_offset = chan->xdev->dma_config->max_channels / 2;

 	chan_offset = chan_offset + (chan_id - 1);
 	chan = chan->xdev->chan[chan_offset];

@@ -2404,16 +2405,17 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 	u32 reg;
 	int err;

-	if (chan->cyclic)
-		xilinx_dma_chan_reset(chan);
-
-	err = chan->stop_transfer(chan);
-	if (err) {
-		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
-			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
-		chan->err = true;
-	}
+	if (!chan->cyclic) {
+		err = chan->stop_transfer(chan);
+		if (err) {
+			dev_err(chan->dev, "Cannot stop channel %p: %x\n",
+				chan, dma_ctrl_read(chan,
+						    XILINX_DMA_REG_DMASR));
+			chan->err = true;
+		}
+	}
+
+	xilinx_dma_chan_reset(chan);

 	/* Remove and free all of the descriptors in the lists */
 	xilinx_dma_free_descriptors(chan);
 	chan->idle = true;

@@ -2730,12 +2732,11 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
  *
  * @xdev: Driver specific device structure
  * @node: Device node
- * @chan_id: DMA Channel id
  *
  * Return: '0' on success and failure value on error
  */
 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
-				 struct device_node *node, int chan_id)
+				 struct device_node *node)
 {
 	struct xilinx_dma_chan *chan;
 	bool has_dre = false;

@@ -2787,8 +2788,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
 	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
 		chan->direction = DMA_MEM_TO_DEV;
-		chan->id = chan_id;
-		chan->tdest = chan_id;
+		chan->id = xdev->mm2s_chan_id++;
+		chan->tdest = chan->id;

 		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {

@@ -2804,9 +2805,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		   of_device_is_compatible(node,
 					   "xlnx,axi-dma-s2mm-channel")) {
 		chan->direction = DMA_DEV_TO_MEM;
-		chan->id = chan_id;
-		xdev->s2mm_index = xdev->nr_channels;
-		chan->tdest = chan_id - xdev->nr_channels;
+		chan->id = xdev->s2mm_chan_id++;
+		chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
 		chan->has_vflip = of_property_read_bool(node,
 					"xlnx,enable-vert-flip");
 		if (chan->has_vflip) {

@@ -2908,9 +2908,7 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
 		dev_warn(xdev->dev, "missing dma-channels property\n");

 	for (i = 0; i < nr_channels; i++)
-		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
-
-	xdev->nr_channels += nr_channels;
+		xilinx_dma_chan_probe(xdev, node);

 	return 0;
 }

@@ -2928,7 +2926,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
 	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
 	int chan_id = dma_spec->args[0];

-	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
+	if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
 		return NULL;

 	return dma_get_slave_channel(&xdev->chan[chan_id]->common);

@@ -2938,23 +2936,27 @@ static const struct xilinx_dma_config axidma_config = {
 	.dmatype = XDMA_TYPE_AXIDMA,
 	.clk_init = axidma_clk_init,
 	.irq_handler = xilinx_dma_irq_handler,
+	.max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
 };

 static const struct xilinx_dma_config aximcdma_config = {
 	.dmatype = XDMA_TYPE_AXIMCDMA,
 	.clk_init = axidma_clk_init,
 	.irq_handler = xilinx_mcdma_irq_handler,
+	.max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
 };
+
 static const struct xilinx_dma_config axicdma_config = {
 	.dmatype = XDMA_TYPE_CDMA,
 	.clk_init = axicdma_clk_init,
 	.irq_handler = xilinx_dma_irq_handler,
+	.max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
 };

 static const struct xilinx_dma_config axivdma_config = {
 	.dmatype = XDMA_TYPE_VDMA,
 	.clk_init = axivdma_clk_init,
 	.irq_handler = xilinx_dma_irq_handler,
+	.max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
 };

 static const struct of_device_id xilinx_dma_of_ids[] = {

@@ -3011,6 +3013,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)

 	/* Retrieve the DMA engine properties from the device tree */
 	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+	xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;

 	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
 	    xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {

@@ -3104,7 +3107,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	}

 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
-		for (i = 0; i < xdev->nr_channels; i++)
+		for (i = 0; i < xdev->dma_config->max_channels; i++)
 			if (xdev->chan[i])
 				xdev->chan[i]->num_frms = num_frames;
 	}

@@ -3134,7 +3137,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 disable_clks:
 	xdma_disable_allclks(xdev);
 error:
-	for (i = 0; i < xdev->nr_channels; i++)
+	for (i = 0; i < xdev->dma_config->max_channels; i++)
 		if (xdev->chan[i])
 			xilinx_dma_chan_remove(xdev->chan[i]);

@@ -3156,7 +3159,7 @@ static int xilinx_dma_remove(struct platform_device *pdev)

 	dma_async_device_unregister(&xdev->common);

-	for (i = 0; i < xdev->nr_channels; i++)
+	for (i = 0; i < xdev->dma_config->max_channels; i++)
 		if (xdev->chan[i])
 			xilinx_dma_chan_remove(xdev->chan[i]);
...
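An aside on the channel-id scheme above: the per-type max_channels constant replaces the dynamically accumulated nr_channels/s2mm_index pair, so the chan[] array is now statically partitioned. A sketch of the resulting layout, assuming the multi-channel aximcdma_config (max_channels = 0x20):

/*
 * chan[0x00] .. chan[0x0f]: MM2S channels, ids handed out by mm2s_chan_id++
 * chan[0x10] .. chan[0x1f]: S2MM channels, ids handed out by s2mm_chan_id++,
 *                           with tdest = id - max_channels / 2
 */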
@@ -300,6 +300,8 @@ struct dma_router {
  * @chan_id: channel ID for sysfs
  * @dev: class device for sysfs
  * @name: backlink name for sysfs
+ * @dbg_client_name: slave name for debugfs in format:
+ *	dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx"
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client_count: how many clients are using this channel

@@ -318,6 +320,9 @@ struct dma_chan {
 	int chan_id;
 	struct dma_chan_dev *dev;
 	const char *name;
+#ifdef CONFIG_DEBUG_FS
+	char *dbg_client_name;
+#endif

 	struct list_head device_node;
 	struct dma_chan_percpu __percpu *local;

@@ -618,10 +623,11 @@ static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)

 static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
 {
-	if (tx->unmap) {
-		dmaengine_unmap_put(tx->unmap);
-		tx->unmap = NULL;
-	}
+	if (!tx->unmap)
+		return;
+
+	dmaengine_unmap_put(tx->unmap);
+	tx->unmap = NULL;
 }

 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH

@@ -805,7 +811,9 @@ struct dma_filter {
  * called and there are no further references to this structure. This
  * must be implemented to free resources however many existing drivers
  * do not and are therefore not safe to unbind while in use.
- *
+ * @dbg_summary_show: optional routine to show contents in debugfs; default code
+ *	will be used when this is omitted, but custom code can show extra,
+ *	controller specific information.
  */
 struct dma_device {
 	struct kref ref;

@@ -891,6 +899,11 @@ struct dma_device {
 				    struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
 	void (*device_release)(struct dma_device *dev);
+	/* debugfs support */
+#ifdef CONFIG_DEBUG_FS
+	void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
+	struct dentry *dbg_dev_root;
+#endif
 };

 static inline int dmaengine_slave_config(struct dma_chan *chan,

@@ -1155,14 +1168,7 @@ static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc
 static inline bool dmaengine_check_align(enum dmaengine_alignment align,
 					 size_t off1, size_t off2, size_t len)
 {
-	size_t mask;
-
-	if (!align)
-		return true;
-	mask = (1 << align) - 1;
-	if (mask & (off1 | off2 | len))
-		return false;
-	return true;
+	return !(((1 << align) - 1) & (off1 | off2 | len));
 }
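The one-liner preserves the old !align special case for free: when align == 0 the mask (1 << 0) - 1 evaluates to 0, so the expression is always true. A worked example with illustrative numbers:

/* align = 2 means 4-byte alignment, so mask = (1 << 2) - 1 = 0b11:
 *   off1 = 8, off2 = 12, len = 64:  (8 | 12 | 64) & 0b11 == 0  -> true
 *   off1 = 8, off2 = 13, len = 64:  (8 | 13 | 64) & 0b11 == 1  -> false
 */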
 static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,

@@ -1236,9 +1242,9 @@ static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
 {
 	if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
 		return dma_dev_to_maxpq(dma);
-	else if (dmaf_p_disabled_continue(flags))
+	if (dmaf_p_disabled_continue(flags))
 		return dma_dev_to_maxpq(dma) - 1;
-	else if (dmaf_continue(flags))
+	if (dmaf_continue(flags))
 		return dma_dev_to_maxpq(dma) - 3;
 	BUG();
 }

@@ -1249,7 +1255,7 @@ static inline size_t dmaengine_get_icg(bool inc, bool sgl, size_t icg,
 	if (inc) {
 		if (dir_icg)
 			return dir_icg;
-		else if (sgl)
+		if (sgl)
 			return icg;
 	}

@@ -1415,11 +1421,12 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 static inline void
 dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
 {
-	if (st) {
-		st->last = last;
-		st->used = used;
-		st->residue = residue;
-	}
+	if (!st)
+		return;
+
+	st->last = last;
+	st->used = used;
+	st->residue = residue;
 }

 #ifdef CONFIG_DMA_ENGINE

@@ -1496,12 +1503,11 @@ static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
 	if (ret)
 		return ret;

-	if (caps.descriptor_reuse) {
-		tx->flags |= DMA_CTRL_REUSE;
-		return 0;
-	} else {
-		return -EPERM;
-	}
+	if (!caps.descriptor_reuse)
+		return -EPERM;
+
+	tx->flags |= DMA_CTRL_REUSE;
+	return 0;
 }

 static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)

@@ -1517,10 +1523,10 @@ static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
 static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
 {
 	/* this is supported for reusable desc, so check that */
-	if (dmaengine_desc_test_reuse(desc))
-		return desc->desc_free(desc);
-	else
-		return -EPERM;
+	if (!dmaengine_desc_test_reuse(desc))
+		return -EPERM;
+
+	return desc->desc_free(desc);
 }

 /* --- DMA device --- */

@@ -1566,9 +1572,7 @@ dmaengine_get_direction_text(enum dma_transfer_direction dir)
 	case DMA_DEV_TO_DEV:
 		return "DEV_TO_DEV";
 	default:
-		break;
-	}
-
-	return "invalid";
+		return "invalid";
 	}
 }

 #endif /* DMAENGINE_H */
@@ -83,21 +83,6 @@ enum dsa_completion_status {
 #define DSA_COMP_STATUS_MASK		0x7f
 #define DSA_COMP_STATUS_WRITE		0x80

-struct dsa_batch_desc {
-	uint32_t	pasid:20;
-	uint32_t	rsvd:11;
-	uint32_t	priv:1;
-	uint32_t	flags:24;
-	uint32_t	opcode:8;
-	uint64_t	completion_addr;
-	uint64_t	desc_list_addr;
-	uint64_t	rsvd1;
-	uint32_t	desc_count;
-	uint16_t	interrupt_handle;
-	uint16_t	rsvd2;
-	uint8_t		rsvd3[24];
-} __attribute__((packed));
-
 struct dsa_hw_desc {
 	uint32_t	pasid:20;
 	uint32_t	rsvd:11;

@@ -109,6 +94,7 @@ struct dsa_hw_desc {
 		uint64_t	src_addr;
 		uint64_t	rdback_addr;
 		uint64_t	pattern;
+		uint64_t	desc_list_addr;
 	};
 	union {
 		uint64_t	dst_addr;

@@ -116,7 +102,10 @@ struct dsa_hw_desc {
 		uint64_t	src2_addr;
 		uint64_t	comp_pattern;
 	};
-	uint32_t	xfer_size;
+	union {
+		uint32_t	xfer_size;
+		uint32_t	desc_count;
+	};
 	uint16_t	int_handle;
 	uint16_t	rsvd1;
 	union {
...
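Merging the batch layout into dsa_hw_desc means a batch is now built from the same struct, with the unioned fields reinterpreted by opcode. A minimal userspace sketch, assuming the DSA_OPCODE_BATCH enumerator from this header (submission via the device portal is omitted):

#include <linux/idxd.h>
#include <stdint.h>
#include <string.h>

static void prep_batch(struct dsa_hw_desc *desc, uint64_t list_addr,
		       uint32_t count, uint64_t comp_addr)
{
	memset(desc, 0, sizeof(*desc));
	desc->opcode = DSA_OPCODE_BATCH;	/* batch opcode from enum dsa_opcode */
	desc->desc_list_addr = list_addr;	/* array of work descriptors */
	desc->desc_count = count;		/* overlays xfer_size via the new union */
	desc->completion_addr = comp_addr;	/* completion record to write */
}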