Commit 31be1d0f authored by Linus Torvalds

Merge tag 'dmaengine-6.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "New support / Core:

   - Remove DMA_MEMCPY_SG for lack of users

   - Tegra 234 dmaengine support

   - Mediatek MT8365 dma support

   - Apple ADMAC driver

  Updates:

   - YAML conversion for the ST-Ericsson DMA40 binding and Freescale eDMA

   - rz-dmac updates and device_synchronize support

   - A bunch of typo fixes in driver comments

   - Multithread support in the sf-pdma driver"

* tag 'dmaengine-6.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (50 commits)
  dmaengine: mediatek: mtk-hsdma: Fix typo 'the the' in comment
  dmaengine: axi-dmac: check cache coherency register
  dmaengine: sh: rz-dmac: Add device_synchronize callback
  dmaengine: sprd: Cleanup in .remove() after pm_runtime_get_sync() failed
  dmaengine: tegra: Add terminate() for Tegra234
  dt-bindings: dmaengine: Add compatible for Tegra234
  dmaengine: xilinx: use strscpy to replace strlcpy
  dmaengine: imx-sdma: Add FIFO stride support for multi FIFO script
  dmaengine: idxd: Correct IAX operation code names
  dmaengine: imx-dma: Cast of_device_get_match_data() with (uintptr_t)
  dmaengine: dw-axi-dmac: ignore interrupt if no descriptor
  dmaengine: dw-axi-dmac: do not print NULL LLI during error
  dmaengine: altera-msgdma: Fixed some inconsistent function name descriptions
  dmaengine: imx-sdma: Add missing struct documentation
  dmaengine: sf-pdma: Add multithread support for a DMA channel
  dt-bindings: dma: dw-axi-dmac: extend the number of interrupts
  dmaengine: dmatest: use strscpy to replace strlcpy
  dmaengine: ste_dma40: fix typo in comment
  dmaengine: jz4780: fix typo in comment
  dmaengine: s3c24xx: fix typo in comment
  ...
parents 36001a2f a1873f83
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/apple,admac.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Apple Audio DMA Controller (ADMAC)
description: |
Apple's Audio DMA Controller (ADMAC) is used to fetch and store audio samples
on SoCs from the "Apple Silicon" family.
The controller has been seen with up to 24 channels. Even-numbered channels
are TX-only, odd-numbered are RX-only. Individual channels are coupled to
fixed device endpoints.
maintainers:
- Martin Povišer <povik+lin@cutebit.org>
allOf:
- $ref: "dma-controller.yaml#"
properties:
compatible:
items:
- enum:
- apple,t6000-admac
- apple,t8103-admac
- const: apple,admac
reg:
maxItems: 1
'#dma-cells':
const: 1
description:
Clients specify a single cell with channel number.
dma-channels:
maximum: 24
interrupts:
minItems: 4
maxItems: 4
description:
Interrupts that correspond to the 4 IRQ outputs of the controller. Usually
only one of the controller outputs will be connected as a usable interrupt
source. The remaining interrupts will be left without a valid value, e.g.
in an interrupts-extended list the disconnected positions will contain
an empty phandle reference <0>.
required:
- compatible
- reg
- '#dma-cells'
- dma-channels
- interrupts
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/apple-aic.h>
#include <dt-bindings/interrupt-controller/irq.h>
aic: interrupt-controller {
interrupt-controller;
#interrupt-cells = <3>;
};
admac: dma-controller@238200000 {
compatible = "apple,t8103-admac", "apple,admac";
reg = <0x38200000 0x34000>;
dma-channels = <24>;
interrupts-extended = <0>,
<&aic AIC_IRQ 626 IRQ_TYPE_LEVEL_HIGH>,
<0>,
<0>;
#dma-cells = <1>;
};
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/fsl,edma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Freescale enhanced Direct Memory Access (eDMA) Controller
description: |
The eDMA channels can be multiplexed through programmable
memory-mapped registers. Channels are split into two groups, called
DMAMUX0 and DMAMUX1; a specific DMA request source can only be multiplexed
by the channels of one group, DMAMUX0 or DMAMUX1, but not both.
maintainers:
- Peng Fan <peng.fan@nxp.com>
properties:
compatible:
oneOf:
- enum:
- fsl,vf610-edma
- fsl,imx7ulp-edma
- items:
- const: fsl,ls1028a-edma
- const: fsl,vf610-edma
reg:
minItems: 2
maxItems: 3
interrupts:
minItems: 2
maxItems: 17
interrupt-names:
minItems: 2
maxItems: 17
"#dma-cells":
const: 2
dma-channels:
const: 32
clocks:
maxItems: 2
clock-names:
maxItems: 2
big-endian:
description: |
If present, the registers and hardware scatter/gather descriptors of the
eDMA are implemented in big-endian mode; otherwise in little-endian mode.
type: boolean
required:
- "#dma-cells"
- compatible
- reg
- interrupts
- clocks
- dma-channels
allOf:
- $ref: "dma-controller.yaml#"
- if:
properties:
compatible:
contains:
const: fsl,vf610-edma
then:
properties:
clock-names:
items:
- const: dmamux0
- const: dmamux1
interrupts:
maxItems: 2
interrupt-names:
items:
- const: edma-tx
- const: edma-err
reg:
maxItems: 3
- if:
properties:
compatible:
contains:
const: fsl,imx7ulp-edma
then:
properties:
clock-names:
items:
- const: dma
- const: dmamux0
interrupts:
maxItems: 17
reg:
maxItems: 2
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/vf610-clock.h>
edma0: dma-controller@40018000 {
#dma-cells = <2>;
compatible = "fsl,vf610-edma";
reg = <0x40018000 0x2000>,
<0x40024000 0x1000>,
<0x40025000 0x1000>;
interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>,
<0 9 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "edma-tx", "edma-err";
dma-channels = <32>;
clock-names = "dmamux0", "dmamux1";
clocks = <&clks VF610_CLK_DMAMUX0>, <&clks VF610_CLK_DMAMUX1>;
};
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/imx7ulp-clock.h>
edma1: dma-controller@40080000 {
#dma-cells = <2>;
compatible = "fsl,imx7ulp-edma";
reg = <0x40080000 0x2000>,
<0x40210000 0x1000>;
dma-channels = <32>;
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
/* last is eDMA2-ERR interrupt */
<GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dma", "dmamux0";
clocks = <&pcc2 IMX7ULP_CLK_DMA1>, <&pcc2 IMX7ULP_CLK_DMA_MUX1>;
};
* Freescale enhanced Direct Memory Access (eDMA) Controller
The eDMA channels can be multiplexed through programmable memory-mapped
registers. Channels are split into two groups, called DMAMUX0 and DMAMUX1;
a specific DMA request source can only be multiplexed by the channels of one
group, DMAMUX0 or DMAMUX1, but not both.
* eDMA Controller
Required properties:
- compatible :
- "fsl,vf610-edma" for eDMA as used on the Vybrid vf610 SoC and similar SoCs
- "fsl,imx7ulp-edma" for eDMA2 as used on i.MX7ULP and similar SoCs
- "fsl,ls1028a-edma" followed by "fsl,vf610-edma" for eDMA used on the
LS1028A SoC.
- reg : Specifies the base physical address(es) and size of the eDMA registers.
The 1st region is the eDMA control registers' address and size.
The 2nd and the 3rd regions are the programmable channel multiplexing
control registers' address and size.
- interrupts : A list of interrupt specifiers, one for each entry in
interrupt-names on vf610-like SoCs. On i.MX7ULP there is one transmission
interrupt per channel (16 channel interrupts in total) plus one error
interrupt listed last; no interrupt-names list is used on i.MX7ULP to keep
the DTS clean.
- #dma-cells : Must be <2>.
The 1st cell specifies the DMAMUX (0 for DMAMUX0 and 1 for DMAMUX1).
A specific request source can only be multiplexed by the channels of the
group (DMAMUX) it belongs to.
The 2nd cell specifies the request source (slot) ID.
See the SoC's reference manual for all the supported request sources.
- dma-channels : Number of channels supported by the controller
- clock-names : A list of channel group clock names. Should contain:
"dmamux0" - clock name of mux0 group
"dmamux1" - clock name of mux1 group
Note: No dmamux0 on i.mx7ulp, but another 'dma' clk added on i.mx7ulp.
- clocks : A list of phandle and clock-specifier pairs, one for each entry in
clock-names.
Optional properties:
- big-endian: If present, the registers and hardware scatter/gather descriptors
of the eDMA are implemented in big-endian mode; otherwise in little-endian
mode.
- interrupt-names : Should contain the following on vf610-like SoCs, but is not
used on i.MX7ULP-like SoCs:
"edma-tx" - the transmission interrupt
"edma-err" - the error interrupt
Examples:
edma0: dma-controller@40018000 {
#dma-cells = <2>;
compatible = "fsl,vf610-edma";
reg = <0x40018000 0x2000>,
<0x40024000 0x1000>,
<0x40025000 0x1000>;
interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>,
<0 9 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "edma-tx", "edma-err";
dma-channels = <32>;
clock-names = "dmamux0", "dmamux1";
clocks = <&clks VF610_CLK_DMAMUX0>,
<&clks VF610_CLK_DMAMUX1>;
}; /* vf610 */
edma1: dma-controller@40080000 {
#dma-cells = <2>;
compatible = "fsl,imx7ulp-edma";
reg = <0x40080000 0x2000>,
<0x40210000 0x1000>;
dma-channels = <32>;
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
/* last is eDMA2-ERR interrupt */
<GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dma", "dmamux0";
clocks = <&pcc2 IMX7ULP_CLK_DMA1>,
<&pcc2 IMX7ULP_CLK_DMA_MUX1>;
}; /* i.mx7ulp */
* DMA clients
DMA client drivers that use the DMA function must use the format described
in the dma.txt file, using a two-cell specifier for each channel: the 1st cell
specifies the channel group (DMAMUX) in which this request can be multiplexed,
and the 2nd cell specifies the request source.
Examples:
sai2: sai@40031000 {
compatible = "fsl,vf610-sai";
reg = <0x40031000 0x1000>;
interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "sai";
clocks = <&clks VF610_CLK_SAI2>;
dma-names = "tx", "rx";
dmas = <&edma0 0 21>,
<&edma0 0 20>;
};
......@@ -22,6 +22,7 @@ properties:
- items:
- enum:
- mediatek,mt2712-uart-dma
- mediatek,mt8365-uart-dma
- mediatek,mt8516-uart-dma
- const: mediatek,mt6577-uart-dma
- enum:
......
......@@ -23,7 +23,9 @@ properties:
oneOf:
- const: nvidia,tegra186-gpcdma
- items:
- const: nvidia,tegra194-gpcdma
- enum:
- nvidia,tegra234-gpcdma
- nvidia,tegra194-gpcdma
- const: nvidia,tegra186-gpcdma
"#dma-cells":
......
......@@ -34,7 +34,12 @@ properties:
- const: axidma_apb_regs
interrupts:
maxItems: 1
description:
If the IP-core synthesis parameter DMAX_INTR_IO_TYPE is set to 1, this
will be per-channel interrupts. Otherwise, this is a single combined IRQ
for all channels.
minItems: 1
maxItems: 8
clocks:
items:
......
* DMA40 DMA Controller
Required properties:
- compatible: "stericsson,dma40"
- reg: Address range of the DMAC registers
- reg-names: Names of the above areas to use during resource look-up
- interrupts: Should contain the DMAC interrupt number
- #dma-cells: must be <3>
- memcpy-channels: Channels to be used for memcpy
Optional properties:
- dma-channels: Number of channels supported by hardware - if not present
the driver will attempt to obtain the information from H/W
- disabled-channels: Channels which can not be used
Example:
dma: dma-controller@801c0000 {
compatible = "stericsson,db8500-dma40", "stericsson,dma40";
reg = <0x801C0000 0x1000 0x40010000 0x800>;
reg-names = "base", "lcpa";
interrupt-parent = <&intc>;
interrupts = <0 25 0x4>;
#dma-cells = <3>;
memcpy-channels = <56 57 58 59 60>;
disabled-channels = <12>;
dma-channels = <8>;
};
Clients
Required properties:
- dmas: Comma separated list of dma channel requests
- dma-names: Names of the aforementioned requested channels
Each dmas request consists of 4 cells:
1. A phandle pointing to the DMA controller
2. Device signal number, the signal line for single and burst requests
connected from the device to the DMA40 engine
3. The DMA request line number (only when 'use fixed channel' is set)
4. A 32bit mask specifying mode, direction and endianness
[NB: This list will grow]
0x00000001: Mode:
Logical channel when unset
Physical channel when set
0x00000002: Direction:
Memory to Device when unset
Device to Memory when set
0x00000004: Endianness:
Little endian when unset
Big endian when set
0x00000008: Use fixed channel:
Use automatic channel selection when unset
Use DMA request line number when set
0x00000010: Set channel as high priority:
Normal priority when unset
High priority when set
Existing signal numbers for the DB8500 ASIC. Unless specified, the signals are
bidirectional, i.e. the same for RX and TX operations:
0: SPI controller 0
1: SD/MMC controller 0 (unused)
2: SD/MMC controller 1 (unused)
3: SD/MMC controller 2 (unused)
4: I2C port 1
5: I2C port 3
6: I2C port 2
7: I2C port 4
8: Synchronous Serial Port SSP0
9: Synchronous Serial Port SSP1
10: Multi-Channel Display Engine MCDE RX
11: UART port 2
12: UART port 1
13: UART port 0
14: Multirate Serial Port MSP2
15: I2C port 0
16: USB OTG in/out endpoints 7 & 15
17: USB OTG in/out endpoints 6 & 14
18: USB OTG in/out endpoints 5 & 13
19: USB OTG in/out endpoints 4 & 12
20: SLIMbus or HSI channel 0
21: SLIMbus or HSI channel 1
22: SLIMbus or HSI channel 2
23: SLIMbus or HSI channel 3
24: Multimedia DSP SXA0
25: Multimedia DSP SXA1
26: Multimedia DSP SXA2
27: Multimedia DSP SXA3
28: SD/MM controller 2
29: SD/MM controller 0
30: MSP port 1 on DB8500 v1, MSP port 3 on DB8500 v2
31: MSP port 0 or SLIMbus channel 0
32: SD/MM controller 1
33: SPI controller 2
34: i2c3 RX2 TX2
35: SPI controller 1
36: USB OTG in/out endpoints 3 & 11
37: USB OTG in/out endpoints 2 & 10
38: USB OTG in/out endpoints 1 & 9
39: USB OTG in/out endpoints 8
40: SPI controller 3
41: SD/MM controller 3
42: SD/MM controller 4
43: SD/MM controller 5
44: Multimedia DSP SXA4
45: Multimedia DSP SXA5
46: SLIMbus channel 8 or Multimedia DSP SXA6
47: SLIMbus channel 9 or Multimedia DSP SXA7
48: Crypto Accelerator 1
49: Crypto Accelerator 1 TX or Hash Accelerator 1 TX
50: Hash Accelerator 1 TX
51: memcpy TX (to be used by the DMA driver for memcpy operations)
52: SLIMbus or HSI channel 4
53: SLIMbus or HSI channel 5
54: SLIMbus or HSI channel 6
55: SLIMbus or HSI channel 7
56: memcpy (to be used by the DMA driver for memcpy operations)
57: memcpy (to be used by the DMA driver for memcpy operations)
58: memcpy (to be used by the DMA driver for memcpy operations)
59: memcpy (to be used by the DMA driver for memcpy operations)
60: memcpy (to be used by the DMA driver for memcpy operations)
61: Crypto Accelerator 0
62: Crypto Accelerator 0 TX or Hash Accelerator 0 TX
63: Hash Accelerator 0 TX
Example:
uart@80120000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0x80120000 0x1000>;
interrupts = <0 11 0x4>;
dmas = <&dma 13 0 0x2>, /* Logical - DevToMem */
<&dma 13 0 0x0>; /* Logical - MemToDev */
dma-names = "rx", "tx";
};
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/stericsson,dma40.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: ST-Ericsson DMA40 DMA Engine
maintainers:
- Linus Walleij <linus.walleij@linaro.org>
allOf:
- $ref: "dma-controller.yaml#"
properties:
"#dma-cells":
const: 3
description: |
The first cell is the unique device channel number as indicated by this
table for DB8500 which is the only ASIC known to use DMA40:
0: SPI controller 0
1: SD/MMC controller 0 (unused)
2: SD/MMC controller 1 (unused)
3: SD/MMC controller 2 (unused)
4: I2C port 1
5: I2C port 3
6: I2C port 2
7: I2C port 4
8: Synchronous Serial Port SSP0
9: Synchronous Serial Port SSP1
10: Multi-Channel Display Engine MCDE RX
11: UART port 2
12: UART port 1
13: UART port 0
14: Multirate Serial Port MSP2
15: I2C port 0
16: USB OTG in/out endpoints 7 & 15
17: USB OTG in/out endpoints 6 & 14
18: USB OTG in/out endpoints 5 & 13
19: USB OTG in/out endpoints 4 & 12
20: SLIMbus or HSI channel 0
21: SLIMbus or HSI channel 1
22: SLIMbus or HSI channel 2
23: SLIMbus or HSI channel 3
24: Multimedia DSP SXA0
25: Multimedia DSP SXA1
26: Multimedia DSP SXA2
27: Multimedia DSP SXA3
28: SD/MMC controller 2
29: SD/MMC controller 0
30: MSP port 1 on DB8500 v1, MSP port 3 on DB8500 v2
31: MSP port 0 or SLIMbus channel 0
32: SD/MMC controller 1
33: SPI controller 2
34: i2c3 RX2 TX2
35: SPI controller 1
36: USB OTG in/out endpoints 3 & 11
37: USB OTG in/out endpoints 2 & 10
38: USB OTG in/out endpoints 1 & 9
39: USB OTG in/out endpoints 8
40: SPI controller 3
41: SD/MMC controller 3
42: SD/MMC controller 4
43: SD/MMC controller 5
44: Multimedia DSP SXA4
45: Multimedia DSP SXA5
46: SLIMbus channel 8 or Multimedia DSP SXA6
47: SLIMbus channel 9 or Multimedia DSP SXA7
48: Crypto Accelerator 1
49: Crypto Accelerator 1 TX or Hash Accelerator 1 TX
50: Hash Accelerator 1 TX
51: memcpy TX (to be used by the DMA driver for memcpy operations)
52: SLIMbus or HSI channel 4
53: SLIMbus or HSI channel 5
54: SLIMbus or HSI channel 6
55: SLIMbus or HSI channel 7
56: memcpy (to be used by the DMA driver for memcpy operations)
57: memcpy (to be used by the DMA driver for memcpy operations)
58: memcpy (to be used by the DMA driver for memcpy operations)
59: memcpy (to be used by the DMA driver for memcpy operations)
60: memcpy (to be used by the DMA driver for memcpy operations)
61: Crypto Accelerator 0
62: Crypto Accelerator 0 TX or Hash Accelerator 0 TX
63: Hash Accelerator 0 TX
The second cell is the DMA request line number. This is only used when
a fixed channel is allocated, and indicated by setting bit 3 in the
flags field (see below).
The third cell is a 32bit flags bitfield with the following possible
bits set:
0x00000001 (bit 0) - mode:
Logical channel when unset
Physical channel when set
0x00000002 (bit 1) - direction:
Memory to Device when unset
Device to Memory when set
0x00000004 (bit 2) - endianness:
Little endian when unset
Big endian when set
0x00000008 (bit 3) - use fixed channel:
Use automatic channel selection when unset
Use DMA request line number when set
0x00000010 (bit 4) - set channel as high priority:
Normal priority when unset
High priority when set
compatible:
items:
- const: stericsson,db8500-dma40
- const: stericsson,dma40
reg:
items:
- description: DMA40 memory base
- description: LCPA memory base
reg-names:
items:
- const: base
- const: lcpa
interrupts:
maxItems: 1
clocks:
maxItems: 1
memcpy-channels:
$ref: /schemas/types.yaml#/definitions/uint32-array
description: Array of u32 elements indicating which channels on the DMA
engine are eligible for memcpy transfers
required:
- "#dma-cells"
- compatible
- reg
- interrupts
- clocks
- memcpy-channels
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/mfd/dbx500-prcmu.h>
dma-controller@801C0000 {
compatible = "stericsson,db8500-dma40", "stericsson,dma40";
reg = <0x801C0000 0x1000>, <0x40010000 0x800>;
reg-names = "base", "lcpa";
interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <3>;
memcpy-channels = <56 57 58 59 60>;
clocks = <&prcmu_clk PRCMU_DMACLK>;
};
...
......@@ -162,16 +162,6 @@ Currently, the types available are:
- The device is able to do memory to memory copies
- - DMA_MEMCPY_SG
- The device supports memory to memory scatter-gather transfers.
- Even though a plain memcpy can look like a particular case of a
scatter-gather transfer, with a single chunk to copy, it's a distinct
transaction type in the mem2mem transfer case. This is because some very
simple devices might be able to do contiguous single-chunk memory copies,
but have no support for more complex SG transfers.
- No matter what the overall size of the combined chunks for source and
destination is, only as many bytes as the smallest of the two will be
transmitted. That means the number and size of the scatter-gather buffers in
......
......@@ -1851,6 +1851,7 @@ T: git https://github.com/AsahiLinux/linux.git
F: Documentation/devicetree/bindings/arm/apple.yaml
F: Documentation/devicetree/bindings/arm/apple/*
F: Documentation/devicetree/bindings/clock/apple,nco.yaml
F: Documentation/devicetree/bindings/dma/apple,admac.yaml
F: Documentation/devicetree/bindings/i2c/apple,i2c.yaml
F: Documentation/devicetree/bindings/interrupt-controller/apple,*
F: Documentation/devicetree/bindings/iommu/apple,dart.yaml
......@@ -1864,6 +1865,7 @@ F: Documentation/devicetree/bindings/power/apple*
F: Documentation/devicetree/bindings/watchdog/apple,wdt.yaml
F: arch/arm64/boot/dts/apple/
F: drivers/clk/clk-apple-nco.c
F: drivers/dma/apple-admac.c
F: drivers/i2c/busses/i2c-pasemi-core.c
F: drivers/i2c/busses/i2c-pasemi-platform.c
F: drivers/iommu/apple-dart.c
......@@ -6102,6 +6104,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git
F: Documentation/devicetree/bindings/dma/
F: Documentation/driver-api/dmaengine/
F: drivers/dma/
F: include/dt-bindings/dma/
F: include/linux/dma/
F: include/linux/dmaengine.h
F: include/linux/of_dma.h
......@@ -10188,7 +10191,8 @@ S: Supported
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
F: drivers/dma/ioat*
INTEL IADX DRIVER
INTEL IDXD DRIVER
M: Fenghua Yu <fenghua.yu@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
L: dmaengine@vger.kernel.org
S: Supported
......
......@@ -85,6 +85,14 @@ config AMCC_PPC440SPE_ADMA
help
Enable support for the AMCC PPC440SPe RAID engines.
config APPLE_ADMAC
tristate "Apple ADMAC support"
depends on ARCH_APPLE || COMPILE_TEST
select DMA_ENGINE
default ARCH_APPLE
help
Enable support for Audio DMA Controller found on Apple Silicon SoCs.
config AT_HDMAC
tristate "Atmel AHB DMA support"
depends on ARCH_AT91
......
......@@ -17,6 +17,7 @@ obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_AMD_PTDMA) += ptdma/
obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
......
......@@ -749,7 +749,7 @@ static irqreturn_t msgdma_irq_handler(int irq, void *data)
}
/**
* msgdma_chan_remove - Channel remove function
* msgdma_dev_remove() - Device remove function
* @mdev: Pointer to the Altera mSGDMA device structure
*/
static void msgdma_dev_remove(struct msgdma_device *mdev)
......@@ -918,7 +918,7 @@ static int msgdma_probe(struct platform_device *pdev)
}
/**
* msgdma_dma_remove - Driver remove function
* msgdma_remove() - Driver remove function
* @pdev: Pointer to the platform_device structure
*
* Return: Always '0'
......
......@@ -231,7 +231,7 @@ enum pl08x_dma_chan_state {
/**
* struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
* @vc: wrappped virtual channel
* @vc: wrapped virtual channel
* @phychan: the physical channel utilized by this channel, if there is one
* @name: name of channel
* @cd: channel platform data
......
......@@ -649,7 +649,7 @@ static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
}
/*
* Only check that maxburst and addr width values are supported by the
* Only check that maxburst and addr width values are supported by
* the controller but not that the configuration is good to perform the
* transfer since we don't know the direction at this stage.
*/
......
......@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
......@@ -55,6 +56,9 @@
#define AXI_DMAC_DMA_DST_TYPE_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define AXI_DMAC_DMA_DST_WIDTH_MSK GENMASK(3, 0)
#define AXI_DMAC_DMA_DST_WIDTH_GET(x) FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)
#define AXI_DMAC_REG_COHERENCY_DESC 0x14
#define AXI_DMAC_DST_COHERENT_MSK BIT(0)
#define AXI_DMAC_DST_COHERENT_GET(x) FIELD_GET(AXI_DMAC_DST_COHERENT_MSK, x)
#define AXI_DMAC_REG_IRQ_MASK 0x80
#define AXI_DMAC_REG_IRQ_PENDING 0x84
......@@ -979,6 +983,18 @@ static int axi_dmac_probe(struct platform_device *pdev)
axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
if (of_dma_is_coherent(pdev->dev.of_node)) {
ret = axi_dmac_read(dmac, AXI_DMAC_REG_COHERENCY_DESC);
if (version < ADI_AXI_PCORE_VER(4, 4, 'a') ||
!AXI_DMAC_DST_COHERENT_GET(ret)) {
dev_err(dmac->dma_dev.dev,
"Coherent DMA not supported in hardware");
ret = -EINVAL;
goto err_clk_disable;
}
}
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_clk_disable;
......
......@@ -388,7 +388,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
if (i != (sg_len - 1) &&
!(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
/* Automatically proceeed to the next descriptor. */
/* Automatically proceed to the next descriptor. */
desc->desc[i].dcm |= JZ_DMA_DCM_LINK;
/*
......
......@@ -1153,13 +1153,6 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
if (dma_has_cap(DMA_MEMCPY_SG, device->cap_mask) && !device->device_prep_dma_memcpy_sg) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
"DMA_MEMCPY_SG");
return -EIO;
}
if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
......
......@@ -22,51 +22,50 @@
#include <linux/wait.h>
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
module_param(test_buf_size, uint, 0644);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
static char test_device[32];
module_param_string(device, test_device, sizeof(test_device),
S_IRUGO | S_IWUSR);
module_param_string(device, test_device, sizeof(test_device), 0644);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
module_param(threads_per_chan, uint, 0644);
MODULE_PARM_DESC(threads_per_chan,
"Number of threads to start per channel (default: 1)");
static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
module_param(max_channels, uint, 0644);
MODULE_PARM_DESC(max_channels,
"Maximum number of channels to use (default: all)");
static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
module_param(iterations, uint, 0644);
MODULE_PARM_DESC(iterations,
"Iterations before stopping test (default: infinite)");
static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
module_param(dmatest, uint, 0644);
MODULE_PARM_DESC(dmatest,
"dmatest 0-memcpy 1-memset (default: 0)");
static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
module_param(xor_sources, uint, 0644);
MODULE_PARM_DESC(xor_sources,
"Number of xor source buffers (default: 3)");
static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
module_param(pq_sources, uint, 0644);
MODULE_PARM_DESC(pq_sources,
"Number of p+q source buffers (default: 3)");
static int timeout = 3000;
module_param(timeout, int, S_IRUGO | S_IWUSR);
module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
module_param(noverify, bool, 0644);
MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");
static bool norandom;
......@@ -74,7 +73,7 @@ module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
module_param(verbose, bool, 0644);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
static int alignment = -1;
......@@ -86,7 +85,7 @@ module_param(transfer_size, uint, 0644);
MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");
static bool polled;
module_param(polled, bool, S_IRUGO | S_IWUSR);
module_param(polled, bool, 0644);
MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
/**
......@@ -154,7 +153,7 @@ static const struct kernel_param_ops run_ops = {
.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
module_param_cb(run, &run_ops, &dmatest_run, 0644);
MODULE_PARM_DESC(run, "Run the test (default: false)");
static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
......@@ -290,7 +289,7 @@ static const struct kernel_param_ops wait_ops = {
.get = dmatest_wait_get,
.set = param_set_bool,
};
module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
module_param_cb(wait, &wait_ops, &wait, 0444);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");
static bool dmatest_match_channel(struct dmatest_params *params,
......@@ -579,10 +578,10 @@ static int dmatest_func(void *data)
unsigned int total_tests = 0;
dma_cookie_t cookie;
enum dma_status status;
enum dma_ctrl_flags flags;
enum dma_ctrl_flags flags;
u8 *pq_coefs = NULL;
int ret;
unsigned int buf_size;
unsigned int buf_size;
struct dmatest_data *src;
struct dmatest_data *dst;
int i;
......@@ -1095,8 +1094,8 @@ static void add_threaded_test(struct dmatest_info *info)
/* Copy test parameters */
params->buf_size = test_buf_size;
strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
strlcpy(params->device, strim(test_device), sizeof(params->device));
strscpy(params->channel, strim(test_channel), sizeof(params->channel));
strscpy(params->device, strim(test_device), sizeof(params->device));
params->threads_per_chan = threads_per_chan;
params->max_channels = max_channels;
params->iterations = iterations;
......@@ -1240,7 +1239,7 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
dtc = list_last_entry(&info->channels,
struct dmatest_chan,
node);
strlcpy(chan_reset_val,
strscpy(chan_reset_val,
dma_chan_name(dtc->chan),
sizeof(chan_reset_val));
ret = -EBUSY;
......@@ -1263,14 +1262,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
&& (strcmp("", strim(test_channel)) != 0)) {
ret = -EINVAL;
strlcpy(chan_reset_val, dma_chan_name(dtc->chan),
strscpy(chan_reset_val, dma_chan_name(dtc->chan),
sizeof(chan_reset_val));
goto add_chan_err;
}
} else {
/* Clear test_channel if no channels were added successfully */
strlcpy(chan_reset_val, "", sizeof(chan_reset_val));
strscpy(chan_reset_val, "", sizeof(chan_reset_val));
ret = -EBUSY;
goto add_chan_err;
}
......@@ -1295,7 +1294,7 @@ static int dmatest_chan_get(char *val, const struct kernel_param *kp)
mutex_lock(&info->lock);
if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) {
stop_threaded_test(info);
strlcpy(test_channel, "", sizeof(test_channel));
strscpy(test_channel, "", sizeof(test_channel));
}
mutex_unlock(&info->lock);
......
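The strscpy() conversion above changes more than the function name: strlcpy() returns strlen(src) and therefore has to walk the whole (possibly unterminated) source string, while strscpy() stops after the destination size and reports truncation through its return value. A minimal sketch of the calling pattern, using a hypothetical copy_param() helper rather than anything from dmatest:

#include <linux/printk.h>
#include <linux/string.h>

/* Hypothetical helper, not taken from dmatest. */
static void copy_param(char *dst, size_t dst_size, const char *src)
{
	ssize_t n;

	/* Always NUL-terminates; a negative return means the source did not fit. */
	n = strscpy(dst, src, dst_size);
	if (n < 0)
		pr_warn("parameter truncated\n");
}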
......@@ -982,6 +982,11 @@ static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
static void axi_chan_dump_lli(struct axi_dma_chan *chan,
struct axi_dma_hw_desc *desc)
{
if (!desc->lli) {
dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
return;
}
dev_err(dchan2dev(&chan->vc.chan),
"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
le64_to_cpu(desc->lli->sar),
......@@ -1049,6 +1054,11 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* The completed descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
if (!vd) {
dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
axi_chan_name(chan));
goto out;
}
if (chan->cyclic) {
desc = vd_to_axi_desc(vd);
......@@ -1078,6 +1088,7 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
axi_chan_start_first_queued(chan);
}
out:
spin_unlock_irqrestore(&chan->vc.lock, flags);
}
......
......@@ -414,19 +414,11 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list */
#ifdef CONFIG_64BIT
/* llp is not aligned on 64bit -> keep 32bit accesses */
SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
lower_32_bits(chunk->ll_region.paddr));
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
#else /* CONFIG_64BIT */
SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
lower_32_bits(chunk->ll_region.paddr));
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
#endif /* CONFIG_64BIT */
}
/* Doorbell */
SET_RW_32(dw, chan->dir, doorbell,
......
......@@ -102,10 +102,12 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
return ERR_PTR(ret);
}
#ifdef CONFIG_OF
static const struct of_device_id rzn1_dmac_match[] = {
{ .compatible = "renesas,rzn1-dma" },
{}
};
#endif
static int rzn1_dmamux_probe(struct platform_device *pdev)
{
......@@ -140,6 +142,7 @@ static const struct of_device_id rzn1_dmamux_match[] = {
{ .compatible = "renesas,rzn1-dmamux" },
{}
};
MODULE_DEVICE_TABLE(of, rzn1_dmamux_match);
static struct platform_driver rzn1_dmamux_driver = {
.driver = {
......
......@@ -1183,7 +1183,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
*
* Synchronizes the DMA channel termination to the current context. When this
* function returns it is guaranteed that all transfers for previously issued
* descriptors have stopped and and it is safe to free the memory associated
* descriptors have stopped and it is safe to free the memory associated
* with them. Furthermore it is guaranteed that all complete callback functions
* for a previously submitted descriptor have finished running and it is safe to
* free resources accessed from within the complete callbacks.
......
......@@ -559,9 +559,6 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
}
for_each_sg(sgl, sg, sg_len, i) {
/* get next sg's physical address */
last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
if (direction == DMA_MEM_TO_DEV) {
src_addr = sg_dma_address(sg);
dst_addr = fsl_chan->dma_dev_addr;
......
......@@ -1047,7 +1047,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
return -ENOMEM;
imxdma->dev = &pdev->dev;
imxdma->devtype = (enum imx_dma_type)of_device_get_match_data(&pdev->dev);
imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
imxdma->base = devm_ioremap_resource(&pdev->dev, res);
......
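The (uintptr_t) cast above is the usual pattern when the OF match data holds a plain integer rather than a pointer to a structure. A small sketch under hypothetical names (my_dma_*, "vendor,dma-v1"), not taken from the imx-dma driver:

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Start at 1 so a valid value is distinguishable from a missing match (NULL). */
enum my_dma_type { MY_DMA_V1 = 1, MY_DMA_V2 };

static const struct of_device_id my_dma_of_ids[] = {
	/* The integer is carried through the const void *data pointer. */
	{ .compatible = "vendor,dma-v1", .data = (const void *)MY_DMA_V1 },
	{ .compatible = "vendor,dma-v2", .data = (const void *)MY_DMA_V2 },
	{ /* sentinel */ }
};

static int my_dma_probe(struct platform_device *pdev)
{
	/*
	 * Casting the returned const void * through uintptr_t avoids the
	 * "cast from pointer to integer of different size" warning that a
	 * direct cast to the enum would trigger.
	 */
	enum my_dma_type type = (uintptr_t)of_device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "probing variant %d\n", type);
	return 0;
}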
......@@ -183,12 +183,14 @@
BIT(DMA_DEV_TO_DEV))
#define SDMA_WATERMARK_LEVEL_N_FIFOS GENMASK(15, 12)
#define SDMA_WATERMARK_LEVEL_OFF_FIFOS GENMASK(19, 16)
#define SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO GENMASK(31, 28)
#define SDMA_WATERMARK_LEVEL_SW_DONE BIT(23)
#define SDMA_DONE0_CONFIG_DONE_SEL BIT(7)
#define SDMA_DONE0_CONFIG_DONE_DIS BIT(6)
/**
/*
* struct sdma_script_start_addrs - SDMA script start pointers
*
* start addresses of the different functions in the physical
......@@ -424,6 +426,14 @@ struct sdma_desc {
* @data: specific sdma interface structure
* @bd_pool: dma_pool for bd
* @terminate_worker: used to call back into terminate work function
* @terminated: terminated list
* @is_ram_script: flag for script in ram
* @n_fifos_src: number of source device fifos
* @n_fifos_dst: number of destination device fifos
* @sw_done: software done flag
* @stride_fifos_src: stride for source device FIFOs
* @stride_fifos_dst: stride for destination device FIFOs
* @words_per_fifo: copy number of words one time for one FIFO
*/
struct sdma_channel {
struct virt_dma_chan vc;
......@@ -451,6 +461,9 @@ struct sdma_channel {
bool is_ram_script;
unsigned int n_fifos_src;
unsigned int n_fifos_dst;
unsigned int stride_fifos_src;
unsigned int stride_fifos_dst;
unsigned int words_per_fifo;
bool sw_done;
};
......@@ -1240,17 +1253,29 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
{
unsigned int n_fifos;
unsigned int stride_fifos;
unsigned int words_per_fifo;
if (sdmac->sw_done)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SW_DONE;
if (sdmac->direction == DMA_DEV_TO_MEM)
if (sdmac->direction == DMA_DEV_TO_MEM) {
n_fifos = sdmac->n_fifos_src;
else
stride_fifos = sdmac->stride_fifos_src;
} else {
n_fifos = sdmac->n_fifos_dst;
stride_fifos = sdmac->stride_fifos_dst;
}
words_per_fifo = sdmac->words_per_fifo;
sdmac->watermark_level |=
FIELD_PREP(SDMA_WATERMARK_LEVEL_N_FIFOS, n_fifos);
sdmac->watermark_level |=
FIELD_PREP(SDMA_WATERMARK_LEVEL_OFF_FIFOS, stride_fifos);
if (words_per_fifo)
sdmac->watermark_level |=
FIELD_PREP(SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO, (words_per_fifo - 1));
}
static int sdma_config_channel(struct dma_chan *chan)
......@@ -1764,6 +1789,9 @@ static int sdma_config(struct dma_chan *chan,
}
sdmac->n_fifos_src = sdmacfg->n_fifos_src;
sdmac->n_fifos_dst = sdmacfg->n_fifos_dst;
sdmac->stride_fifos_src = sdmacfg->stride_fifos_src;
sdmac->stride_fifos_dst = sdmacfg->stride_fifos_dst;
sdmac->words_per_fifo = sdmacfg->words_per_fifo;
sdmac->sw_done = sdmacfg->sw_done;
}
......@@ -2183,8 +2211,8 @@ static int sdma_probe(struct platform_device *pdev)
if (ret)
goto err_clk;
ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
sdma);
ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0,
dev_name(&pdev->dev), sdma);
if (ret)
goto err_irq;
......
......@@ -373,7 +373,7 @@ static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
/*
* free child CVD after completion.
* the parent CVD would be freeed with desc_free by user.
* the parent CVD would be freed with desc_free by user.
*/
if (cvd->parent != cvd)
kfree(cvd);
......
......@@ -138,7 +138,7 @@ struct mtk_hsdma_vdesc {
/**
* struct mtk_hsdma_cb - This is the struct holding extra info required for RX
* ring to know what relevant VD the the PD is being
* ring to know what relevant VD the PD is being
* mapped to.
* @vd: Pointer to the relevant VD.
* @flag: Flag indicating what action should be taken when VD
......@@ -761,7 +761,7 @@ static void mtk_hsdma_free_active_desc(struct dma_chan *c)
/*
* Once issue_synchronize is being set, which means once the hardware
* consumes all descriptors for the channel in the ring, the
* synchronization must be be notified immediately it is completed.
* synchronization must be notified immediately it is completed.
*/
spin_lock(&hvc->vc.lock);
if (!list_empty(&hvc->desc_hw_processing)) {
......
......@@ -313,7 +313,7 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
"%s sw_desc %p: async_tx %p\n",
__func__, sw_desc, &sw_desc->async_tx);
/* assign coookie */
/* assign cookie */
spin_lock_bh(&xor_dev->lock);
cookie = dma_cookie_assign(tx);
......
......@@ -193,7 +193,7 @@ struct owl_dma_pchan {
/**
* struct owl_dma_pchan - Wrapper for DMA ENGINE channel
* @vc: wrappped virtual channel
* @vc: wrapped virtual channel
* @pchan: the physical channel utilized by this channel
* @txd: active transaction on this channel
* @cfg: slave configuration for this channel
......
......@@ -202,7 +202,7 @@ struct s3c24xx_dma_phy {
* struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
* @id: the id of the channel
* @name: name of the channel
* @vc: wrappped virtual channel
* @vc: wrapped virtual channel
* @phy: the physical channel utilized by this channel, if there is one
* @runtime_addr: address for RX/TX according to the runtime config
* @at: active transaction on this channel
......
......@@ -52,16 +52,6 @@ static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
struct sf_pdma_desc *desc;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
if (chan->desc && !chan->desc->in_use) {
spin_unlock_irqrestore(&chan->lock, flags);
return chan->desc;
}
spin_unlock_irqrestore(&chan->lock, flags);
desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
if (!desc)
......@@ -111,7 +101,6 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
spin_lock_irqsave(&chan->vchan.lock, iflags);
chan->desc = desc;
sf_pdma_fill_desc(desc, dest, src, len);
spin_unlock_irqrestore(&chan->vchan.lock, iflags);
......@@ -170,11 +159,17 @@ static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
unsigned long flags;
u64 residue = 0;
struct sf_pdma_desc *desc;
struct dma_async_tx_descriptor *tx;
struct dma_async_tx_descriptor *tx = NULL;
spin_lock_irqsave(&chan->vchan.lock, flags);
tx = &chan->desc->vdesc.tx;
list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
if (vd->tx.cookie == cookie)
tx = &vd->tx;
if (!tx)
goto out;
if (cookie == tx->chan->completed_cookie)
goto out;
......@@ -241,6 +236,19 @@ static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
writel(v, regs->ctrl);
}
static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
{
struct virt_dma_chan *vchan = &chan->vchan;
struct virt_dma_desc *vdesc;
if (list_empty(&vchan->desc_issued))
return NULL;
vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node);
return container_of(vdesc, struct sf_pdma_desc, vdesc);
}
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
struct sf_pdma_desc *desc = chan->desc;
......@@ -268,8 +276,11 @@ static void sf_pdma_issue_pending(struct dma_chan *dchan)
spin_lock_irqsave(&chan->vchan.lock, flags);
if (vchan_issue_pending(&chan->vchan) && chan->desc)
if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
/* vchan_issue_pending has made a check that desc is not NULL */
chan->desc = sf_pdma_get_first_pending_desc(chan);
sf_pdma_xfer_desc(chan);
}
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
......@@ -298,6 +309,11 @@ static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
spin_lock_irqsave(&chan->vchan.lock, flags);
list_del(&chan->desc->vdesc.node);
vchan_cookie_complete(&chan->desc->vdesc);
chan->desc = sf_pdma_get_first_pending_desc(chan);
if (chan->desc)
sf_pdma_xfer_desc(chan);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
......
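The sf-pdma rework above makes the driver pick the next descriptor from the vchan issued list instead of relying on a single cached chan->desc, so several outstanding transfers queued from different contexts complete in order. A generic dmaengine client sketch of that scenario (queue_copies() and copy_done() are hypothetical helpers, not part of the driver):

#include <linux/completion.h>
#include <linux/dmaengine.h>

static void copy_done(void *param)
{
	complete(param);		/* signal completion of the last transfer */
}

/* Hypothetical helper: queue n memcpy transfers before kicking the channel. */
static int queue_copies(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
			size_t len, int n, struct completion *done)
{
	struct dma_async_tx_descriptor *tx;
	int i;

	for (i = 0; i < n; i++) {
		tx = dmaengine_prep_dma_memcpy(chan, dst[i], src[i], len,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx)
			return -ENOMEM;
		if (i == n - 1) {
			tx->callback = copy_done;	/* notify on the last one */
			tx->callback_param = done;
		}
		if (dma_submit_error(dmaengine_submit(tx)))
			return -EIO;
	}
	dma_async_issue_pending(chan);	/* hardware starts here */
	return 0;
}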
......@@ -12,6 +12,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
......@@ -630,6 +631,21 @@ static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
*/
}
static void rz_dmac_device_synchronize(struct dma_chan *chan)
{
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
struct rz_dmac *dmac = to_rz_dmac(chan->device);
u32 chstat;
int ret;
ret = read_poll_timeout(rz_dmac_ch_readl, chstat, !(chstat & CHSTAT_EN),
100, 100000, false, channel, CHSTAT, 1);
if (ret < 0)
dev_warn(dmac->dev, "DMA Timeout");
rz_dmac_set_dmars_register(dmac, channel->index, 0);
}
/*
* -----------------------------------------------------------------------------
* IRQ handling
......@@ -909,6 +925,7 @@ static int rz_dmac_probe(struct platform_device *pdev)
engine->device_config = rz_dmac_config;
engine->device_terminate_all = rz_dmac_terminate_all;
engine->device_issue_pending = rz_dmac_issue_pending;
engine->device_synchronize = rz_dmac_device_synchronize;
engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
dma_set_max_seg_size(engine->dev, U32_MAX);
......
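device_synchronize is the hook behind the generic dmaengine_synchronize() call, so rz-dmac clients can now rely on the usual two-step teardown. A minimal client-side sketch (my_client_stop() and the bounce buffer are hypothetical):

#include <linux/dmaengine.h>
#include <linux/slab.h>

static void my_client_stop(struct dma_chan *chan, void *bounce_buf)
{
	/* Non-blocking stop; safe to call from atomic context. */
	dmaengine_terminate_async(chan);

	/*
	 * Later, in sleepable context: wait until the channel has really
	 * stopped and all descriptor callbacks have run. With the patch above
	 * this polls CHSTAT until the enable bit clears and then clears the
	 * DMARS setting.
	 */
	dmaengine_synchronize(chan);

	kfree(bounce_buf);		/* now safe to free DMA-visible memory */
}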
......@@ -1237,11 +1237,8 @@ static int sprd_dma_remove(struct platform_device *pdev)
{
struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
struct sprd_dma_chn *c, *cn;
int ret;
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0)
return ret;
pm_runtime_get_sync(&pdev->dev);
/* explicitly free the irq */
if (sdev->irq > 0)
......
......@@ -1970,7 +1970,7 @@ static int d40_config_memcpy(struct d40_chan *d40c)
dma_has_cap(DMA_SLAVE, cap)) {
d40c->dma_cfg = dma40_memcpy_conf_phy;
/* Generate interrrupt at end of transfer or relink. */
/* Generate interrupt at end of transfer or relink. */
d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
/* Generate interrupt on error. */
......
......@@ -1328,12 +1328,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
return IRQ_NONE;
}
id = __ffs(status);
chan = &dmadev->chan[id];
if (!chan) {
dev_warn(mdma2dev(dmadev), "MDMA channel not initialized\n");
return IRQ_NONE;
}
/* Handle interrupt for the channel */
spin_lock(&chan->vchan.lock);
......
......@@ -7,6 +7,7 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
......@@ -122,6 +123,15 @@
SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) | \
SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
/*
* Normal DMA supports individual transfers (segments) up to 128k.
* Dedicated DMA supports transfers up to 16M. We can only report
* one size limit, so we have to use the smaller value.
*/
#define SUN4I_NDMA_MAX_SEG_SIZE SZ_128K
#define SUN4I_DDMA_MAX_SEG_SIZE SZ_16M
#define SUN4I_DMA_MAX_SEG_SIZE SUN4I_NDMA_MAX_SEG_SIZE
struct sun4i_dma_pchan {
/* Register base of channel */
void __iomem *base;
......@@ -155,7 +165,8 @@ struct sun4i_dma_contract {
struct virt_dma_desc vd;
struct list_head demands;
struct list_head completed_demands;
int is_cyclic;
bool is_cyclic : 1;
bool use_half_int : 1;
};
struct sun4i_dma_dev {
......@@ -372,7 +383,7 @@ static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
if (promise) {
vchan->contract = contract;
vchan->pchan = pchan;
set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
set_pchan_interrupt(priv, pchan, contract->use_half_int, 1);
configure_pchan(pchan, promise);
}
......@@ -735,12 +746,21 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
*
* Which requires half the engine programming for the same
* functionality.
*
* This only works if two periods fit in a single promise. That will
* always be the case for dedicated DMA, where the hardware has a much
* larger maximum transfer size than advertised to clients.
*/
nr_periods = DIV_ROUND_UP(len / period_len, 2);
if (vchan->is_dedicated || period_len <= SUN4I_NDMA_MAX_SEG_SIZE / 2) {
period_len *= 2;
contract->use_half_int = 1;
}
nr_periods = DIV_ROUND_UP(len, period_len);
for (i = 0; i < nr_periods; i++) {
/* Calculate the offset in the buffer and the length needed */
offset = i * period_len * 2;
plength = min((len - offset), (period_len * 2));
offset = i * period_len;
plength = min((len - offset), period_len);
if (dir == DMA_MEM_TO_DEV)
src = buf + offset;
else
......@@ -1149,6 +1169,8 @@ static int sun4i_dma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
spin_lock_init(&priv->lock);
dma_set_max_seg_size(&pdev->dev, SUN4I_DMA_MAX_SEG_SIZE);
dma_cap_zero(priv->slave.cap_mask);
dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
......
......@@ -157,8 +157,8 @@
* If any burst is in flight and DMA paused then this is the time to complete
* on-flight burst and update DMA status register.
*/
#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 20
#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 100
#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 10
#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 5000 /* 5 msec */
/* Channel base address offset from GPCDMA base address */
#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET 0x20000
......@@ -432,6 +432,17 @@ static int tegra_dma_device_resume(struct dma_chan *dc)
return 0;
}
static inline int tegra_dma_pause_noerr(struct tegra_dma_channel *tdc)
{
/* Return 0 irrespective of PAUSE status.
* This is useful to recover channels that can exit out of flush
* state when the channel is disabled.
*/
tegra_dma_pause(tdc);
return 0;
}
static void tegra_dma_disable(struct tegra_dma_channel *tdc)
{
u32 csr, status;
......@@ -1292,6 +1303,14 @@ static const struct tegra_dma_chip_data tegra194_dma_chip_data = {
.terminate = tegra_dma_pause,
};
static const struct tegra_dma_chip_data tegra234_dma_chip_data = {
.nr_channels = 31,
.channel_reg_size = SZ_64K,
.max_dma_count = SZ_1G,
.hw_support_pause = true,
.terminate = tegra_dma_pause_noerr,
};
static const struct of_device_id tegra_dma_of_match[] = {
{
.compatible = "nvidia,tegra186-gpcdma",
......@@ -1299,6 +1318,9 @@ static const struct of_device_id tegra_dma_of_match[] = {
}, {
.compatible = "nvidia,tegra194-gpcdma",
.data = &tegra194_dma_chip_data,
}, {
.compatible = "nvidia,tegra234-gpcdma",
.data = &tegra234_dma_chip_data,
}, {
},
};
......
......@@ -112,6 +112,11 @@ static struct psil_ep j721s2_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
/* MAIN SA2UL */
PSIL_SA2UL(0x4a40, 0),
PSIL_SA2UL(0x4a41, 0),
PSIL_SA2UL(0x4a42, 0),
PSIL_SA2UL(0x4a43, 0),
/* CPSW0 */
PSIL_ETHERNET(0x7000),
/* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
......@@ -144,6 +149,9 @@ static struct psil_ep j721s2_src_ep_map[] = {
/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
static struct psil_ep j721s2_dst_ep_map[] = {
/* MAIN SA2UL */
PSIL_SA2UL(0xca40, 1),
PSIL_SA2UL(0xca41, 1),
/* CPSW0 */
PSIL_ETHERNET(0xf000),
PSIL_ETHERNET(0xf001),
......
......@@ -2127,126 +2127,6 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
return NULL;
}
/**
* xilinx_cdma_prep_memcpy_sg - prepare descriptors for a memcpy_sg transaction
* @dchan: DMA channel
* @dst_sg: Destination scatter list
* @dst_sg_len: Number of entries in destination scatter list
* @src_sg: Source scatter list
* @src_sg_len: Number of entries in source scatter list
* @flags: transfer ack flags
*
* Return: Async transaction descriptor on success and NULL on failure
*/
static struct dma_async_tx_descriptor *xilinx_cdma_prep_memcpy_sg(
struct dma_chan *dchan, struct scatterlist *dst_sg,
unsigned int dst_sg_len, struct scatterlist *src_sg,
unsigned int src_sg_len, unsigned long flags)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
struct xilinx_cdma_tx_segment *segment, *prev = NULL;
struct xilinx_cdma_desc_hw *hw;
size_t len, dst_avail, src_avail;
dma_addr_t dma_dst, dma_src;
if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
return NULL;
if (unlikely(!dst_sg || !src_sg))
return NULL;
desc = xilinx_dma_alloc_tx_descriptor(chan);
if (!desc)
return NULL;
dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
desc->async_tx.tx_submit = xilinx_dma_tx_submit;
dst_avail = sg_dma_len(dst_sg);
src_avail = sg_dma_len(src_sg);
/*
* loop until there is either no more source or no more destination
* scatterlist entry
*/
while (true) {
len = min_t(size_t, src_avail, dst_avail);
len = min_t(size_t, len, chan->xdev->max_buffer_len);
if (len == 0)
goto fetch;
/* Allocate the link descriptor from DMA pool */
segment = xilinx_cdma_alloc_tx_segment(chan);
if (!segment)
goto error;
dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
dst_avail;
dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
src_avail;
hw = &segment->hw;
hw->control = len;
hw->src_addr = dma_src;
hw->dest_addr = dma_dst;
if (chan->ext_addr) {
hw->src_addr_msb = upper_32_bits(dma_src);
hw->dest_addr_msb = upper_32_bits(dma_dst);
}
if (prev) {
prev->hw.next_desc = segment->phys;
if (chan->ext_addr)
prev->hw.next_desc_msb =
upper_32_bits(segment->phys);
}
prev = segment;
dst_avail -= len;
src_avail -= len;
list_add_tail(&segment->node, &desc->segments);
fetch:
/* Fetch the next dst scatterlist entry */
if (dst_avail == 0) {
if (dst_sg_len == 0)
break;
dst_sg = sg_next(dst_sg);
if (dst_sg == NULL)
break;
dst_sg_len--;
dst_avail = sg_dma_len(dst_sg);
}
/* Fetch the next src scatterlist entry */
if (src_avail == 0) {
if (src_sg_len == 0)
break;
src_sg = sg_next(src_sg);
if (src_sg == NULL)
break;
src_sg_len--;
src_avail = sg_dma_len(src_sg);
}
}
if (list_empty(&desc->segments)) {
dev_err(chan->xdev->dev,
"%s: Zero-size SG transfer requested\n", __func__);
goto error;
}
/* Link the last hardware descriptor with the first. */
segment = list_first_entry(&desc->segments,
struct xilinx_cdma_tx_segment, node);
desc->async_tx.phys = segment->phys;
prev->hw.next_desc = segment->phys;
return &desc->async_tx;
error:
xilinx_dma_free_tx_descriptor(chan, desc);
return NULL;
}
/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
......@@ -3240,9 +3120,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
DMA_RESIDUE_GRANULARITY_SEGMENT;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
dma_cap_set(DMA_MEMCPY_SG, xdev->common.cap_mask);
xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
xdev->common.device_prep_dma_memcpy_sg = xilinx_cdma_prep_memcpy_sg;
/* Residue calculation is supported by only AXI DMA and CDMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
......
......@@ -376,7 +376,7 @@ static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
if (ret < 0)
goto done;
} else {
strlcpy(kern_buff, "No testcase executed",
strscpy(kern_buff, "No testcase executed",
XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
}
......@@ -1652,10 +1652,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
dpdma_hw_init(xdev);
xdev->irq = platform_get_irq(pdev, 0);
if (xdev->irq < 0) {
dev_err(xdev->dev, "failed to get platform irq\n");
if (xdev->irq < 0)
return xdev->irq;
}
ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
dev_name(xdev->dev), xdev);
......
......@@ -70,6 +70,16 @@ static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
* struct sdma_peripheral_config - SDMA config for audio
* @n_fifos_src: Number of FIFOs for recording
* @n_fifos_dst: Number of FIFOs for playback
* @stride_fifos_src: FIFO address stride for recording, 0 means all FIFOs are
*                    contiguous, 1 means one word of stride between FIFOs. The
*                    stride should be the same between all FIFOs.
* @stride_fifos_dst: FIFO address stride for playback
* @words_per_fifo: number of words per FIFO fetch/fill, 1 means one channel
*                  per FIFO, 2 means 2 channels per FIFO, and so on. If
*                  'n_fifos_src = 4' and 'words_per_fifo = 2', the first two
*                  words (channels) are fetched from FIFO0, then the next two
*                  from FIFO1, and so on; after the last FIFO3 is fetched, it
*                  rolls back to FIFO0.
* @sw_done: Use software done. Needed for PDM (micfil)
*
* Some i.MX Audio devices (SAI, micfil) have multiple successive FIFO
......@@ -82,6 +92,9 @@ static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
struct sdma_peripheral_config {
int n_fifos_src;
int n_fifos_dst;
int stride_fifos_src;
int stride_fifos_dst;
int words_per_fifo;
bool sw_done;
};
......
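For the new stride_fifos_* and words_per_fifo fields above, a client hands the structure to the SDMA driver through dma_slave_config's peripheral_config/peripheral_size, the same way existing SAI/micfil users pass n_fifos_*. A sketch with made-up values (my_sai_config_tx() and the width/burst choices are illustrative assumptions, not taken from a real driver):

#include <linux/dmaengine.h>
#include <linux/dma/imx-dma.h>

static int my_sai_config_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct sdma_peripheral_config sdmacfg = {
		.n_fifos_dst = 4,	/* four successive playback FIFOs */
		.stride_fifos_dst = 0,	/* FIFO registers are contiguous */
		.words_per_fifo = 2,	/* two audio channels share each FIFO */
	};
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 4,
		/* imx-sdma copies these fields out in sdma_config(), see above */
		.peripheral_config = &sdmacfg,
		.peripheral_size = sizeof(sdmacfg),
	};

	return dmaengine_slave_config(chan, &cfg);
}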
......@@ -26,7 +26,7 @@ enum spi_transfer_cmd {
* @clk_div: source clock divider
* @clk_src: serial clock
* @cmd: spi cmd
* @fragmentation: keep CS assserted at end of sequence
* @fragmentation: keep CS asserted at end of sequence
* @cs: chip select toggle
* @set_config: set peripheral config
* @rx_len: receive length for buffer
......
......@@ -50,7 +50,6 @@ enum dma_status {
*/
enum dma_transaction_type {
DMA_MEMCPY,
DMA_MEMCPY_SG,
DMA_XOR,
DMA_PQ,
DMA_XOR_VAL,
......@@ -887,11 +886,6 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
size_t len, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_memcpy_sg)(
struct dma_chan *chan,
struct scatterlist *dst_sg, unsigned int dst_nents,
struct scatterlist *src_sg, unsigned int src_nents,
unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags);
......@@ -1060,20 +1054,6 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
len, flags);
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy_sg(
struct dma_chan *chan,
struct scatterlist *dst_sg, unsigned int dst_nents,
struct scatterlist *src_sg, unsigned int src_nents,
unsigned long flags)
{
if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy_sg)
return NULL;
return chan->device->device_prep_dma_memcpy_sg(chan, dst_sg, dst_nents,
src_sg, src_nents,
flags);
}
static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
enum dma_desc_metadata_mode mode)
{
......
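With dmaengine_prep_dma_memcpy_sg() removed, a client that still has a pair of mapped scatterlists can fall back to issuing one ordinary memcpy descriptor per segment. A rough sketch, assuming both lists have the same number of equally sized segments (sg_copy() is a hypothetical helper, not a replacement API):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int sg_copy(struct dma_chan *chan, struct scatterlist *dst_sg,
		   struct scatterlist *src_sg, unsigned int nents)
{
	struct dma_async_tx_descriptor *tx;
	unsigned int i;

	for (i = 0; i < nents; i++) {
		/* one plain memcpy descriptor per matching segment pair */
		tx = dmaengine_prep_dma_memcpy(chan, sg_dma_address(dst_sg),
					       sg_dma_address(src_sg),
					       sg_dma_len(src_sg), DMA_CTRL_ACK);
		if (!tx || dma_submit_error(dmaengine_submit(tx)))
			return -EIO;
		dst_sg = sg_next(dst_sg);
		src_sg = sg_next(src_sg);
	}
	dma_async_issue_pending(chan);
	return 0;
}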
......@@ -89,14 +89,14 @@ enum iax_opcode {
IAX_OPCODE_CRC64,
IAX_OPCODE_ZERO_DECOMP_32 = 0x48,
IAX_OPCODE_ZERO_DECOMP_16,
IAX_OPCODE_DECOMP_32 = 0x4c,
IAX_OPCODE_DECOMP_16,
IAX_OPCODE_ZERO_COMP_32 = 0x4c,
IAX_OPCODE_ZERO_COMP_16,
IAX_OPCODE_SCAN = 0x50,
IAX_OPCODE_SET_MEMBER,
IAX_OPCODE_EXTRACT,
IAX_OPCODE_SELECT,
IAX_OPCODE_RLE_BURST,
IAX_OPCDE_FIND_UNIQUE,
IAX_OPCODE_FIND_UNIQUE,
IAX_OPCODE_EXPAND,
};
......