Commit 04cbfba6 authored by Linus Torvalds

Merge tag 'dmaengine-5.4-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:

 - Move Dmaengine DT bindings to YAML and convert Allwinner to schema.

 - FSL dma device_synchronize implementation

 - DW: split ACPI and OF helpers into separate modules, driver updates,
   and support for Elkhart Lake

 - Make the filter functions private to the omap-dma and edma drivers,
   plus improvements to these drivers

 - Mark expected switch fall-throughs in a couple of drivers

 - Renames of the shdma and nbpfaxi binding documents

 - Minor updates to a bunch of drivers

* tag 'dmaengine-5.4-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (55 commits)
  dmaengine: ti: edma: Use bitmap_set() instead of open coded edma_set_bits()
  dmaengine: ti: edma: Only reset region0 access registers
  dmaengine: ti: edma: Do not reset reserved paRAM slots
  dmaengine: iop-adma.c: fix printk format warning
  dmaengine: stm32-dma: Use struct_size() helper
  dt-bindings: dmaengine: dma-common: Fix the dma-channel-mask property
  dmaengine: ioat/dca: Use struct_size() helper
  dmaengine: iop-adma: remove set but not used variable 'slots_per_op'
  dmaengine: dmatest: Add support for completion polling
  dmaengine: ti: omap-dma: Remove variable override in omap_dma_tx_status()
  dmaengine: ti: omap-dma: Remove 'Assignment in if condition'
  dmaengine: ti: edma: Remove 'Assignment in if condition'
  dmaengine: dw: platform: Split OF helpers to separate module
  dmaengine: dw: platform: Split ACPI helpers to separate module
  dmaengine: dw: platform: Move handle check to dw_dma_acpi_controller_register()
  dmaengine: dw: platform: Switch to acpi_dma_controller_register()
  dmaengine: dw: platform: Use devm_platform_ioremap_resource()
  dmaengine: dw: platform: Enable iDMA 32-bit on Intel Elkhart Lake
  dmaengine: dw: platform: Use struct dw_dma_chip_pdata
  dmaengine: dw: Export struct dw_dma_chip_pdata for wider use
  ...
parents 4feaab05 c5c6faae
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/allwinner,sun4i-a10-dma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Allwinner A10 DMA Controller Device Tree Bindings

maintainers:
  - Chen-Yu Tsai <wens@csie.org>
  - Maxime Ripard <maxime.ripard@bootlin.com>

allOf:
  - $ref: "dma-controller.yaml#"

properties:
  "#dma-cells":
    const: 2
    description:
      The first cell is either 0 or 1, the former to use the normal
      DMA, 1 for dedicated DMA. The second cell is the request line
      number.

  compatible:
    const: allwinner,sun4i-a10-dma

  reg:
    maxItems: 1

  interrupts:
    maxItems: 1

  clocks:
    maxItems: 1

required:
  - "#dma-cells"
  - compatible
  - reg
  - interrupts
  - clocks

additionalProperties: false

examples:
  - |
    dma: dma-controller@1c02000 {
        compatible = "allwinner,sun4i-a10-dma";
        reg = <0x01c02000 0x1000>;
        interrupts = <27>;
        clocks = <&ahb_gates 6>;
        #dma-cells = <2>;
    };

...
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/allwinner,sun50i-a64-dma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Allwinner A64 DMA Controller Device Tree Bindings

maintainers:
  - Chen-Yu Tsai <wens@csie.org>
  - Maxime Ripard <maxime.ripard@bootlin.com>

allOf:
  - $ref: "dma-controller.yaml#"

properties:
  "#dma-cells":
    const: 1
    description: The cell is the request line number.

  compatible:
    enum:
      - allwinner,sun50i-a64-dma
      - allwinner,sun50i-h6-dma

  reg:
    maxItems: 1

  interrupts:
    maxItems: 1

  clocks:
    minItems: 1
    maxItems: 2

  clock-names:
    items:
      - const: bus
      - const: mbus

  resets:
    maxItems: 1

required:
  - "#dma-cells"
  - compatible
  - reg
  - interrupts
  - clocks
  - resets
  - dma-channels

if:
  properties:
    compatible:
      const: allwinner,sun50i-h6-dma

then:
  properties:
    clocks:
      maxItems: 2

  required:
    - clock-names

else:
  properties:
    clocks:
      maxItems: 1

# FIXME: We should set it, but it would report all the generic
# properties as additional properties.
# additionalProperties: false

examples:
  - |
    dma: dma-controller@1c02000 {
        compatible = "allwinner,sun50i-a64-dma";
        reg = <0x01c02000 0x1000>;
        interrupts = <0 50 4>;
        clocks = <&ccu 30>;
        dma-channels = <8>;
        dma-requests = <27>;
        resets = <&ccu 7>;
        #dma-cells = <1>;
    };

...
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/allwinner,sun6i-a31-dma.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Allwinner A31 DMA Controller Device Tree Bindings

maintainers:
  - Chen-Yu Tsai <wens@csie.org>
  - Maxime Ripard <maxime.ripard@bootlin.com>

allOf:
  - $ref: "dma-controller.yaml#"

properties:
  "#dma-cells":
    const: 1
    description: The cell is the request line number.

  compatible:
    oneOf:
      - const: allwinner,sun6i-a31-dma
      - const: allwinner,sun8i-a23-dma
      - const: allwinner,sun8i-a83t-dma
      - const: allwinner,sun8i-h3-dma
      - const: allwinner,sun8i-v3s-dma

  reg:
    maxItems: 1

  interrupts:
    maxItems: 1

  clocks:
    maxItems: 1

  resets:
    maxItems: 1

required:
  - "#dma-cells"
  - compatible
  - reg
  - interrupts
  - clocks
  - resets

additionalProperties: false

examples:
  - |
    dma: dma-controller@1c02000 {
        compatible = "allwinner,sun6i-a31-dma";
        reg = <0x01c02000 0x1000>;
        interrupts = <0 50 4>;
        clocks = <&ahb1_gates 6>;
        resets = <&ahb1_rst 6>;
        #dma-cells = <1>;
    };

...
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/dma-common.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: DMA Engine Generic Binding

maintainers:
  - Vinod Koul <vkoul@kernel.org>

description:
  Generic binding to provide a way for a driver using DMA Engine to
  retrieve the DMA request or channel information that goes from a
  hardware device to a DMA controller.

select: false

properties:
  "#dma-cells":
    minimum: 1
    # Should be enough
    maximum: 255
    description:
      Used to provide DMA controller specific information.

  dma-channel-mask:
    $ref: /schemas/types.yaml#/definitions/uint32
    description:
      Bitmask of available DMA channels in ascending order that are
      not reserved by firmware and are available to the kernel, i.e.
      the first channel corresponds to the LSB.

  dma-channels:
    $ref: /schemas/types.yaml#/definitions/uint32
    description:
      Number of DMA channels supported by the controller.

  dma-requests:
    $ref: /schemas/types.yaml#/definitions/uint32
    description:
      Number of DMA request signals supported by the controller.

required:
  - "#dma-cells"
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/dma-controller.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: DMA Controller Generic Binding

maintainers:
  - Vinod Koul <vkoul@kernel.org>

allOf:
  - $ref: "dma-common.yaml#"

# Everything else is described in the common file
properties:
  $nodename:
    pattern: "^dma-controller(@.*)?$"

examples:
  - |
    dma: dma-controller@48000000 {
        compatible = "ti,omap-sdma";
        reg = <0x48000000 0x1000>;
        interrupts = <0 12 0x4
                      0 13 0x4
                      0 14 0x4
                      0 15 0x4>;
        #dma-cells = <1>;
        dma-channels = <32>;
        dma-requests = <127>;
        dma-channel-mask = <0xfffe>;
    };

...
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/dma-router.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: DMA Router Generic Binding

maintainers:
  - Vinod Koul <vkoul@kernel.org>

allOf:
  - $ref: "dma-common.yaml#"

description:
  DMA routers are transparent IP blocks used to route DMA request
  lines from devices to the DMA controller. Some SoCs (like TI DRA7x)
  have more peripherals integrated with DMA requests than what the DMA
  controller can handle directly.

properties:
  $nodename:
    pattern: "^dma-router(@.*)?$"

  dma-masters:
    $ref: /schemas/types.yaml#/definitions/phandle-array
    description:
      Array of phandles to the DMA controllers the router can direct
      the signal to.

  dma-requests:
    description:
      Number of incoming request lines the router can handle.

required:
  - "#dma-cells"
  - dma-masters

examples:
  - |
    sdma_xbar: dma-router@4a002b78 {
        compatible = "ti,dra7-dma-crossbar";
        reg = <0x4a002b78 0xfc>;
        #dma-cells = <1>;
        dma-requests = <205>;
        ti,dma-safe-map = <0>;
        dma-masters = <&sdma>;
    };

...
* Generic DMA Controller and DMA request bindings

Generic binding to provide a way for a driver using DMA Engine to retrieve the
DMA request or channel information that goes from a hardware device to a DMA
controller.

* DMA controller

Required property:
- #dma-cells:       Must be at least 1. Used to provide DMA controller
                    specific information. See DMA client binding below for
                    more details.

Optional properties:
- dma-channels:     Number of DMA channels supported by the controller.
- dma-requests:     Number of DMA request signals supported by the
                    controller.
- dma-channel-mask: Bitmask of available DMA channels in ascending order
                    that are not reserved by firmware and are available to
                    the kernel, i.e. the first channel corresponds to the
                    LSB.
Example:

    dma: dma@48000000 {
        compatible = "ti,omap-sdma";
        reg = <0x48000000 0x1000>;
        interrupts = <0 12 0x4
                      0 13 0x4
                      0 14 0x4
                      0 15 0x4>;
        #dma-cells = <1>;
        dma-channels = <32>;
        dma-requests = <127>;
        dma-channel-mask = <0xfffe>;
    };
* DMA router

DMA routers are transparent IP blocks used to route DMA request lines from
devices to the DMA controller. Some SoCs (like TI DRA7x) have more peripherals
integrated with DMA requests than what the DMA controller can handle directly.

Required property:
- dma-masters:      phandle of the DMA controller or list of phandles for
                    the DMA controllers the router can direct the signal to.
- #dma-cells:       Must be at least 1. Used to provide DMA router specific
                    information. See DMA client binding below for more
                    details.

Optional properties:
- dma-requests:     Number of incoming request lines the router can handle.
- In the node pointed by the dma-masters:
  - dma-requests:   The router driver might need to look for this in order
                    to configure the routing.
Example:

    sdma_xbar: dma-router@4a002b78 {
        compatible = "ti,dra7-dma-crossbar";
        reg = <0x4a002b78 0xfc>;
        #dma-cells = <1>;
        dma-requests = <205>;
        ti,dma-safe-map = <0>;
        dma-masters = <&sdma>;
    };
* DMA client

Client drivers should specify the DMA property using a phandle to the
controller followed by DMA controller specific data.

Required property:
- dmas:             List of one or more DMA specifiers, each consisting of
                    - A phandle pointing to DMA controller node
                    - A number of integer cells, as determined by the
                      #dma-cells property in the node referenced by phandle
                      containing DMA controller specific information. This
                      typically contains a DMA request line number or a
                      channel number, but can contain any data that is
                      required for configuring a channel.
- dma-names:        Contains one identifier string for each DMA specifier in
                    the dmas property. The specific strings that can be used
                    are defined in the binding of the DMA client device.
                    Multiple DMA specifiers can be used to represent
                    alternatives and in this case the dma-names for those
                    DMA specifiers must be identical (see examples).
Examples:

1. A device with one DMA read channel, one DMA write channel:

    i2c1: i2c@1 {
        ...
        dmas = <&dma 2    /* read channel */
                &dma 3>;  /* write channel */
        dma-names = "rx", "tx";
        ...
    };

2. A single read-write channel with three alternative DMA controllers:

    dmas = <&dma1 5
            &dma2 7
            &dma3 2>;
    dma-names = "rx-tx", "rx-tx", "rx-tx";

3. A device with three channels, one of which has two alternatives:

    dmas = <&dma1 2    /* read channel */
            &dma1 3    /* write channel */
            &dma2 0    /* error read */
            &dma3 0>;  /* alternative error read */
    dma-names = "rx", "tx", "error", "error";
This file has been moved to dma-controller.yaml.
Allwinner A10 DMA Controller

This driver follows the generic DMA bindings defined in dma.txt.

Required properties:

- compatible:   Must be "allwinner,sun4i-a10-dma"
- reg:          Should contain the registers base address and length
- interrupts:   Should contain a reference to the interrupt used by this
                device
- clocks:       Should contain a reference to the parent AHB clock
- #dma-cells:   Should be 2, first cell denoting normal or dedicated dma,
                second cell holding the request line number.

Example:

    dma: dma-controller@1c02000 {
        compatible = "allwinner,sun4i-a10-dma";
        reg = <0x01c02000 0x1000>;
        interrupts = <27>;
        clocks = <&ahb_gates 6>;
        #dma-cells = <2>;
    };

Clients:

DMA clients connected to the Allwinner A10 DMA controller must use the
format described in the dma.txt file, using a three-cell specifier for
each channel: a phandle plus two integer cells.
The three cells in order are:

1. A phandle pointing to the DMA controller.
2. Whether it is using normal (0) or dedicated (1) channels
3. The port ID as specified in the datasheet

Example:

    spi2: spi@1c17000 {
        compatible = "allwinner,sun4i-a10-spi";
        reg = <0x01c17000 0x1000>;
        interrupts = <0 12 4>;
        clocks = <&ahb_gates 22>, <&spi2_clk>;
        clock-names = "ahb", "mod";
        dmas = <&dma 1 29>, <&dma 1 28>;
        dma-names = "rx", "tx";
        #address-cells = <1>;
        #size-cells = <0>;
    };
Allwinner A31 DMA Controller

This driver follows the generic DMA bindings defined in dma.txt.

Required properties:

- compatible:   Must be one of
                  "allwinner,sun6i-a31-dma"
                  "allwinner,sun8i-a23-dma"
                  "allwinner,sun8i-a83t-dma"
                  "allwinner,sun8i-h3-dma"
                  "allwinner,sun8i-v3s-dma"
- reg:          Should contain the registers base address and length
- interrupts:   Should contain a reference to the interrupt used by this
                device
- clocks:       Should contain a reference to the parent AHB clock
- resets:       Should contain a reference to the reset controller asserting
                this device in reset
- #dma-cells:   Should be 1, a single cell holding a line request number

Example:

    dma: dma-controller@1c02000 {
        compatible = "allwinner,sun6i-a31-dma";
        reg = <0x01c02000 0x1000>;
        interrupts = <0 50 4>;
        clocks = <&ahb1_gates 6>;
        resets = <&ahb1_rst 6>;
        #dma-cells = <1>;
    };

------------------------------------------------------------------------------

For A64 and H6 DMA controller:

Required properties:
- compatible:   Must be one of
                  "allwinner,sun50i-a64-dma"
                  "allwinner,sun50i-h6-dma"
- dma-channels: Number of DMA channels supported by the controller.
                Refer to Documentation/devicetree/bindings/dma/dma.txt
- clocks:       In addition to parent AHB clock, it should also contain mbus
                clock (H6 only)
- clock-names:  Should contain "bus" and "mbus" (H6 only)
- all properties above, i.e. reg, interrupts, clocks, resets and #dma-cells

Optional properties:
- dma-requests: Number of DMA request signals supported by the controller.
                Refer to Documentation/devicetree/bindings/dma/dma.txt

Example:

    dma: dma-controller@1c02000 {
        compatible = "allwinner,sun50i-a64-dma";
        reg = <0x01c02000 0x1000>;
        interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&ccu CLK_BUS_DMA>;
        dma-channels = <8>;
        dma-requests = <27>;
        resets = <&ccu RST_BUS_DMA>;
        #dma-cells = <1>;
    };

------------------------------------------------------------------------------
Clients:

DMA clients connected to the A31 DMA controller must use the format
described in the dma.txt file, using a two-cell specifier for each
channel: a phandle plus one integer cell.
The two cells in order are:

1. A phandle pointing to the DMA controller.
2. The port ID as specified in the datasheet

Example:

    spi2: spi@1c6a000 {
        compatible = "allwinner,sun6i-a31-spi";
        reg = <0x01c6a000 0x1000>;
        interrupts = <0 67 4>;
        clocks = <&ahb1_gates 22>, <&spi2_clk>;
        clock-names = "ahb", "mod";
        dmas = <&dma 25>, <&dma 25>;
        dma-names = "rx", "tx";
        resets = <&ahb1_rst 22>;
    };
......@@ -15532,7 +15532,7 @@ F: Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
SYNOPSYS DESIGNWARE AXI DMAC DRIVER
M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
S: Maintained
F: drivers/dma/dwi-axi-dmac/
F: drivers/dma/dw-axi-dmac/
F: Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
SYNOPSYS DESIGNWARE DMAC DRIVER
......
......@@ -295,7 +295,7 @@ config INTEL_IOATDMA
config INTEL_IOP_ADMA
tristate "Intel IOP32x ADMA support"
depends on ARCH_IOP32X
depends on ARCH_IOP32X || COMPILE_TEST
select DMA_ENGINE
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
......
......@@ -10,6 +10,7 @@
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
......@@ -82,6 +83,12 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
if (si->base_request_line == 0 && si->num_handshake_signals == 0)
return 0;
/* Set up DMA mask based on value from CSRT */
ret = dma_coerce_mask_and_coherent(&adev->dev,
DMA_BIT_MASK(si->dma_address_width));
if (ret)
return 0;
adma->base_request_line = si->base_request_line;
adma->end_request_line = si->base_request_line +
si->num_handshake_signals - 1;
......@@ -140,7 +147,7 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
* @dev: struct device of DMA controller
* @acpi_dma_xlate: translation function which converts a dma specifier
* into a dma_chan structure
* @data pointer to controller specific data to be used by
* @data: pointer to controller specific data to be used by
* translation function
*
* Allocated memory should be freed with appropriate acpi_dma_controller_free()
......@@ -224,7 +231,7 @@ static void devm_acpi_dma_release(struct device *dev, void *res)
* devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register()
* @dev: device that is registering this DMA controller
* @acpi_dma_xlate: translation function
* @data pointer to controller specific data
* @data: pointer to controller specific data
*
* Managed acpi_dma_controller_register(). DMA controller registered by this
* function are automatically freed on driver detach. See
......@@ -257,6 +264,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
/**
* devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free()
* @dev: device that is unregistering as DMA controller
*
* Unregister a DMA controller registered with
* devm_acpi_dma_controller_register(). Normally this function will not need to
......
......@@ -896,8 +896,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
if (rc) {
dev_err(&pdev->dev, "Unable to set DMA mask\n");
return rc;
}
od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
......
......@@ -92,6 +92,7 @@
#define JZ_SOC_DATA_PROGRAMMABLE_DMA BIT(1)
#define JZ_SOC_DATA_PER_CHAN_PM BIT(2)
#define JZ_SOC_DATA_NO_DCKES_DCKEC BIT(3)
#define JZ_SOC_DATA_BREAK_LINKS BIT(4)
/**
* struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
......@@ -355,6 +356,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
void *context)
{
struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
struct jz4780_dma_desc *desc;
unsigned int i;
int err;
......@@ -375,7 +377,8 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
if (i != (sg_len - 1)) {
if (i != (sg_len - 1) &&
!(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
/* Automatically proceed to the next descriptor. */
desc->desc[i].dcm |= JZ_DMA_DCM_LINK;
......@@ -664,6 +667,8 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
struct jz4780_dma_chan *jzchan)
{
const unsigned int soc_flags = jzdma->soc_data->flags;
struct jz4780_dma_desc *desc = jzchan->desc;
uint32_t dcs;
bool ack = true;
......@@ -691,8 +696,11 @@ static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
jz4780_dma_begin(jzchan);
} else if (dcs & JZ_DMA_DCS_TT) {
vchan_cookie_complete(&jzchan->desc->vdesc);
if (!(soc_flags & JZ_SOC_DATA_BREAK_LINKS) ||
(jzchan->curr_hwdesc + 1 == desc->count)) {
vchan_cookie_complete(&desc->vdesc);
jzchan->desc = NULL;
}
jz4780_dma_begin(jzchan);
} else {
......@@ -878,10 +886,8 @@ static int jz4780_dma_probe(struct platform_device *pdev)
}
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(dev, "failed to get IRQ: %d\n", ret);
if (ret < 0)
return ret;
}
jzdma->irq = ret;
......@@ -992,6 +998,7 @@ static int jz4780_dma_remove(struct platform_device *pdev)
static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
.nb_channels = 6,
.transfer_ord_max = 5,
.flags = JZ_SOC_DATA_BREAK_LINKS,
};
static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
......
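A pattern worth noting across this pull: many hunks (jz4780 above; mtk-uart-apdma, stm32-mdma, sun4i, sun6i, rcar-dmac, and others below) simply drop the dev_err() after a failed platform_get_irq(), since the driver core now logs that failure itself. A minimal sketch of the resulting idiom; the function and handler names are hypothetical, not taken from any of these drivers:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Hypothetical handler, for illustration only. */
static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq;

	/* platform_get_irq() logs its own error, so no dev_err() here. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
				dev_name(&pdev->dev), NULL);
}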
......@@ -72,6 +72,10 @@ static bool norandom;
module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
static bool polled;
module_param(polled, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
......@@ -110,6 +114,7 @@ struct dmatest_params {
bool norandom;
int alignment;
unsigned int transfer_size;
bool polled;
};
/**
......@@ -651,6 +656,9 @@ static int dmatest_func(void *data)
/*
* src and dst buffers are freed by ourselves below
*/
if (params->polled)
flags = DMA_CTRL_ACK;
else
flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
ktime = ktime_get();
......@@ -780,8 +788,10 @@ static int dmatest_func(void *data)
}
done->done = false;
if (!params->polled) {
tx->callback = dmatest_callback;
tx->callback_param = done;
}
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
......@@ -790,12 +800,22 @@ static int dmatest_func(void *data)
msleep(100);
goto error_unmap_continue;
}
if (params->polled) {
status = dma_sync_wait(chan, cookie);
dmaengine_terminate_sync(chan);
if (status == DMA_COMPLETE)
done->done = true;
} else {
dma_async_issue_pending(chan);
wait_event_freezable_timeout(thread->done_wait, done->done,
wait_event_freezable_timeout(thread->done_wait,
done->done,
msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
status = dma_async_is_tx_complete(chan, cookie, NULL,
NULL);
}
if (!done->done) {
result("test timed out", total_tests, src->off, dst->off,
......@@ -1065,6 +1085,7 @@ static void add_threaded_test(struct dmatest_info *info)
params->norandom = norandom;
params->alignment = alignment;
params->transfer_size = transfer_size;
params->polled = polled;
request_channels(info, DMA_MEMCPY);
request_channels(info, DMA_MEMSET);
......
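The dmatest hunks above add a polled mode that skips DMA_PREP_INTERRUPT and busy-waits on the cookie. A minimal sketch of the two completion styles a dmaengine client can choose between; example_complete and its arguments are illustrative, while the dmaengine calls are the standard API:

#include <linux/dmaengine.h>

/*
 * Sketch of the two completion styles dmatest's new "polled" parameter
 * switches between. "chan" and "tx" are assumed to be an already
 * requested channel and a prepared descriptor; in polled mode the
 * descriptor would have been prepared without DMA_PREP_INTERRUPT.
 */
static enum dma_status example_complete(struct dma_chan *chan,
					struct dma_async_tx_descriptor *tx,
					bool polled)
{
	dma_cookie_t cookie = dmaengine_submit(tx);

	if (dma_submit_error(cookie))
		return DMA_ERROR;

	if (polled)
		/* Busy-wait on the cookie until done or timeout. */
		return dma_sync_wait(chan, cookie);

	/* Interrupt driven: kick the engine, then query the cookie. */
	dma_async_issue_pending(chan);
	return dma_async_is_tx_complete(chan, cookie, NULL, NULL);
}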
......@@ -3,7 +3,9 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o
dw_dmac_core-objs := core.o dw.o idma32.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
dw_dmac-objs := platform.o
dw_dmac-y := platform.o
dw_dmac-$(CONFIG_ACPI) += acpi.o
dw_dmac-$(CONFIG_OF) += of.o
obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o
dw_dmac_pci-objs := pci.o
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2013,2019 Intel Corporation
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include "internal.h"
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
struct acpi_dma_spec *dma_spec = param;
struct dw_dma_slave slave = {
.dma_dev = dma_spec->dev,
.src_id = dma_spec->slave_id,
.dst_id = dma_spec->slave_id,
.m_master = 0,
.p_master = 1,
};
return dw_dma_filter(chan, &slave);
}
void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
struct device *dev = dw->dma.dev;
struct acpi_dma_filter_info *info;
int ret;
if (!has_acpi_companion(dev))
return;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return;
dma_cap_zero(info->dma_cap);
dma_cap_set(DMA_SLAVE, info->dma_cap);
info->filter_fn = dw_dma_acpi_filter;
ret = acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info);
if (ret)
dev_err(dev, "could not register acpi_dma_controller\n");
}
void dw_dma_acpi_controller_free(struct dw_dma *dw)
{
struct device *dev = dw->dma.dev;
if (!has_acpi_companion(dev))
return;
acpi_dma_controller_free(dev);
}
......@@ -23,4 +23,55 @@ int do_dw_dma_enable(struct dw_dma_chip *chip);
extern bool dw_dma_filter(struct dma_chan *chan, void *param);
#ifdef CONFIG_ACPI
void dw_dma_acpi_controller_register(struct dw_dma *dw);
void dw_dma_acpi_controller_free(struct dw_dma *dw);
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
static inline void dw_dma_acpi_controller_free(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */
struct platform_device;
#ifdef CONFIG_OF
struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev);
void dw_dma_of_controller_register(struct dw_dma *dw);
void dw_dma_of_controller_free(struct dw_dma *dw);
#else
static inline struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
{
return NULL;
}
static inline void dw_dma_of_controller_register(struct dw_dma *dw) {}
static inline void dw_dma_of_controller_free(struct dw_dma *dw) {}
#endif
struct dw_dma_chip_pdata {
const struct dw_dma_platform_data *pdata;
int (*probe)(struct dw_dma_chip *chip);
int (*remove)(struct dw_dma_chip *chip);
struct dw_dma_chip *chip;
};
static __maybe_unused const struct dw_dma_chip_pdata dw_dma_chip_pdata = {
.probe = dw_dma_probe,
.remove = dw_dma_remove,
};
static const struct dw_dma_platform_data idma32_pdata = {
.nr_channels = 8,
.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
.chan_priority = CHAN_PRIORITY_ASCENDING,
.block_size = 131071,
.nr_masters = 1,
.data_width = {4},
.multi_block = {1, 1, 1, 1, 1, 1, 1, 1},
};
static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
.pdata = &idma32_pdata,
.probe = idma32_dma_probe,
.remove = idma32_dma_remove,
};
#endif /* _DMA_DW_INTERNAL_H */
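The point of the new struct dw_dma_chip_pdata is to let each bus glue (PCI, ACPI, OF) pick the right probe/remove pair and platform data through its ID table's driver_data instead of hard-coding dw_dma_probe(). A sketch of how a glue driver consumes it, patterned on dw_pci_probe()/dw_probe() in this series; example_glue_probe is a hypothetical wrapper and error handling is trimmed:

#include <linux/device.h>
#include <linux/slab.h>

#include "internal.h"	/* provides struct dw_dma_chip_pdata */

static int example_glue_probe(struct device *dev,
			      const struct dw_dma_chip_pdata *drv_data,
			      struct dw_dma_chip *chip)
{
	struct dw_dma_chip_pdata *data;
	int err;

	/* Copy the const template so the chip pointer is per-device. */
	data = devm_kmemdup(dev, drv_data, sizeof(*drv_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	chip->pdata = data->pdata;	/* NULL for plain DW (autoconfig) */
	data->chip = chip;

	err = data->probe(chip);	/* dw_dma_probe() or idma32_dma_probe() */
	if (err)
		return err;

	dev_set_drvdata(dev, data);	/* remove/PM handlers fetch it back */
	return 0;
}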
// SPDX-License-Identifier: GPL-2.0
/*
* Platform driver for the Synopsys DesignWare DMA Controller
*
* Copyright (C) 2007-2008 Atmel Corporation
* Copyright (C) 2010-2011 ST Microelectronics
* Copyright (C) 2013 Intel Corporation
*/
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include "internal.h"
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct dw_dma *dw = ofdma->of_dma_data;
struct dw_dma_slave slave = {
.dma_dev = dw->dma.dev,
};
dma_cap_mask_t cap;
if (dma_spec->args_count != 3)
return NULL;
slave.src_id = dma_spec->args[0];
slave.dst_id = dma_spec->args[0];
slave.m_master = dma_spec->args[1];
slave.p_master = dma_spec->args[2];
if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
slave.m_master >= dw->pdata->nr_masters ||
slave.p_master >= dw->pdata->nr_masters))
return NULL;
dma_cap_zero(cap);
dma_cap_set(DMA_SLAVE, cap);
/* TODO: there should be a simpler way to do this */
return dma_request_channel(cap, dw_dma_filter, &slave);
}
struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
u32 nr_masters;
u32 nr_channels;
if (!np) {
dev_err(&pdev->dev, "Missing DT data\n");
return NULL;
}
if (of_property_read_u32(np, "dma-masters", &nr_masters))
return NULL;
if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
return NULL;
if (of_property_read_u32(np, "dma-channels", &nr_channels))
return NULL;
if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
return NULL;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
pdata->nr_masters = nr_masters;
pdata->nr_channels = nr_channels;
if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
pdata->chan_allocation_order = (unsigned char)tmp;
if (!of_property_read_u32(np, "chan_priority", &tmp))
pdata->chan_priority = tmp;
if (!of_property_read_u32(np, "block_size", &tmp))
pdata->block_size = tmp;
if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
for (tmp = 0; tmp < nr_masters; tmp++)
pdata->data_width[tmp] = arr[tmp];
} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
for (tmp = 0; tmp < nr_masters; tmp++)
pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
}
if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
for (tmp = 0; tmp < nr_channels; tmp++)
pdata->multi_block[tmp] = mb[tmp];
} else {
for (tmp = 0; tmp < nr_channels; tmp++)
pdata->multi_block[tmp] = 1;
}
if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
if (tmp > CHAN_PROTCTL_MASK)
return NULL;
pdata->protctl = tmp;
}
return pdata;
}
void dw_dma_of_controller_register(struct dw_dma *dw)
{
struct device *dev = dw->dma.dev;
int ret;
if (!dev->of_node)
return;
ret = of_dma_controller_register(dev->of_node, dw_dma_of_xlate, dw);
if (ret)
dev_err(dev, "could not register of_dma_controller\n");
}
void dw_dma_of_controller_free(struct dw_dma *dw)
{
struct device *dev = dw->dma.dev;
if (!dev->of_node)
return;
of_dma_controller_free(dev->of_node);
}
......@@ -12,38 +12,10 @@
#include "internal.h"
struct dw_dma_pci_data {
const struct dw_dma_platform_data *pdata;
int (*probe)(struct dw_dma_chip *chip);
int (*remove)(struct dw_dma_chip *chip);
struct dw_dma_chip *chip;
};
static const struct dw_dma_pci_data dw_pci_data = {
.probe = dw_dma_probe,
.remove = dw_dma_remove,
};
static const struct dw_dma_platform_data idma32_pdata = {
.nr_channels = 8,
.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
.chan_priority = CHAN_PRIORITY_ASCENDING,
.block_size = 131071,
.nr_masters = 1,
.data_width = {4},
.multi_block = {1, 1, 1, 1, 1, 1, 1, 1},
};
static const struct dw_dma_pci_data idma32_pci_data = {
.pdata = &idma32_pdata,
.probe = idma32_dma_probe,
.remove = idma32_dma_remove,
};
static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
const struct dw_dma_pci_data *drv_data = (void *)pid->driver_data;
struct dw_dma_pci_data *data;
const struct dw_dma_chip_pdata *drv_data = (void *)pid->driver_data;
struct dw_dma_chip_pdata *data;
struct dw_dma_chip *chip;
int ret;
......@@ -95,7 +67,7 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
static void dw_pci_remove(struct pci_dev *pdev)
{
struct dw_dma_pci_data *data = pci_get_drvdata(pdev);
struct dw_dma_chip_pdata *data = pci_get_drvdata(pdev);
struct dw_dma_chip *chip = data->chip;
int ret;
......@@ -108,7 +80,7 @@ static void dw_pci_remove(struct pci_dev *pdev)
static int dw_pci_suspend_late(struct device *dev)
{
struct dw_dma_pci_data *data = dev_get_drvdata(dev);
struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
struct dw_dma_chip *chip = data->chip;
return do_dw_dma_disable(chip);
......@@ -116,7 +88,7 @@ static int dw_pci_suspend_late(struct device *dev)
static int dw_pci_resume_early(struct device *dev)
{
struct dw_dma_pci_data *data = dev_get_drvdata(dev);
struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
struct dw_dma_chip *chip = data->chip;
return do_dw_dma_enable(chip);
......@@ -130,29 +102,29 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = {
static const struct pci_device_id dw_pci_id_table[] = {
/* Medfield (GPDMA) */
{ PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_data },
{ PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_dma_chip_pdata },
/* BayTrail */
{ PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_data },
{ PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_data },
{ PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_dma_chip_pdata },
{ PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_dma_chip_pdata },
/* Merrifield */
{ PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&idma32_pci_data },
{ PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&idma32_chip_pdata },
/* Braswell */
{ PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data },
{ PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data },
{ PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_dma_chip_pdata },
{ PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_dma_chip_pdata },
/* Elkhart Lake iDMA 32-bit (OSE DMA) */
{ PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_pci_data },
{ PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_pci_data },
{ PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_pci_data },
/* Elkhart Lake iDMA 32-bit (PSE DMA) */
{ PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_chip_pdata },
{ PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_chip_pdata },
{ PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_chip_pdata },
/* Haswell */
{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data },
{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_dma_chip_pdata },
/* Broadwell */
{ PCI_VDEVICE(INTEL, 0x9ce0), (kernel_ulong_t)&dw_pci_data },
{ PCI_VDEVICE(INTEL, 0x9ce0), (kernel_ulong_t)&dw_dma_chip_pdata },
{ }
};
......
......@@ -17,163 +17,28 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include "internal.h"
#define DRV_NAME "dw_dmac"
static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct dw_dma *dw = ofdma->of_dma_data;
struct dw_dma_slave slave = {
.dma_dev = dw->dma.dev,
};
dma_cap_mask_t cap;
if (dma_spec->args_count != 3)
return NULL;
slave.src_id = dma_spec->args[0];
slave.dst_id = dma_spec->args[0];
slave.m_master = dma_spec->args[1];
slave.p_master = dma_spec->args[2];
if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
slave.m_master >= dw->pdata->nr_masters ||
slave.p_master >= dw->pdata->nr_masters))
return NULL;
dma_cap_zero(cap);
dma_cap_set(DMA_SLAVE, cap);
/* TODO: there should be a simpler way to do this */
return dma_request_channel(cap, dw_dma_filter, &slave);
}
#ifdef CONFIG_ACPI
static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
{
struct acpi_dma_spec *dma_spec = param;
struct dw_dma_slave slave = {
.dma_dev = dma_spec->dev,
.src_id = dma_spec->slave_id,
.dst_id = dma_spec->slave_id,
.m_master = 0,
.p_master = 1,
};
return dw_dma_filter(chan, &slave);
}
static void dw_dma_acpi_controller_register(struct dw_dma *dw)
{
struct device *dev = dw->dma.dev;
struct acpi_dma_filter_info *info;
int ret;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return;
dma_cap_zero(info->dma_cap);
dma_cap_set(DMA_SLAVE, info->dma_cap);
info->filter_fn = dw_dma_acpi_filter;
ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
info);
if (ret)
dev_err(dev, "could not register acpi_dma_controller\n");
}
#else /* !CONFIG_ACPI */
static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_OF
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
u32 nr_masters;
u32 nr_channels;
if (!np) {
dev_err(&pdev->dev, "Missing DT data\n");
return NULL;
}
if (of_property_read_u32(np, "dma-masters", &nr_masters))
return NULL;
if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
return NULL;
if (of_property_read_u32(np, "dma-channels", &nr_channels))
return NULL;
if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
return NULL;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
pdata->nr_masters = nr_masters;
pdata->nr_channels = nr_channels;
if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
pdata->chan_allocation_order = (unsigned char)tmp;
if (!of_property_read_u32(np, "chan_priority", &tmp))
pdata->chan_priority = tmp;
if (!of_property_read_u32(np, "block_size", &tmp))
pdata->block_size = tmp;
if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
for (tmp = 0; tmp < nr_masters; tmp++)
pdata->data_width[tmp] = arr[tmp];
} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
for (tmp = 0; tmp < nr_masters; tmp++)
pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
}
if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
for (tmp = 0; tmp < nr_channels; tmp++)
pdata->multi_block[tmp] = mb[tmp];
} else {
for (tmp = 0; tmp < nr_channels; tmp++)
pdata->multi_block[tmp] = 1;
}
if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
if (tmp > CHAN_PROTCTL_MASK)
return NULL;
pdata->protctl = tmp;
}
return pdata;
}
#else
static inline struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
return NULL;
}
#endif
static int dw_probe(struct platform_device *pdev)
{
const struct dw_dma_chip_pdata *match;
struct dw_dma_chip_pdata *data;
struct dw_dma_chip *chip;
struct device *dev = &pdev->dev;
struct resource *mem;
const struct dw_dma_platform_data *pdata;
int err;
match = device_get_match_data(dev);
if (!match)
return -ENODEV;
data = devm_kmemdup(&pdev->dev, match, sizeof(*match), GFP_KERNEL);
if (!data)
return -ENOMEM;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
......@@ -182,8 +47,7 @@ static int dw_probe(struct platform_device *pdev)
if (chip->irq < 0)
return chip->irq;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
chip->regs = devm_ioremap_resource(dev, mem);
chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
......@@ -191,13 +55,16 @@ static int dw_probe(struct platform_device *pdev)
if (err)
return err;
pdata = dev_get_platdata(dev);
if (!pdata)
pdata = dw_dma_parse_dt(pdev);
if (!data->pdata)
data->pdata = dev_get_platdata(dev);
if (!data->pdata)
data->pdata = dw_dma_parse_dt(pdev);
chip->dev = dev;
chip->id = pdev->id;
chip->pdata = pdata;
chip->pdata = data->pdata;
data->chip = chip;
chip->clk = devm_clk_get(chip->dev, "hclk");
if (IS_ERR(chip->clk))
......@@ -208,21 +75,14 @@ static int dw_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
err = dw_dma_probe(chip);
err = data->probe(chip);
if (err)
goto err_dw_dma_probe;
platform_set_drvdata(pdev, chip);
platform_set_drvdata(pdev, data);
if (pdev->dev.of_node) {
err = of_dma_controller_register(pdev->dev.of_node,
dw_dma_of_xlate, chip->dw);
if (err)
dev_err(&pdev->dev,
"could not register of_dma_controller\n");
}
dw_dma_of_controller_register(chip->dw);
if (ACPI_HANDLE(&pdev->dev))
dw_dma_acpi_controller_register(chip->dw);
return 0;
......@@ -235,12 +95,18 @@ static int dw_probe(struct platform_device *pdev)
static int dw_remove(struct platform_device *pdev)
{
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev);
struct dw_dma_chip *chip = data->chip;
int ret;
dw_dma_acpi_controller_free(chip->dw);
if (pdev->dev.of_node)
of_dma_controller_free(pdev->dev.of_node);
dw_dma_of_controller_free(chip->dw);
ret = data->remove(chip);
if (ret)
dev_warn(chip->dev, "can't remove device properly: %d\n", ret);
dw_dma_remove(chip);
pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(chip->clk);
......@@ -249,7 +115,8 @@ static int dw_remove(struct platform_device *pdev)
static void dw_shutdown(struct platform_device *pdev)
{
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
struct dw_dma_chip_pdata *data = platform_get_drvdata(pdev);
struct dw_dma_chip *chip = data->chip;
/*
* We have to call do_dw_dma_disable() to stop any ongoing transfer. On
......@@ -269,7 +136,7 @@ static void dw_shutdown(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id dw_dma_of_id_table[] = {
{ .compatible = "snps,dma-spear1340" },
{ .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata },
{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
......@@ -277,9 +144,15 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "INTL9C60", 0 },
{ "80862286", 0 },
{ "808622C0", 0 },
{ "INTL9C60", (kernel_ulong_t)&dw_dma_chip_pdata },
{ "80862286", (kernel_ulong_t)&dw_dma_chip_pdata },
{ "808622C0", (kernel_ulong_t)&dw_dma_chip_pdata },
/* Elkhart Lake iDMA 32-bit (PSE DMA) */
{ "80864BB4", (kernel_ulong_t)&idma32_chip_pdata },
{ "80864BB5", (kernel_ulong_t)&idma32_chip_pdata },
{ "80864BB6", (kernel_ulong_t)&idma32_chip_pdata },
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
......@@ -289,7 +162,8 @@ MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
static int dw_suspend_late(struct device *dev)
{
struct dw_dma_chip *chip = dev_get_drvdata(dev);
struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
struct dw_dma_chip *chip = data->chip;
do_dw_dma_disable(chip);
clk_disable_unprepare(chip->clk);
......@@ -299,7 +173,8 @@ static int dw_suspend_late(struct device *dev)
static int dw_resume_early(struct device *dev)
{
struct dw_dma_chip *chip = dev_get_drvdata(dev);
struct dw_dma_chip_pdata *data = dev_get_drvdata(dev);
struct dw_dma_chip *chip = data->chip;
int ret;
ret = clk_prepare_enable(chip->clk);
......
......@@ -90,6 +90,19 @@ static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
iowrite8(val8, addr + off);
}
static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
u32 off, u32 slot, bool enable)
{
u32 val;
if (enable)
val = EDMAMUX_CHCFG_ENBL << 24 | slot;
else
val = EDMAMUX_CHCFG_DIS;
iowrite32(val, addr + off * 4);
}
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
unsigned int slot, bool enable)
{
......@@ -103,6 +116,9 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
if (fsl_chan->edma->drvdata->version == v3)
mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
else
mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
......
......@@ -125,6 +125,7 @@ struct fsl_edma_chan {
dma_addr_t dma_dev_addr;
u32 dma_dev_size;
enum dma_data_direction dma_dir;
char chan_name[16];
};
struct fsl_edma_desc {
......@@ -139,11 +140,13 @@ struct fsl_edma_desc {
enum edma_version {
v1, /* 32ch, Vybrid, mpc57x, etc */
v2, /* 64ch Coldfire */
v3, /* 32ch, i.mx7ulp */
};
struct fsl_edma_drvdata {
enum edma_version version;
u32 dmamuxs;
bool has_dmaclk;
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
......@@ -153,6 +156,7 @@ struct fsl_edma_engine {
void __iomem *membase;
void __iomem *muxbase[DMAMUX_NR];
struct clk *muxclk[DMAMUX_NR];
struct clk *dmaclk;
struct mutex fsl_edma_mutex;
const struct fsl_edma_drvdata *drvdata;
u32 n_chans;
......
......@@ -20,6 +20,13 @@
#include "fsl-edma-common.h"
static void fsl_edma_synchronize(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
vchan_synchronize(&fsl_chan->vchan);
}
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
......@@ -125,16 +132,12 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
int ret;
fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
if (fsl_edma->txirq < 0) {
dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
if (fsl_edma->txirq < 0)
return fsl_edma->txirq;
}
fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
if (fsl_edma->errirq < 0) {
dev_err(&pdev->dev, "Can't get edma-err irq.\n");
if (fsl_edma->errirq < 0)
return fsl_edma->errirq;
}
if (fsl_edma->txirq == fsl_edma->errirq) {
ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
......@@ -162,6 +165,49 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
return 0;
}
static int
fsl_edma2_irq_init(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma)
{
int i, ret, irq;
int count;
count = platform_irq_count(pdev);
dev_dbg(&pdev->dev, "%s Found %d interrupts\r\n", __func__, count);
if (count <= 2) {
dev_err(&pdev->dev, "Interrupts in DTS not correct.\n");
return -EINVAL;
}
/*
* 16 channel independent interrupts + 1 error interrupt on i.mx7ulp.
* 2 channels share one interrupt, for example, ch0/ch16, ch1/ch17...
* For now, just request the irq without the IRQF_SHARED flag, since 16
* channels are enough on i.mx7ulp, whose M4 domain owns some peripherals.
*/
for (i = 0; i < count; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0)
return -ENXIO;
sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);
/* The last IRQ is for eDMA err */
if (i == count - 1)
ret = devm_request_irq(&pdev->dev, irq,
fsl_edma_err_handler,
0, "eDMA2-ERR", fsl_edma);
else
ret = devm_request_irq(&pdev->dev, irq,
fsl_edma_tx_handler, 0,
fsl_edma->chans[i].chan_name,
fsl_edma);
if (ret)
return ret;
}
return 0;
}
static void fsl_edma_irq_exit(
struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
......@@ -187,8 +233,16 @@ static struct fsl_edma_drvdata vf610_data = {
.setup_irq = fsl_edma_irq_init,
};
static struct fsl_edma_drvdata imx7ulp_data = {
.version = v3,
.dmamuxs = 1,
.has_dmaclk = true,
.setup_irq = fsl_edma2_irq_init,
};
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
......@@ -236,6 +290,20 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma_setup_regs(fsl_edma);
regs = &fsl_edma->regs;
if (drvdata->has_dmaclk) {
fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma");
if (IS_ERR(fsl_edma->dmaclk)) {
dev_err(&pdev->dev, "Missing DMA block clock.\n");
return PTR_ERR(fsl_edma->dmaclk);
}
ret = clk_prepare_enable(fsl_edma->dmaclk);
if (ret) {
dev_err(&pdev->dev, "DMA clk block failed.\n");
return ret;
}
}
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
......@@ -302,6 +370,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma->dma_dev.device_pause = fsl_edma_pause;
fsl_edma->dma_dev.device_resume = fsl_edma_resume;
fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
fsl_edma->dma_dev.device_synchronize = fsl_edma_synchronize;
fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
......
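The fsl_edma_synchronize() callback added above is what backs dmaengine_synchronize() for this driver. A minimal sketch of the client-side sequence it makes safe; example_stop_channel is illustrative, the two dmaengine calls are the standard API:

#include <linux/dmaengine.h>

/*
 * Sketch of the terminate/synchronize pairing that a device_synchronize
 * implementation (here, fsl_edma_synchronize() wrapping
 * vchan_synchronize()) makes reliable for clients.
 */
static void example_stop_channel(struct dma_chan *chan)
{
	dmaengine_terminate_async(chan);
	/* ... quiesce the client device, drop locks, etc. ... */
	dmaengine_synchronize(chan);	/* waits out in-flight callbacks */
}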
......@@ -758,10 +758,8 @@ fsl_qdma_irq_init(struct platform_device *pdev,
fsl_qdma->error_irq =
platform_get_irq_byname(pdev, "qdma-error");
if (fsl_qdma->error_irq < 0) {
dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
if (fsl_qdma->error_irq < 0)
return fsl_qdma->error_irq;
}
ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
fsl_qdma_error_handler, 0,
......@@ -776,11 +774,8 @@ fsl_qdma_irq_init(struct platform_device *pdev,
fsl_qdma->queue_irq[i] =
platform_get_irq_byname(pdev, irq_name);
if (fsl_qdma->queue_irq[i] < 0) {
dev_err(&pdev->dev,
"Can't get qdma queue %d irq.\n", i);
if (fsl_qdma->queue_irq[i] < 0)
return fsl_qdma->queue_irq[i];
}
ret = devm_request_irq(&pdev->dev,
fsl_qdma->queue_irq[i],
......
......@@ -556,6 +556,7 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
* We fall-through here intentionally, since a 2D transfer is
* similar to MEMCPY just adding the 2D slot configuration.
*/
/* Fall through */
case IMXDMA_DESC_MEMCPY:
imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
......
......@@ -1886,10 +1886,6 @@ static int sdma_init(struct sdma_engine *sdma)
sdma->context_phys = ccb_phys +
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
/* Zero-out the CCB structures array just allocated */
memset(sdma->channel_control, 0,
MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
/* disable all channels */
for (i = 0; i < sdma->drvdata->num_events; i++)
writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
......
......@@ -286,8 +286,7 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
return NULL;
dca = alloc_dca_provider(&ioat_dca_ops,
sizeof(*ioatdca)
+ (sizeof(struct ioat_dca_slot) * slots));
struct_size(ioatdca, req_slots, slots));
if (!dca)
return NULL;
......
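This hunk, like the stm32-dma one further down, replaces open-coded trailing-array arithmetic with struct_size(). A small sketch of the idiom on a hypothetical structure; the overflow saturation is the point of the helper:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical structure ending in a flexible array member. */
struct example {
	int count;
	u32 slots[];
};

static struct example *example_alloc(int n)
{
	struct example *p;

	/*
	 * struct_size(p, slots, n) == sizeof(*p) + n * sizeof(p->slots[0]),
	 * except that it saturates to SIZE_MAX on overflow, so a huge "n"
	 * makes the allocation fail instead of silently undersizing it.
	 */
	p = kzalloc(struct_size(p, slots, n), GFP_KERNEL);
	if (p)
		p->count = n;
	return p;
}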
......@@ -116,9 +116,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
chain_node) {
pr_debug("\tcookie: %d slot: %d busy: %d "
"this_desc: %#x next_desc: %#llx ack: %d\n",
"this_desc: %pad next_desc: %#llx ack: %d\n",
iter->async_tx.cookie, iter->idx, busy,
iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
&iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
async_tx_test_ack(&iter->async_tx));
prefetch(_iter);
prefetch(&_iter->async_tx);
......@@ -364,13 +364,11 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
struct iop_adma_desc_slot *grp_start, *old_chain_tail;
int slot_cnt;
int slots_per_op;
dma_cookie_t cookie;
dma_addr_t next_dma;
grp_start = sw_desc->group_head;
slot_cnt = grp_start->slot_cnt;
slots_per_op = grp_start->slots_per_op;
spin_lock_bh(&iop_chan->lock);
cookie = dma_cookie_assign(tx);
......
......@@ -547,10 +547,8 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
vchan_init(&c->vc, &mtkd->ddev);
rc = platform_get_irq(pdev, i);
if (rc < 0) {
dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i);
if (rc < 0)
goto err_no_dma;
}
c->irq = rc;
}
......
......@@ -33,7 +33,6 @@
#define MV_XOR_V2_DMA_IMSG_CDAT_OFF 0x014
#define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018
#define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF
#define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0
#define MV_XOR_V2_DMA_IMSG_TIMER_EN BIT(18)
#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C
/* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
......@@ -50,7 +49,6 @@
#define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808
#define MV_XOR_V2_DMA_IMSG_TMOT 0x810
#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK 0x1FFF
#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT 0
/* XOR Global registers */
#define MV_XOR_V2_GLOB_BW_CTRL 0x4
......@@ -261,16 +259,15 @@ void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
/* Configure threshold of number of descriptors, and enable timer */
reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
reg &= ~MV_XOR_V2_DMA_IMSG_THRD_MASK;
reg |= MV_XOR_V2_DONE_IMSG_THRD;
reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
/* Configure Timer Threshold */
reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
reg &= (~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK <<
MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
reg &= ~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK;
reg |= MV_XOR_V2_TIMER_THRD;
writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
}
......
......@@ -1922,9 +1922,10 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
if (ret) {
dev_err(pl330->ddma.dev, "%s:%d Can't to create channels for DMAC!\n",
__func__, __LINE__);
dma_free_coherent(pl330->ddma.dev,
dma_free_attrs(pl330->ddma.dev,
chans * pl330->mcbufsz,
pl330->mcode_cpu, pl330->mcode_bus);
pl330->mcode_cpu, pl330->mcode_bus,
DMA_ATTR_PRIVILEGED);
return ret;
}
......@@ -2003,9 +2004,9 @@ static void pl330_del(struct pl330_dmac *pl330)
/* Free DMAC resources */
dmac_free_threads(pl330);
dma_free_coherent(pl330->ddma.dev,
dma_free_attrs(pl330->ddma.dev,
pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
pl330->mcode_bus);
pl330->mcode_bus, DMA_ATTR_PRIVILEGED);
}
/* forward declaration */
......
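The pl330 change above pairs the free with the DMA_ATTR_PRIVILEGED attribute used at allocation time, since DMA-API attributes must match between alloc and free; dma_free_coherent() is only the attrs == 0 shorthand. A minimal sketch of the matched pair, with placeholder function names:

#include <linux/dma-mapping.h>

static void *example_alloc_privileged(struct device *dev, size_t size,
				      dma_addr_t *dma_handle)
{
	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
			       DMA_ATTR_PRIVILEGED);
}

static void example_free_privileged(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_handle)
{
	/* Must pass the same attrs that were used at allocation time. */
	dma_free_attrs(dev, size, cpu_addr, dma_handle, DMA_ATTR_PRIVILEGED);
}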
......@@ -749,7 +749,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
if (!lldev->tre_ring)
return NULL;
memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
lldev->nr_tres = nr_tres;
......@@ -769,7 +768,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
if (!lldev->evre_ring)
return NULL;
memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;
/* the EVRE ring has to be EVRE_SIZE aligned */
......
......@@ -183,7 +183,6 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "irq resources not found\n");
rc = irq;
goto out;
}
......@@ -388,7 +387,6 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
ret = PTR_ERR(new_pdev);
goto out;
}
of_node_get(child);
new_pdev->dev.of_node = child;
of_dma_configure(&new_pdev->dev, child, true);
/*
......@@ -396,9 +394,14 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
* platforms with or without MSI support.
*/
of_msi_configure(&new_pdev->dev, child);
of_node_put(child);
}
kfree(res);
return ret;
out:
of_node_put(child);
kfree(res);
return ret;
......
......@@ -1237,11 +1237,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
phy->host = s3cdma;
phy->irq = platform_get_irq(pdev, i);
if (phy->irq < 0) {
dev_err(&pdev->dev, "failed to get irq %d, err %d\n",
i, phy->irq);
if (phy->irq < 0)
continue;
}
ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
0, pdev->name, phy);
......
......@@ -1749,10 +1749,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
/* Request the channel interrupt. */
sprintf(pdev_irqname, "ch%u", index);
rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
if (rchan->irq < 0) {
dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
if (rchan->irq < 0)
return -ENODEV;
}
irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
dev_name(dmac->dev), index);
......
......@@ -717,10 +717,8 @@ static int usb_dmac_chan_probe(struct usb_dmac *dmac,
/* Request the channel interrupt. */
sprintf(pdev_irqname, "ch%u", index);
uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
if (uchan->irq < 0) {
dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
if (uchan->irq < 0)
return -ENODEV;
}
irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
dev_name(dmac->dev), index);
......
......@@ -771,10 +771,8 @@ static int st_fdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fdev);
fdev->irq = platform_get_irq(pdev, 0);
if (fdev->irq < 0) {
dev_err(&pdev->dev, "Failed to get irq resource\n");
if (fdev->irq < 0)
return -EINVAL;
}
ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
dev_name(&pdev->dev), fdev);
......
......@@ -243,12 +243,6 @@ static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
writel_relaxed(val, dmadev->base + reg);
}
static struct stm32_dma_desc *stm32_dma_alloc_desc(u32 num_sgs)
{
return kzalloc(sizeof(struct stm32_dma_desc) +
sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_NOWAIT);
}
static int stm32_dma_get_width(struct stm32_dma_chan *chan,
enum dma_slave_buswidth width)
{
......@@ -853,7 +847,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
return NULL;
}
desc = stm32_dma_alloc_desc(sg_len);
desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
if (!desc)
return NULL;
......@@ -954,7 +948,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
num_periods = buf_len / period_len;
desc = stm32_dma_alloc_desc(num_periods);
desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
if (!desc)
return NULL;
......@@ -989,7 +983,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
int i;
num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
desc = stm32_dma_alloc_desc(num_sgs);
desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
if (!desc)
return NULL;
......@@ -1366,12 +1360,8 @@ static int stm32_dma_probe(struct platform_device *pdev)
for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
chan = &dmadev->chan[i];
ret = platform_get_irq(pdev, i);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev,
"No irq resource for chan %d\n", i);
if (ret < 0)
goto err_unregister;
}
chan->irq = ret;
ret = devm_request_irq(&pdev->dev, chan->irq,
......
......@@ -185,8 +185,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
if (!node)
return -ENODEV;
count = device_property_read_u32_array(&pdev->dev, "dma-masters",
NULL, 0);
count = device_property_count_u32(&pdev->dev, "dma-masters");
if (count < 0) {
dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
return -ENODEV;
......
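The stm32-dmamux and stm32-mdma hunks swap the count-by-reading-zero-elements trick for the dedicated counting helper. A small sketch of the equivalence; "example-prop" is a placeholder property name:

#include <linux/property.h>

static int example_count(struct device *dev)
{
	/*
	 * Equivalent to:
	 *   device_property_read_u32_array(dev, "example-prop", NULL, 0);
	 * but says what it means. Returns the element count, or a
	 * negative errno if the property is absent or not u32-sized.
	 */
	return device_property_count_u32(dev, "example-prop");
}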
......@@ -1555,8 +1555,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
nr_requests);
}
count = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
NULL, 0);
count = device_property_count_u32(&pdev->dev, "st,ahb-addr-masks");
if (count < 0)
count = 0;
......@@ -1638,10 +1637,8 @@ static int stm32_mdma_probe(struct platform_device *pdev)
}
dmadev->irq = platform_get_irq(pdev, 0);
if (dmadev->irq < 0) {
dev_err(&pdev->dev, "failed to get IRQ\n");
if (dmadev->irq < 0)
return dmadev->irq;
}
ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
0, dev_name(&pdev->dev), dmadev);
......
......@@ -1132,10 +1132,8 @@ static int sun4i_dma_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq < 0) {
dev_err(&pdev->dev, "Cannot claim IRQ\n");
if (priv->irq < 0)
return priv->irq;
}
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
......
......@@ -1251,10 +1251,8 @@ static int sun6i_dma_probe(struct platform_device *pdev)
return PTR_ERR(sdc->base);
sdc->irq = platform_get_irq(pdev, 0);
-	if (sdc->irq < 0) {
-		dev_err(&pdev->dev, "Cannot claim IRQ\n");
+	if (sdc->irq < 0)
 		return sdc->irq;
-	}
sdc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sdc->clk)) {
......
......@@ -152,6 +152,7 @@ struct tegra_dma_sg_req {
bool last_sg;
struct list_head node;
struct tegra_dma_desc *dma_desc;
unsigned int words_xferred;
};
/*
......@@ -496,6 +497,7 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
nsg_req->configured = true;
nsg_req->words_xferred = 0;
tegra_dma_resume(tdc);
}
......@@ -511,6 +513,7 @@ static void tdc_start_head_req(struct tegra_dma_channel *tdc)
typeof(*sg_req), node);
tegra_dma_start(tdc, sg_req);
sg_req->configured = true;
sg_req->words_xferred = 0;
tdc->busy = true;
}
......@@ -638,6 +641,8 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
dma_desc->cb_count++;
sgreq->words_xferred = 0;
/* If not last req then put at end of pending list */
if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
list_move_tail(&sgreq->node, &tdc->pending_sg_req);
......@@ -797,6 +802,65 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
return 0;
}
static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
struct tegra_dma_sg_req *sg_req)
{
unsigned long status, wcount = 0;
if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
return 0;
if (tdc->tdma->chip_data->support_separate_wcount_reg)
wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
if (!tdc->tdma->chip_data->support_separate_wcount_reg)
wcount = status;
if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
return sg_req->req_len;
wcount = get_current_xferred_count(tdc, sg_req, wcount);
if (!wcount) {
/*
* If wcount wasn't ever polled for this SG before, then
* simply assume that transfer hasn't started yet.
*
* Otherwise it's the end of the transfer.
*
* The alternative would be to poll the status register
* until EOC bit is set or wcount goes UP. That's so
* because EOC bit is getting set only after the last
* burst's completion and counter is less than the actual
* transfer size by 4 bytes. The counter value wraps around
* in a cyclic mode before EOC is set(!), so we can't easily
* distinguish start of transfer from its end.
*/
if (sg_req->words_xferred)
wcount = sg_req->req_len - 4;
} else if (wcount < sg_req->words_xferred) {
/*
* This case will never happen for a non-cyclic transfer.
*
* For a cyclic transfer, although it is possible for the
* next transfer to have already started (resetting the word
* count), this case should still not happen because we should
* have detected that the EOC bit is set and hence the transfer
* was completed.
*/
WARN_ON_ONCE(1);
wcount = sg_req->req_len - 4;
} else {
sg_req->words_xferred = wcount;
}
return wcount;
}
static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
......@@ -806,6 +870,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
enum dma_status ret;
unsigned long flags;
unsigned int residual;
unsigned int bytes = 0;
ret = dma_cookie_status(dc, cookie, txstate);
if (ret == DMA_COMPLETE)
......@@ -825,6 +890,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
dma_desc = sg_req->dma_desc;
if (dma_desc->txd.cookie == cookie) {
bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
ret = dma_desc->dma_status;
goto found;
}
......@@ -836,7 +902,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
found:
if (dma_desc && txstate) {
 		residual = dma_desc->bytes_requested -
-			   (dma_desc->bytes_transferred %
+			   ((dma_desc->bytes_transferred + bytes) %
 			    dma_desc->bytes_requested);
dma_set_residue(txstate, residual);
}
......@@ -1441,12 +1507,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	/*
-	 * XXX The hardware appears to support
-	 * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's
-	 * only used by this driver during tegra_dma_terminate_all()
-	 */
-	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
tdma->dma_dev.device_config = tegra_dma_slave_config;
tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
......
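Advertising DMA_RESIDUE_GRANULARITY_BURST tells clients that device_tx_status() residue is updated per burst, not just per completed segment. A hypothetical consumer-side check (variable names illustrative):

	struct dma_slave_caps caps;

	/* Only trust fine-grained residue if the channel reports it. */
	if (!dma_get_slave_caps(chan, &caps) &&
	    caps.residue_granularity >= DMA_RESIDUE_GRANULARITY_BURST)
		track_pointer_with_residue = true;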
......@@ -42,12 +42,8 @@
#define ADMA_CH_CONFIG_MAX_BUFS 8
#define ADMA_CH_FIFO_CTRL 0x2c
-#define TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(val)	(((val) & 0xf) << 24)
-#define TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(val)	(((val) & 0xf) << 16)
 #define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val)		(((val) & 0xf) << 8)
 #define TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(val)		((val) & 0xf)
-#define TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(val)	(((val) & 0x1f) << 24)
-#define TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(val)	(((val) & 0x1f) << 16)
 #define TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(val)		(((val) & 0x1f) << 8)
 #define TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(val)		((val) & 0x1f)
......@@ -64,14 +60,10 @@
#define TEGRA_ADMA_BURST_COMPLETE_TIME 20
-#define TEGRA210_FIFO_CTRL_DEFAULT	(TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
-					 TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
-					 TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3)	  | \
+#define TEGRA210_FIFO_CTRL_DEFAULT	(TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
 					 TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(3))
-#define TEGRA186_FIFO_CTRL_DEFAULT	(TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
-					 TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
-					 TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
+#define TEGRA186_FIFO_CTRL_DEFAULT	(TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
 					 TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(3))
#define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift)
......
......@@ -91,6 +91,7 @@ struct omap_desc {
bool using_ll;
enum dma_transfer_direction dir;
dma_addr_t dev_addr;
bool polled;
int32_t fi; /* for OMAP_DMA_SYNC_PACKET / double indexing */
int16_t ei; /* for double indexing */
......@@ -202,6 +203,7 @@ static const unsigned es_bytes[] = {
[CSDP_DATA_TYPE_32] = 4,
};
static bool omap_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info omap_dma_info = {
.filter_fn = omap_dma_filter_fn,
};
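The of_dma_filter_info pairing keeps device-tree lookups working after the filter goes static: of_dma_simple_xlate() calls back into the same filter. Registration in the driver's probe path takes roughly this shape (sketch mirroring the existing omap-dma code):

	omap_dma_info.dma_cap = od->ddev.cap_mask;
	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_simple_xlate, &omap_dma_info);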
......@@ -812,31 +814,22 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct omap_chan *c = to_omap_dma_chan(chan);
-	struct virt_dma_desc *vd;
 	enum dma_status ret;
 	unsigned long flags;
+	struct omap_desc *d = NULL;
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (!c->paused && c->running) {
-		uint32_t ccr = omap_dma_chan_read(c, CCR);
-		/*
-		 * The channel is no longer active, set the return value
-		 * accordingly
-		 */
-		if (!(ccr & CCR_ENABLE))
-			ret = DMA_COMPLETE;
-	}
-	if (ret == DMA_COMPLETE || !txstate)
+	if (ret == DMA_COMPLETE)
 		return ret;
 	spin_lock_irqsave(&c->vc.lock, flags);
-	vd = vchan_find_desc(&c->vc, cookie);
-	if (vd) {
-		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
-	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
-		struct omap_desc *d = c->desc;
+	if (c->desc && c->desc->vd.tx.cookie == cookie)
+		d = c->desc;
+	if (!txstate)
+		goto out;
+	if (d) {
 		dma_addr_t pos;
 		if (d->dir == DMA_MEM_TO_DEV)
......@@ -848,10 +841,31 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
 		txstate->residue = omap_dma_desc_size_pos(d, pos);
 	} else {
+		struct virt_dma_desc *vd = vchan_find_desc(&c->vc, cookie);
+		if (vd)
+			txstate->residue = omap_dma_desc_size(
+					to_omap_dma_desc(&vd->tx));
+		else
 			txstate->residue = 0;
 	}
-	if (ret == DMA_IN_PROGRESS && c->paused)
+out:
+	if (ret == DMA_IN_PROGRESS && c->paused) {
 		ret = DMA_PAUSED;
+	} else if (d && d->polled && c->running) {
+		uint32_t ccr = omap_dma_chan_read(c, CCR);
+		/*
+		 * The channel is no longer active, set the return value
+		 * accordingly and mark it as completed
+		 */
+		if (!(ccr & CCR_ENABLE)) {
+			ret = DMA_COMPLETE;
+			omap_dma_start_desc(c);
+			vchan_cookie_complete(&d->vd);
+		}
+	}
spin_unlock_irqrestore(&c->vc.lock, flags);
return ret;
......@@ -1178,7 +1192,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
d->ccr = c->ccr;
d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
-	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
+	d->cicr = CICR_DROP_IE;
+	if (tx_flags & DMA_PREP_INTERRUPT)
+		d->cicr |= CICR_FRAME_IE;
+	else
+		d->polled = true;
d->csdp = data_type;
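A memcpy prepared without DMA_PREP_INTERRUPT now raises no completion interrupt; the client polls tx_status instead, and the CCR_ENABLE check above completes the descriptor. A hypothetical caller (channel and buffers illustrative):

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Poll until the polled-completion path reports DMA_COMPLETE. */
	while (dmaengine_tx_status(chan, cookie, NULL) != DMA_COMPLETE)
		cpu_relax();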
......@@ -1639,7 +1656,7 @@ static struct platform_driver omap_dma_driver = {
},
};
-bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
+static bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
if (chan->device->dev->driver == &omap_dma_driver.driver) {
struct omap_dmadev *od = to_omap_dma_dev(chan->device);
......@@ -1653,7 +1670,6 @@ bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
}
return false;
}
-EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
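With the filter no longer exported, consumers are expected to get channels through the DT path rather than by filtering over all DMA devices. A hypothetical consumer sketch:

	/* "rx" names a dmas/dma-names entry in the consumer's DT node. */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);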
static int omap_dma_init(void)
{
......
......@@ -354,11 +354,8 @@ static int uniphier_mdmac_chan_init(struct platform_device *pdev,
int irq, ret;
irq = platform_get_irq(pdev, chan_id);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "failed to get IRQ number for ch%d\n",
-			chan_id);
+	if (irq < 0)
 		return irq;
-	}
irq_name = devm_kasprintf(dev, GFP_KERNEL, "uniphier-mio-dmac-ch%d",
chan_id);
......
......@@ -1678,20 +1678,16 @@ static int xgene_dma_get_resources(struct platform_device *pdev,
/* Get DMA error interrupt */
irq = platform_get_irq(pdev, 0);
-	if (irq <= 0) {
-		dev_err(&pdev->dev, "Failed to get Error IRQ\n");
+	if (irq <= 0)
 		return -ENXIO;
-	}
pdma->err_irq = irq;
/* Get DMA Rx ring descriptor interrupts for all DMA channels */
for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
irq = platform_get_irq(pdev, i);
-		if (irq <= 0) {
-			dev_err(&pdev->dev, "Failed to get Rx IRQ\n");
+		if (irq <= 0)
 			return -ENXIO;
-		}
pdma->chan[i - 1].rx_irq = irq;
}
......
/*
* TI EDMA DMA engine driver
*
* Copyright 2012 Texas Instruments
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __LINUX_EDMA_H
#define __LINUX_EDMA_H
struct dma_chan;
#if defined(CONFIG_TI_EDMA) || defined(CONFIG_TI_EDMA_MODULE)
bool edma_filter_fn(struct dma_chan *, void *);
#else
static inline bool edma_filter_fn(struct dma_chan *chan, void *param)
{
return false;
}
#endif
#endif
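Stub headers like this exist so legacy board code can hand the filter to dma_request_channel() and still compile when the driver is disabled. A hypothetical legacy caller (ch_num illustrative):

	dma_cap_mask_t mask;
	unsigned int ch_num = 12;	/* hypothetical EDMA event number */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);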
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_OMAP_DMA_H
#define __LINUX_OMAP_DMA_H
#include <linux/omap-dmaengine.h>
/*
* Legacy OMAP DMA handling defines and functions
*
......
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* OMAP DMA Engine support
*/
#ifndef __LINUX_OMAP_DMAENGINE_H
#define __LINUX_OMAP_DMAENGINE_H
struct dma_chan;
#if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE))
bool omap_dma_filter_fn(struct dma_chan *, void *);
#else
static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
{
return false;
}
#endif
#endif /* __LINUX_OMAP_DMAENGINE_H */