Commit 0aa25160 authored by Linus Torvalds

Merge tag 'dmaengine-5.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "New drivers/devices
   - Support for Renesas RZ/G2L dma controller
   - New driver for AMD PTDMA controller

  Updates:
   - Big pile of idxd updates
   - Updates for Altera driver, stm32-dma, dw etc"

* tag 'dmaengine-5.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (83 commits)
  dmaengine: sh: fix some NULL dereferences
  dmaengine: sh: Fix unused initialization of pointer lmdesc
  MAINTAINERS: Fix AMD PTDMA DRIVER entry
  dmaengine: ptdma: remove PT_OFFSET to avoid redefnition
  dmaengine: ptdma: Add debugfs entries for PTDMA
  dmaengine: ptdma: register PTDMA controller as a DMA resource
  dmaengine: ptdma: Initial driver for the AMD PTDMA
  dmaengine: fsl-dpaa2-qdma: Fix spelling mistake "faile" -> "failed"
  dmaengine: idxd: remove interrupt disable for dev_lock
  dmaengine: idxd: remove interrupt disable for cmd_lock
  dmaengine: idxd: fix setting up priv mode for dwq
  dmaengine: xilinx_dma: Set DMA mask for coherent APIs
  dmaengine: ti: k3-psil-j721e: Add entry for CSI2RX
  dmaengine: sh: Add DMAC driver for RZ/G2L SoC
  dmaengine: Extend the dma_slave_width for 128 bytes
  dt-bindings: dma: Document RZ/G2L bindings
  dmaengine: ioat: depends on !UML
  dmaengine: idxd: set descriptor allocation size to threshold for swq
  dmaengine: idxd: make submit failure path consistent on desc freeing
  dmaengine: idxd: remove interrupt flag for completion list spinlock
  ...
parents a3fa7a10 11a427be
......@@ -128,6 +128,8 @@ Date: Aug 28, 2020
KernelVersion: 5.10.0
Contact: dmaengine@vger.kernel.org
Description: The last executed device administrative command's status/error.
The last configuration error is also overloaded onto this attribute.
Writing to it will clear the status.
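A minimal userspace sketch of consuming this attribute, assuming it lives at /sys/bus/dsa/devices/dsa0/cmd_status (the What: line is outside this hunk), is shown below; it is illustrative only:

/*
 * Minimal sketch, assuming the attribute path above: read the last
 * command status, then write to clear it.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/sys/bus/dsa/devices/dsa0/cmd_status", O_RDWR);

	if (fd < 0)
		return 1;

	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("cmd_status: %s", buf);
	}

	/* Any write clears the stored status/error. */
	if (write(fd, "1", 1) < 0)
		perror("clear cmd_status");

	close(fd);
	return 0;
}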
What: /sys/bus/dsa/devices/wq<m>.<n>/block_on_fault
Date: Oct 27, 2020
......@@ -211,6 +213,13 @@ Contact: dmaengine@vger.kernel.org
Description: Indicate whether ATS disable is turned on for the workqueue.
0 indicates ATS is on, and 1 indicates ATS is off for the workqueue.
What: /sys/bus/dsa/devices/wq<m>.<n>/occupancy
Date: May 25, 2021
KernelVersion: 5.14.0
Contact: dmaengine@vger.kernel.org
Description: Show the current number of entries in this WQ if the WQ Occupancy
Support bit in WQ capabilities is 1.
What: /sys/bus/dsa/devices/engine<m>.<n>/group_id
Date: Oct 25, 2019
KernelVersion: 5.6.0
......
......@@ -1758,6 +1758,11 @@
support for the idxd driver. By default it is set to
true (1).
idxd.tc_override= [HW]
Format: <bool>
Allow override of default traffic class configuration
for the device. By default it is set to false (0).
ieee754= [MIPS] Select IEEE Std 754 conformance mode
Format: { strict | legacy | 2008 | relaxed }
Default: strict
......
......@@ -24,13 +24,15 @@ properties:
items:
- description: Control and Status Register Slave Port
- description: Descriptor Slave Port
- description: Response Slave Port
- description: Response Slave Port (Optional)
minItems: 2
reg-names:
items:
- const: csr
- const: desc
- const: resp
minItems: 2
interrupts:
maxItems: 1
......
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/renesas,rz-dmac.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Renesas RZ/G2L DMA Controller
maintainers:
- Biju Das <biju.das.jz@bp.renesas.com>
allOf:
- $ref: "dma-controller.yaml#"
properties:
compatible:
items:
- enum:
- renesas,r9a07g044-dmac # RZ/G2{L,LC}
- const: renesas,rz-dmac
reg:
items:
- description: Control and channel register block
- description: DMA extended resource selector block
interrupts:
maxItems: 17
interrupt-names:
items:
- const: error
- const: ch0
- const: ch1
- const: ch2
- const: ch3
- const: ch4
- const: ch5
- const: ch6
- const: ch7
- const: ch8
- const: ch9
- const: ch10
- const: ch11
- const: ch12
- const: ch13
- const: ch14
- const: ch15
clocks:
items:
- description: DMA main clock
- description: DMA register access clock
'#dma-cells':
const: 1
description:
The cell specifies the encoded MID/RID values of the DMAC port
connected to the DMA client and the slave channel configuration
parameters.
bits[0:9] - Specifies MID/RID value
bit[10] - Specifies DMA request high enable (HIEN)
bit[11] - Specifies DMA request detection type (LVL)
bits[12:14] - Specifies DMAACK output mode (AM)
bit[15] - Specifies Transfer Mode (TM)
dma-channels:
const: 16
power-domains:
maxItems: 1
resets:
items:
- description: Reset for DMA ARESETN reset terminal
- description: Reset for DMA RST_ASYNC reset terminal
required:
- compatible
- reg
- interrupts
- interrupt-names
- clocks
- '#dma-cells'
- dma-channels
- power-domains
- resets
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/r9a07g044-cpg.h>
dmac: dma-controller@11820000 {
compatible = "renesas,r9a07g044-dmac",
"renesas,rz-dmac";
reg = <0x11820000 0x10000>,
<0x11830000 0x10000>;
interrupts = <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 125 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 126 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 127 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 128 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 129 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 130 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 131 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 132 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 133 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 134 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 135 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 136 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 137 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 138 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 139 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 140 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "error",
"ch0", "ch1", "ch2", "ch3",
"ch4", "ch5", "ch6", "ch7",
"ch8", "ch9", "ch10", "ch11",
"ch12", "ch13", "ch14", "ch15";
clocks = <&cpg CPG_MOD R9A07G044_DMAC_ACLK>,
<&cpg CPG_MOD R9A07G044_DMAC_PCLK>;
power-domains = <&cpg>;
resets = <&cpg R9A07G044_DMAC_ARESETN>,
<&cpg R9A07G044_DMAC_RST_ASYNC>;
#dma-cells = <1>;
dma-channels = <16>;
};
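A minimal C sketch of packing the single '#dma-cells' specifier from the bit layout documented above; the RZ_DMAC_* names are invented for this example and are not part of the binding or the driver:

/*
 * Minimal sketch of packing the '#dma-cells' value from the bit layout
 * above. The RZ_DMAC_* names are invented for this example only.
 */
#define RZ_DMAC_MID_RID(x)	((x) & 0x3ff)		/* bits [9:0]   */
#define RZ_DMAC_HIEN		(1u << 10)		/* bit 10       */
#define RZ_DMAC_LVL		(1u << 11)		/* bit 11       */
#define RZ_DMAC_AM(x)		(((x) & 0x7) << 12)	/* bits [14:12] */
#define RZ_DMAC_TM		(1u << 15)		/* bit 15       */

/* Example: MID/RID 0x255, request-high enable, level detection, AM mode 1. */
static const unsigned int rz_dmac_cell =
	RZ_DMAC_MID_RID(0x255) | RZ_DMAC_HIEN | RZ_DMAC_LVL | RZ_DMAC_AM(1);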
......@@ -40,6 +40,13 @@ description: |
0x0: FIFO mode with threshold selectable with bit 0-1
0x1: Direct mode: each DMA request immediately initiates a transfer
from/to the memory, FIFO is bypassed.
-bit 4: alternative DMA request/acknowledge protocol
0x0: Use standard DMA ACK management, where ACK signal is maintained
up to the removal of request and transfer completion
0x1: Use alternative DMA ACK management, where ACK de-assertion does
not wait for the de-assertion of the REQuest, ACK is only managed
by transfer completion. This must only be used on channels
managing transfers for STM32 USART/UART.
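A minimal C sketch of setting bit 4 of the features bitfield described above; the STM32_DMA_FEAT_ALT_ACK name is invented for illustration:

/*
 * Minimal sketch: set bit 4 of the features bitfield described above to
 * select the alternative REQ/ACK protocol. The macro name is invented for
 * this example; per the text, use it only for STM32 USART/UART channels.
 */
#define STM32_DMA_FEAT_ALT_ACK	(1u << 4)

static unsigned int stm32_dma_features(unsigned int base)
{
	/* base carries the other feature bits (FIFO/direct mode, etc.). */
	return base | STM32_DMA_FEAT_ALT_ACK;
}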
maintainers:
......
......@@ -985,6 +985,12 @@ S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/pm/powerplay/
AMD PTDMA DRIVER
M: Sanjay R Mehta <sanju.mehta@amd.com>
L: dmaengine@vger.kernel.org
S: Maintained
F: drivers/dma/ptdma/
AMD SEATTLE DEVICE TREE SUPPORT
M: Brijesh Singh <brijeshkumar.singh@amd.com>
M: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
......
......@@ -277,10 +277,15 @@ config INTEL_IDMA64
Enable DMA support for Intel Low Power Subsystem such as found on
Intel Skylake PCH.
config INTEL_IDXD_BUS
tristate
default INTEL_IDXD
config INTEL_IDXD
tristate "Intel Data Accelerators support"
depends on PCI && X86_64
depends on PCI && X86_64 && !UML
depends on PCI_MSI
depends on PCI_PASID
depends on SBITMAP
select DMA_ENGINE
help
......@@ -291,6 +296,23 @@ config INTEL_IDXD
If unsure, say N.
config INTEL_IDXD_COMPAT
bool "Legacy behavior for idxd driver"
depends on PCI && X86_64
select INTEL_IDXD_BUS
help
Compatible driver to support old /sys/bus/dsa/drivers/dsa behavior.
The old behavior performed driver bind/unbind for device and wq
devices all under the dsa driver. The compat driver will emulate
the legacy behavior in order to allow existing support apps (i.e.
accel-config) to continue to function. It is expected that accel-config
v3.2 and earlier will need the compat mode. A distro with later
accel-config version can disable this compat config.
Say Y if you have old applications that require such behavior.
If unsure, say N.
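A minimal userspace sketch of the legacy bind/unbind flow this option emulates, assuming a workqueue named wq0.0 (the name is only an example):

/*
 * Minimal sketch of the legacy flow this option emulates: bind/unbind a
 * device by writing its name to the dsa driver's bind/unbind attributes.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int dsa_compat_bind(const char *name, int bind)
{
	const char *path = bind ? "/sys/bus/dsa/drivers/dsa/bind"
				: "/sys/bus/dsa/drivers/dsa/unbind";
	ssize_t n;
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	n = write(fd, name, strlen(name));
	close(fd);
	return n < 0 ? -1 : 0;
}

/* Usage: dsa_compat_bind("wq0.0", 1) to bind, dsa_compat_bind("wq0.0", 0) to unbind. */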
# Config symbol that collects all the dependencies that's necessary to
# support shared virtual memory for the devices supported by idxd.
config INTEL_IDXD_SVM
......@@ -315,7 +337,7 @@ config INTEL_IDXD_PERFMON
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86_64
depends on PCI && X86_64 && !UML
select DMA_ENGINE
select DMA_ENGINE_RAID
select DCA
......@@ -716,6 +738,8 @@ source "drivers/dma/bestcomm/Kconfig"
source "drivers/dma/mediatek/Kconfig"
source "drivers/dma/ptdma/Kconfig"
source "drivers/dma/qcom/Kconfig"
source "drivers/dma/dw/Kconfig"
......
......@@ -16,6 +16,7 @@ obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_AMD_PTDMA) += ptdma/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
......@@ -41,7 +42,7 @@ obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_INTEL_IDMA64) += idma64.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IDXD) += idxd/
obj-y += idxd/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
......
......@@ -70,10 +70,22 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
si = (const struct acpi_csrt_shared_info *)&grp[1];
/* Match device by MMIO and IRQ */
/* Match device by MMIO */
if (si->mmio_base_low != lower_32_bits(mem) ||
si->mmio_base_high != upper_32_bits(mem) ||
si->gsi_interrupt != irq)
si->mmio_base_high != upper_32_bits(mem))
return 0;
/*
* acpi_gsi_to_irq() can't be used because some platforms do not save
* registered IRQs in the MP table. Instead we just try to register
* the GSI, which is the core part of the above mentioned function.
*/
ret = acpi_register_gsi(NULL, si->gsi_interrupt, si->interrupt_mode, si->interrupt_polarity);
if (ret < 0)
return 0;
/* Match device by Linux vIRQ */
if (ret != irq)
return 0;
dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
......
......@@ -691,10 +691,14 @@ static void msgdma_tasklet(struct tasklet_struct *t)
spin_lock_irqsave(&mdev->lock, flags);
if (mdev->resp) {
/* Read number of responses that are available */
count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
__func__, __LINE__, count);
} else {
count = 1;
}
while (count--) {
/*
......@@ -703,8 +707,12 @@ static void msgdma_tasklet(struct tasklet_struct *t)
* have any real values, like transferred bytes or error
* bits. So we need to just drop these values.
*/
size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
if (mdev->resp) {
size = ioread32(mdev->resp +
MSGDMA_RESP_BYTES_TRANSFERRED);
status = ioread32(mdev->resp +
MSGDMA_RESP_STATUS);
}
msgdma_complete_descriptor(mdev);
msgdma_chan_desc_cleanup(mdev);
......@@ -757,14 +765,21 @@ static void msgdma_dev_remove(struct msgdma_device *mdev)
}
static int request_and_map(struct platform_device *pdev, const char *name,
struct resource **res, void __iomem **ptr)
struct resource **res, void __iomem **ptr,
bool optional)
{
struct resource *region;
struct device *device = &pdev->dev;
*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
if (*res == NULL) {
dev_err(device, "resource %s not defined\n", name);
if (optional) {
*ptr = NULL;
dev_info(device, "optional resource %s not defined\n",
name);
return 0;
}
dev_err(device, "mandatory resource %s not defined\n", name);
return -ENODEV;
}
......@@ -805,17 +820,17 @@ static int msgdma_probe(struct platform_device *pdev)
mdev->dev = &pdev->dev;
/* Map CSR space */
ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr, false);
if (ret)
return ret;
/* Map (extended) descriptor space */
ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc, false);
if (ret)
return ret;
/* Map response space */
ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp, true);
if (ret)
return ret;
......
......@@ -2240,10 +2240,16 @@ static struct platform_driver at_xdmac_driver = {
static int __init at_xdmac_init(void)
{
return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
return platform_driver_register(&at_xdmac_driver);
}
subsys_initcall(at_xdmac_init);
static void __exit at_xdmac_exit(void)
{
platform_driver_unregister(&at_xdmac_driver);
}
module_exit(at_xdmac_exit);
MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
MODULE_LICENSE("GPL");
......@@ -363,12 +363,16 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
<< CH_CFG_H_TT_FC_POS;
if (chan->chip->apb_regs)
reg |= (chan->id << CH_CFG_H_DST_PER_POS);
break;
case DMA_DEV_TO_MEM:
reg |= (chan->config.device_fc ?
DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC)
<< CH_CFG_H_TT_FC_POS;
if (chan->chip->apb_regs)
reg |= (chan->id << CH_CFG_H_SRC_PER_POS);
break;
default:
break;
......@@ -470,18 +474,13 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan)
pm_runtime_put(chan->chip->dev);
}
static void dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip,
u32 handshake_num, bool set)
static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
{
unsigned long start = 0;
unsigned long reg_value;
unsigned long reg_mask;
unsigned long reg_set;
unsigned long mask;
unsigned long val;
struct axi_dma_chip *chip = chan->chip;
unsigned long reg_value, val;
if (!chip->apb_regs) {
dev_dbg(chip->dev, "apb_regs not initialized\n");
dev_err(chip->dev, "apb_regs not initialized\n");
return;
}
......@@ -490,26 +489,22 @@ static void dw_axi_dma_set_hw_channel(struct axi_dma_chip *chip,
* Lock the DMA channel by assigning a handshake number to the channel.
* Unlock the DMA channel by assigning 0x3F to the channel.
*/
if (set) {
reg_set = UNUSED_CHANNEL;
val = handshake_num;
} else {
reg_set = handshake_num;
if (set)
val = chan->hw_handshake_num;
else
val = UNUSED_CHANNEL;
}
reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
for_each_set_clump8(start, reg_mask, &reg_value, 64) {
if (reg_mask == reg_set) {
mask = GENMASK_ULL(start + 7, start);
reg_value &= ~mask;
reg_value |= rol64(val, start);
lo_hi_writeq(reg_value,
chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
break;
}
}
/* Channel is already allocated, set handshake as per channel ID */
/* A single 64-bit write handles all 8 channels */
reg_value &= ~(DMA_APB_HS_SEL_MASK <<
(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
return;
}
/*
......@@ -742,7 +737,7 @@ dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
llp = hw_desc->llp;
} while (total_segments);
dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);
dw_axi_dma_set_hw_channel(chan, true);
return vchan_tx_prep(&chan->vc, &desc->vd, flags);
......@@ -822,7 +817,7 @@ dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
llp = hw_desc->llp;
} while (num_sgs);
dw_axi_dma_set_hw_channel(chan->chip, chan->hw_handshake_num, true);
dw_axi_dma_set_hw_channel(chan, true);
return vchan_tx_prep(&chan->vc, &desc->vd, flags);
......@@ -1098,8 +1093,7 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
"%s failed to stop\n", axi_chan_name(chan));
if (chan->direction != DMA_MEM_TO_MEM)
dw_axi_dma_set_hw_channel(chan->chip,
chan->hw_handshake_num, false);
dw_axi_dma_set_hw_channel(chan, false);
if (chan->direction == DMA_MEM_TO_DEV)
dw_axi_dma_set_byte_halfword(chan, false);
......@@ -1296,7 +1290,7 @@ static int parse_device_properties(struct axi_dma_chip *chip)
return -EINVAL;
chip->dw->hdata->restrict_axi_burst_len = true;
chip->dw->hdata->axi_rw_burst_len = tmp - 1;
chip->dw->hdata->axi_rw_burst_len = tmp;
}
return 0;
......@@ -1365,7 +1359,6 @@ static int dw_probe(struct platform_device *pdev)
if (ret)
return ret;
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < hdata->nr_channels; i++) {
struct axi_dma_chan *chan = &dw->chan[i];
......@@ -1386,6 +1379,7 @@ static int dw_probe(struct platform_device *pdev)
/* DMA capabilities */
dw->dma.chancnt = hdata->nr_channels;
dw->dma.max_burst = hdata->axi_rw_burst_len;
dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
dw->dma.directions = BIT(DMA_MEM_TO_MEM);
......
......@@ -184,6 +184,8 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
#define DMAC_APB_HALFWORD_WR_CH_EN 0x020 /* DMAC Halfword write enables */
#define UNUSED_CHANNEL 0x3F /* Set unused DMA channel to 0x3F */
#define DMA_APB_HS_SEL_BIT_SIZE 0x08 /* HW handshake bits per channel */
#define DMA_APB_HS_SEL_MASK 0xFF /* HW handshake select masks */
#define MAX_BLOCK_SIZE 0x1000 /* 1024 blocks * 4 bytes data width */
/* DMAC_CFG */
......@@ -256,6 +258,8 @@ enum {
/* CH_CFG_H */
#define CH_CFG_H_PRIORITY_POS 17
#define CH_CFG_H_DST_PER_POS 12
#define CH_CFG_H_SRC_PER_POS 7
#define CH_CFG_H_HS_SEL_DST_POS 4
#define CH_CFG_H_HS_SEL_SRC_POS 3
enum {
......
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2013,2018 Intel Corporation
// Copyright (C) 2013,2018,2020-2021 Intel Corporation
#include <linux/bitops.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "internal.h"
static void idma32_initialize_chan(struct dw_dma_chan *dwc)
#define DMA_CTL_CH(x) (0x1000 + (x) * 4)
#define DMA_SRC_ADDR_FILLIN(x) (0x1100 + (x) * 4)
#define DMA_DST_ADDR_FILLIN(x) (0x1200 + (x) * 4)
#define DMA_XBAR_SEL(x) (0x1300 + (x) * 4)
#define DMA_REGACCESS_CHID_CFG (0x1400)
#define CTL_CH_TRANSFER_MODE_MASK GENMASK(1, 0)
#define CTL_CH_TRANSFER_MODE_S2S 0
#define CTL_CH_TRANSFER_MODE_S2D 1
#define CTL_CH_TRANSFER_MODE_D2S 2
#define CTL_CH_TRANSFER_MODE_D2D 3
#define CTL_CH_RD_RS_MASK GENMASK(4, 3)
#define CTL_CH_WR_RS_MASK GENMASK(6, 5)
#define CTL_CH_RD_NON_SNOOP_BIT BIT(8)
#define CTL_CH_WR_NON_SNOOP_BIT BIT(9)
#define XBAR_SEL_DEVID_MASK GENMASK(15, 0)
#define XBAR_SEL_RX_TX_BIT BIT(16)
#define XBAR_SEL_RX_TX_SHIFT 16
#define REGACCESS_CHID_MASK GENMASK(2, 0)
static unsigned int idma32_get_slave_devfn(struct dw_dma_chan *dwc)
{
struct device *slave = dwc->chan.slave;
if (!slave || !dev_is_pci(slave))
return 0;
return to_pci_dev(slave)->devfn;
}
static void idma32_initialize_chan_xbar(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
void __iomem *misc = __dw_regs(dw);
u32 cfghi = 0, cfglo = 0;
u8 dst_id, src_id;
u32 value;
/* DMA Channel ID Configuration register must be programmed first */
value = readl(misc + DMA_REGACCESS_CHID_CFG);
value &= ~REGACCESS_CHID_MASK;
value |= dwc->chan.chan_id;
writel(value, misc + DMA_REGACCESS_CHID_CFG);
/* Configure channel attributes */
value = readl(misc + DMA_CTL_CH(dwc->chan.chan_id));
value &= ~(CTL_CH_RD_NON_SNOOP_BIT | CTL_CH_WR_NON_SNOOP_BIT);
value &= ~(CTL_CH_RD_RS_MASK | CTL_CH_WR_RS_MASK);
value &= ~CTL_CH_TRANSFER_MODE_MASK;
switch (dwc->direction) {
case DMA_MEM_TO_DEV:
value |= CTL_CH_TRANSFER_MODE_D2S;
value |= CTL_CH_WR_NON_SNOOP_BIT;
break;
case DMA_DEV_TO_MEM:
value |= CTL_CH_TRANSFER_MODE_S2D;
value |= CTL_CH_RD_NON_SNOOP_BIT;
break;
default:
/*
* Memory-to-Memory and Device-to-Device are ignored for now.
*
* For Memory-to-Memory transfers we would need to set mode
* and disable snooping on both sides.
*/
return;
}
writel(value, misc + DMA_CTL_CH(dwc->chan.chan_id));
/* Configure crossbar selection */
value = readl(misc + DMA_XBAR_SEL(dwc->chan.chan_id));
/* DEVFN selection */
value &= ~XBAR_SEL_DEVID_MASK;
value |= idma32_get_slave_devfn(dwc);
switch (dwc->direction) {
case DMA_MEM_TO_DEV:
value |= XBAR_SEL_RX_TX_BIT;
break;
case DMA_DEV_TO_MEM:
value &= ~XBAR_SEL_RX_TX_BIT;
break;
default:
/* Memory-to-Memory and Device-to-Device are ignored for now */
return;
}
writel(value, misc + DMA_XBAR_SEL(dwc->chan.chan_id));
/* Configure DMA channel low and high registers */
switch (dwc->direction) {
case DMA_MEM_TO_DEV:
dst_id = dwc->chan.chan_id;
src_id = dwc->dws.src_id;
break;
case DMA_DEV_TO_MEM:
dst_id = dwc->dws.dst_id;
src_id = dwc->chan.chan_id;
break;
default:
/* Memory-to-Memory and Device-to-Device are ignored for now */
return;
}
/* Set default burst alignment */
cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
/* Low 4 bits of the request lines */
cfghi |= IDMA32C_CFGH_DST_PER(dst_id & 0xf);
cfghi |= IDMA32C_CFGH_SRC_PER(src_id & 0xf);
/* Request line extension (2 bits) */
cfghi |= IDMA32C_CFGH_DST_PER_EXT(dst_id >> 4 & 0x3);
cfghi |= IDMA32C_CFGH_SRC_PER_EXT(src_id >> 4 & 0x3);
channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
}
static void idma32_initialize_chan_generic(struct dw_dma_chan *dwc)
{
u32 cfghi = 0;
u32 cfglo = 0;
......@@ -134,7 +263,10 @@ int idma32_dma_probe(struct dw_dma_chip *chip)
return -ENOMEM;
/* Channel operations */
dw->initialize_chan = idma32_initialize_chan;
if (chip->pdata->quirks & DW_DMA_QUIRK_XBAR_PRESENT)
dw->initialize_chan = idma32_initialize_chan_xbar;
else
dw->initialize_chan = idma32_initialize_chan_generic;
dw->suspend_chan = idma32_suspend_chan;
dw->resume_chan = idma32_resume_chan;
dw->prepare_ctllo = idma32_prepare_ctllo;
......
......@@ -74,4 +74,20 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = {
.remove = idma32_dma_remove,
};
static const struct dw_dma_platform_data xbar_pdata = {
.nr_channels = 8,
.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
.chan_priority = CHAN_PRIORITY_ASCENDING,
.block_size = 131071,
.nr_masters = 1,
.data_width = {4},
.quirks = DW_DMA_QUIRK_XBAR_PRESENT,
};
static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = {
.pdata = &xbar_pdata,
.probe = idma32_dma_probe,
.remove = idma32_dma_remove,
};
#endif /* _DMA_DW_INTERNAL_H */
......@@ -50,15 +50,10 @@ struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
u32 nr_masters;
u32 nr_channels;
if (!np) {
dev_err(&pdev->dev, "Missing DT data\n");
return NULL;
}
if (of_property_read_u32(np, "dma-masters", &nr_masters))
return NULL;
if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
......@@ -76,41 +71,29 @@ struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
pdata->nr_masters = nr_masters;
pdata->nr_channels = nr_channels;
if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
pdata->chan_allocation_order = (unsigned char)tmp;
of_property_read_u32(np, "chan_allocation_order", &pdata->chan_allocation_order);
of_property_read_u32(np, "chan_priority", &pdata->chan_priority);
if (!of_property_read_u32(np, "chan_priority", &tmp))
pdata->chan_priority = tmp;
of_property_read_u32(np, "block_size", &pdata->block_size);
if (!of_property_read_u32(np, "block_size", &tmp))
pdata->block_size = tmp;
if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
for (tmp = 0; tmp < nr_masters; tmp++)
pdata->data_width[tmp] = arr[tmp];
} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
/* Try deprecated property first */
if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
for (tmp = 0; tmp < nr_masters; tmp++)
pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
}
if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
for (tmp = 0; tmp < nr_channels; tmp++)
pdata->multi_block[tmp] = mb[tmp];
} else {
for (tmp = 0; tmp < nr_channels; tmp++)
pdata->multi_block[tmp] = 1;
}
/* If "data_width" and "data-width" both provided use the latter one */
of_property_read_u32_array(np, "data-width", pdata->data_width, nr_masters);
memset32(pdata->multi_block, 1, nr_channels);
of_property_read_u32_array(np, "multi-block", pdata->multi_block, nr_channels);
if (of_property_read_u32_array(np, "snps,max-burst-len", pdata->max_burst,
nr_channels)) {
memset32(pdata->max_burst, DW_DMA_MAX_BURST, nr_channels);
}
of_property_read_u32_array(np, "snps,max-burst-len", pdata->max_burst, nr_channels);
if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
if (tmp > CHAN_PROTCTL_MASK)
of_property_read_u32(np, "snps,dma-protection-control", &pdata->protctl);
if (pdata->protctl > CHAN_PROTCTL_MASK)
return NULL;
pdata->protctl = tmp;
}
return pdata;
}
......
......@@ -120,9 +120,9 @@ static const struct pci_device_id dw_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_dma_chip_pdata },
/* Elkhart Lake iDMA 32-bit (PSE DMA) */
{ PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_chip_pdata },
{ PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_chip_pdata },
{ PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_chip_pdata },
{ PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&xbar_chip_pdata },
{ PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&xbar_chip_pdata },
{ PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&xbar_chip_pdata },
/* Haswell */
{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_dma_chip_pdata },
......
......@@ -149,9 +149,9 @@ static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "808622C0", (kernel_ulong_t)&dw_dma_chip_pdata },
/* Elkhart Lake iDMA 32-bit (PSE DMA) */
{ "80864BB4", (kernel_ulong_t)&idma32_chip_pdata },
{ "80864BB5", (kernel_ulong_t)&idma32_chip_pdata },
{ "80864BB6", (kernel_ulong_t)&idma32_chip_pdata },
{ "80864BB4", (kernel_ulong_t)&xbar_chip_pdata },
{ "80864BB5", (kernel_ulong_t)&xbar_chip_pdata },
{ "80864BB6", (kernel_ulong_t)&xbar_chip_pdata },
{ }
};
......
......@@ -897,7 +897,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
if (data && data->name)
name = data->name;
ret = clk_enable(edmac->clk);
ret = clk_prepare_enable(edmac->clk);
if (ret)
return ret;
......@@ -936,7 +936,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
fail_free_irq:
free_irq(edmac->irq, edmac);
fail_clk_disable:
clk_disable(edmac->clk);
clk_disable_unprepare(edmac->clk);
return ret;
}
......@@ -969,7 +969,7 @@ static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
list_for_each_entry_safe(desc, d, &list, node)
kfree(desc);
clk_disable(edmac->clk);
clk_disable_unprepare(edmac->clk);
free_irq(edmac->irq, edmac);
}
......
......@@ -291,8 +291,7 @@ static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
if (err) {
list_del(&dpaa2_comp->list);
list_add_tail(&dpaa2_comp->list,
list_move_tail(&dpaa2_comp->list,
&dpaa2_chan->comp_free);
}
}
......@@ -626,8 +625,7 @@ static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
dpaa2_comp = to_fsl_qdma_comp(vdesc);
qchan = dpaa2_comp->qchan;
spin_lock_irqsave(&qchan->queue_lock, flags);
list_del(&dpaa2_comp->list);
list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
list_move_tail(&dpaa2_comp->list, &qchan->comp_free);
spin_unlock_irqrestore(&qchan->queue_lock, flags);
}
......@@ -703,7 +701,7 @@ static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
/* DPDMAI enable */
err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
if (err) {
dev_err(dev, "dpdmai_enable() faile\n");
dev_err(dev, "dpdmai_enable() failed\n");
goto err_enable;
}
......
......@@ -133,11 +133,6 @@ static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val)
writel_relaxed(tmp, addr);
}
static void hisi_dma_free_irq_vectors(void *data)
{
pci_free_irq_vectors(data);
}
static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index,
bool pause)
{
......@@ -544,6 +539,7 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, hdma_dev);
pci_set_master(pdev);
/* This will be freed by 'pcim_release()'. See 'pcim_enable_device()' */
ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM,
PCI_IRQ_MSI);
if (ret < 0) {
......@@ -551,10 +547,6 @@ static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev);
if (ret)
return ret;
dma_dev = &hdma_dev->dma_dev;
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources;
......
ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=IDXD
obj-$(CONFIG_INTEL_IDXD) += idxd.o
idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o
idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o
obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o
idxd_bus-y := bus.o
obj-$(CONFIG_INTEL_IDXD_COMPAT) += idxd_compat.o
idxd_compat-y := compat.o
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include "idxd.h"
int __idxd_driver_register(struct idxd_device_driver *idxd_drv, struct module *owner,
const char *mod_name)
{
struct device_driver *drv = &idxd_drv->drv;
if (!idxd_drv->type) {
pr_debug("driver type not set (%ps)\n", __builtin_return_address(0));
return -EINVAL;
}
drv->name = idxd_drv->name;
drv->bus = &dsa_bus_type;
drv->owner = owner;
drv->mod_name = mod_name;
return driver_register(drv);
}
EXPORT_SYMBOL_GPL(__idxd_driver_register);
void idxd_driver_unregister(struct idxd_device_driver *idxd_drv)
{
driver_unregister(&idxd_drv->drv);
}
EXPORT_SYMBOL_GPL(idxd_driver_unregister);
static int idxd_config_bus_match(struct device *dev,
struct device_driver *drv)
{
struct idxd_device_driver *idxd_drv =
container_of(drv, struct idxd_device_driver, drv);
struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
int i = 0;
while (idxd_drv->type[i] != IDXD_DEV_NONE) {
if (idxd_dev->type == idxd_drv->type[i])
return 1;
i++;
}
return 0;
}
static int idxd_config_bus_probe(struct device *dev)
{
struct idxd_device_driver *idxd_drv =
container_of(dev->driver, struct idxd_device_driver, drv);
struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
return idxd_drv->probe(idxd_dev);
}
static void idxd_config_bus_remove(struct device *dev)
{
struct idxd_device_driver *idxd_drv =
container_of(dev->driver, struct idxd_device_driver, drv);
struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
idxd_drv->remove(idxd_dev);
}
struct bus_type dsa_bus_type = {
.name = "dsa",
.match = idxd_config_bus_match,
.probe = idxd_config_bus_probe,
.remove = idxd_config_bus_remove,
};
EXPORT_SYMBOL_GPL(dsa_bus_type);
static int __init dsa_bus_init(void)
{
return bus_register(&dsa_bus_type);
}
module_init(dsa_bus_init);
static void __exit dsa_bus_exit(void)
{
bus_unregister(&dsa_bus_type);
}
module_exit(dsa_bus_exit);
MODULE_DESCRIPTION("IDXD driver dsa_bus_type driver");
MODULE_LICENSE("GPL v2");
......@@ -41,7 +41,7 @@ struct idxd_user_context {
static void idxd_cdev_dev_release(struct device *dev)
{
struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
struct idxd_cdev_context *cdev_ctx;
struct idxd_wq *wq = idxd_cdev->wq;
......@@ -218,14 +218,13 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_user_context *ctx = filp->private_data;
struct idxd_wq *wq = ctx->wq;
struct idxd_device *idxd = wq->idxd;
unsigned long flags;
__poll_t out = 0;
poll_wait(filp, &wq->err_queue, wait);
spin_lock_irqsave(&idxd->dev_lock, flags);
spin_lock(&idxd->dev_lock);
if (idxd->sw_err.valid)
out = EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&idxd->dev_lock, flags);
spin_unlock(&idxd->dev_lock);
return out;
}
......@@ -256,9 +255,10 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
if (!idxd_cdev)
return -ENOMEM;
idxd_cdev->idxd_dev.type = IDXD_DEV_CDEV;
idxd_cdev->wq = wq;
cdev = &idxd_cdev->cdev;
dev = &idxd_cdev->dev;
dev = cdev_dev(idxd_cdev);
cdev_ctx = &ictx[wq->idxd->data->type];
minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
if (minor < 0) {
......@@ -268,7 +268,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq)
idxd_cdev->minor = minor;
device_initialize(dev);
dev->parent = &wq->conf_dev;
dev->parent = wq_confdev(wq);
dev->bus = &dsa_bus_type;
dev->type = &idxd_cdev_device_type;
dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
......@@ -299,10 +299,67 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
idxd_cdev = wq->idxd_cdev;
wq->idxd_cdev = NULL;
cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
put_device(&idxd_cdev->dev);
cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
put_device(cdev_dev(idxd_cdev));
}
static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
{
struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
struct idxd_device *idxd = wq->idxd;
int rc;
if (idxd->state != IDXD_DEV_ENABLED)
return -ENXIO;
mutex_lock(&wq->wq_lock);
wq->type = IDXD_WQT_USER;
rc = __drv_enable_wq(wq);
if (rc < 0)
goto err;
rc = idxd_wq_add_cdev(wq);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_CDEV_ERR;
goto err_cdev;
}
idxd->cmd_status = 0;
mutex_unlock(&wq->wq_lock);
return 0;
err_cdev:
__drv_disable_wq(wq);
err:
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
return rc;
}
static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
{
struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
mutex_lock(&wq->wq_lock);
idxd_wq_del_cdev(wq);
__drv_disable_wq(wq);
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
}
static enum idxd_dev_type dev_types[] = {
IDXD_DEV_WQ,
IDXD_DEV_NONE,
};
struct idxd_device_driver idxd_user_drv = {
.probe = idxd_user_drv_probe,
.remove = idxd_user_drv_remove,
.name = "user",
.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_user_drv);
int idxd_cdev_register(void)
{
int rc, i;
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/device/bus.h>
#include "idxd.h"
extern int device_driver_attach(struct device_driver *drv, struct device *dev);
extern void device_driver_detach(struct device *dev);
#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
struct driver_attribute driver_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
static ssize_t unbind_store(struct device_driver *drv, const char *buf, size_t count)
{
struct bus_type *bus = drv->bus;
struct device *dev;
int rc = -ENODEV;
dev = bus_find_device_by_name(bus, NULL, buf);
if (dev && dev->driver) {
device_driver_detach(dev);
rc = count;
}
return rc;
}
static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t count)
{
struct bus_type *bus = drv->bus;
struct device *dev;
struct device_driver *alt_drv = NULL;
int rc = -ENODEV;
struct idxd_dev *idxd_dev;
dev = bus_find_device_by_name(bus, NULL, buf);
if (!dev || dev->driver || drv != &dsa_drv.drv)
return -ENODEV;
idxd_dev = confdev_to_idxd_dev(dev);
if (is_idxd_dev(idxd_dev)) {
alt_drv = driver_find("idxd", bus);
} else if (is_idxd_wq_dev(idxd_dev)) {
struct idxd_wq *wq = confdev_to_wq(dev);
if (is_idxd_wq_kernel(wq))
alt_drv = driver_find("dmaengine", bus);
else if (is_idxd_wq_user(wq))
alt_drv = driver_find("user", bus);
}
if (!alt_drv)
return -ENODEV;
rc = device_driver_attach(alt_drv, dev);
if (rc < 0)
return rc;
return count;
}
static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
static struct attribute *dsa_drv_compat_attrs[] = {
&driver_attr_bind.attr,
&driver_attr_unbind.attr,
NULL,
};
static const struct attribute_group dsa_drv_compat_attr_group = {
.attrs = dsa_drv_compat_attrs,
};
static const struct attribute_group *dsa_drv_compat_groups[] = {
&dsa_drv_compat_attr_group,
NULL,
};
static int idxd_dsa_drv_probe(struct idxd_dev *idxd_dev)
{
return -ENODEV;
}
static void idxd_dsa_drv_remove(struct idxd_dev *idxd_dev)
{
}
static enum idxd_dev_type dev_types[] = {
IDXD_DEV_NONE,
};
struct idxd_device_driver dsa_drv = {
.name = "dsa",
.probe = idxd_dsa_drv_probe,
.remove = idxd_dsa_drv_remove,
.type = dev_types,
.drv = {
.suppress_bind_attrs = true,
.groups = dsa_drv_compat_groups,
},
};
module_idxd_driver(dsa_drv);
MODULE_IMPORT_NS(IDXD);
......@@ -69,7 +69,11 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
hw->src_addr = addr_f1;
hw->dst_addr = addr_f2;
hw->xfer_size = len;
hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
/*
* For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
* field instead. This field should be set to 1 for kernel descriptors.
*/
hw->priv = 1;
hw->completion_addr = compl;
}
......@@ -149,10 +153,8 @@ static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
rc = idxd_submit_desc(wq, desc);
if (rc < 0) {
idxd_free_desc(wq, desc);
if (rc < 0)
return rc;
}
return cookie;
}
......@@ -245,7 +247,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
wq->idxd_chan = idxd_chan;
idxd_chan->wq = wq;
get_device(&wq->conf_dev);
get_device(wq_confdev(wq));
return 0;
}
......@@ -260,5 +262,87 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq)
list_del(&chan->device_node);
kfree(wq->idxd_chan);
wq->idxd_chan = NULL;
put_device(&wq->conf_dev);
put_device(wq_confdev(wq));
}
static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
{
struct device *dev = &idxd_dev->conf_dev;
struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
struct idxd_device *idxd = wq->idxd;
int rc;
if (idxd->state != IDXD_DEV_ENABLED)
return -ENXIO;
mutex_lock(&wq->wq_lock);
wq->type = IDXD_WQT_KERNEL;
rc = __drv_enable_wq(wq);
if (rc < 0) {
dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
rc = -ENXIO;
goto err;
}
rc = idxd_wq_alloc_resources(wq);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
dev_dbg(dev, "WQ resource alloc failed\n");
goto err_res_alloc;
}
rc = idxd_wq_init_percpu_ref(wq);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
dev_dbg(dev, "percpu_ref setup failed\n");
goto err_ref;
}
rc = idxd_register_dma_channel(wq);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
dev_dbg(dev, "Failed to register dma channel\n");
goto err_dma;
}
idxd->cmd_status = 0;
mutex_unlock(&wq->wq_lock);
return 0;
err_dma:
idxd_wq_quiesce(wq);
err_ref:
idxd_wq_free_resources(wq);
err_res_alloc:
__drv_disable_wq(wq);
err:
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
return rc;
}
static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
{
struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
mutex_lock(&wq->wq_lock);
idxd_wq_quiesce(wq);
idxd_unregister_dma_channel(wq);
__drv_disable_wq(wq);
idxd_wq_free_resources(wq);
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
}
static enum idxd_dev_type dev_types[] = {
IDXD_DEV_WQ,
IDXD_DEV_NONE,
};
struct idxd_device_driver idxd_dmaengine_drv = {
.probe = idxd_dmaengine_drv_probe,
.remove = idxd_dmaengine_drv_remove,
.name = "dmaengine",
.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);
......@@ -11,14 +11,32 @@
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/perf_event.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#define IDXD_DRIVER_VERSION "1.00"
extern struct kmem_cache *idxd_desc_pool;
extern bool tc_override;
struct idxd_device;
struct idxd_wq;
struct idxd_dev;
enum idxd_dev_type {
IDXD_DEV_NONE = -1,
IDXD_DEV_DSA = 0,
IDXD_DEV_IAX,
IDXD_DEV_WQ,
IDXD_DEV_GROUP,
IDXD_DEV_ENGINE,
IDXD_DEV_CDEV,
IDXD_DEV_MAX_TYPE,
};
struct idxd_dev {
struct device conf_dev;
enum idxd_dev_type type;
};
#define IDXD_REG_TIMEOUT 50
#define IDXD_DRAIN_TIMEOUT 5000
......@@ -34,9 +52,18 @@ enum idxd_type {
#define IDXD_PMU_EVENT_MAX 64
struct idxd_device_driver {
const char *name;
enum idxd_dev_type *type;
int (*probe)(struct idxd_dev *idxd_dev);
void (*remove)(struct idxd_dev *idxd_dev);
struct device_driver drv;
};
extern struct idxd_device_driver dsa_drv;
extern struct idxd_device_driver idxd_drv;
extern struct idxd_device_driver idxd_dmaengine_drv;
extern struct idxd_device_driver idxd_user_drv;
struct idxd_irq_entry {
struct idxd_device *idxd;
int id;
......@@ -51,7 +78,7 @@ struct idxd_irq_entry {
};
struct idxd_group {
struct device conf_dev;
struct idxd_dev idxd_dev;
struct idxd_device *idxd;
struct grpcfg grpcfg;
int id;
......@@ -110,7 +137,7 @@ enum idxd_wq_type {
struct idxd_cdev {
struct idxd_wq *wq;
struct cdev cdev;
struct device dev;
struct idxd_dev idxd_dev;
int minor;
};
......@@ -136,9 +163,10 @@ struct idxd_dma_chan {
struct idxd_wq {
void __iomem *portal;
u32 portal_offset;
struct percpu_ref wq_active;
struct completion wq_dead;
struct device conf_dev;
struct idxd_dev idxd_dev;
struct idxd_cdev *idxd_cdev;
struct wait_queue_head err_queue;
struct idxd_device *idxd;
......@@ -153,7 +181,6 @@ struct idxd_wq {
enum idxd_wq_state state;
unsigned long flags;
union wqcfg *wqcfg;
u32 vec_ptr; /* interrupt steering */
struct dsa_hw_desc **hw_descs;
int num_descs;
union {
......@@ -174,7 +201,7 @@ struct idxd_wq {
};
struct idxd_engine {
struct device conf_dev;
struct idxd_dev idxd_dev;
int id;
struct idxd_group *group;
struct idxd_device *idxd;
......@@ -194,7 +221,6 @@ struct idxd_hw {
enum idxd_device_state {
IDXD_DEV_HALTED = -1,
IDXD_DEV_DISABLED = 0,
IDXD_DEV_CONF_READY,
IDXD_DEV_ENABLED,
};
......@@ -218,7 +244,7 @@ struct idxd_driver_data {
};
struct idxd_device {
struct device conf_dev;
struct idxd_dev idxd_dev;
struct idxd_driver_data *data;
struct list_head list;
struct idxd_hw hw;
......@@ -226,7 +252,7 @@ struct idxd_device {
unsigned long flags;
int id;
int major;
u8 cmd_status;
u32 cmd_status;
struct pci_dev *pdev;
void __iomem *reg_base;
......@@ -290,7 +316,6 @@ struct idxd_desc {
struct list_head list;
int id;
int cpu;
unsigned int vector;
struct idxd_wq *wq;
};
......@@ -302,11 +327,62 @@ enum idxd_completion_status {
IDXD_COMP_DESC_ABORT = 0xff,
};
#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
#define idxd_confdev(idxd) &idxd->idxd_dev.conf_dev
#define wq_confdev(wq) &wq->idxd_dev.conf_dev
#define engine_confdev(engine) &engine->idxd_dev.conf_dev
#define group_confdev(group) &group->idxd_dev.conf_dev
#define cdev_dev(cdev) &cdev->idxd_dev.conf_dev
#define confdev_to_idxd_dev(dev) container_of(dev, struct idxd_dev, conf_dev)
#define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev)
#define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev)
static inline struct idxd_device *confdev_to_idxd(struct device *dev)
{
struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
return idxd_dev_to_idxd(idxd_dev);
}
static inline struct idxd_wq *confdev_to_wq(struct device *dev)
{
struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
return idxd_dev_to_wq(idxd_dev);
}
static inline struct idxd_engine *confdev_to_engine(struct device *dev)
{
struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
return container_of(idxd_dev, struct idxd_engine, idxd_dev);
}
static inline struct idxd_group *confdev_to_group(struct device *dev)
{
struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
return container_of(idxd_dev, struct idxd_group, idxd_dev);
}
static inline struct idxd_cdev *dev_to_cdev(struct device *dev)
{
struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);
return container_of(idxd_dev, struct idxd_cdev, idxd_dev);
}
static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
{
if (type >= IDXD_DEV_MAX_TYPE) {
idev->type = IDXD_DEV_NONE;
return;
}
idev->type = type;
}
extern struct bus_type dsa_bus_type;
extern struct bus_type iax_bus_type;
extern bool support_enqcmd;
extern struct ida idxd_ida;
......@@ -316,24 +392,24 @@ extern struct device_type idxd_wq_device_type;
extern struct device_type idxd_engine_device_type;
extern struct device_type idxd_group_device_type;
static inline bool is_dsa_dev(struct device *dev)
static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{
return dev->type == &dsa_device_type;
return idxd_dev->type == IDXD_DEV_DSA;
}
static inline bool is_iax_dev(struct device *dev)
static inline bool is_iax_dev(struct idxd_dev *idxd_dev)
{
return dev->type == &iax_device_type;
return idxd_dev->type == IDXD_DEV_IAX;
}
static inline bool is_idxd_dev(struct device *dev)
static inline bool is_idxd_dev(struct idxd_dev *idxd_dev)
{
return is_dsa_dev(dev) || is_iax_dev(dev);
return is_dsa_dev(idxd_dev) || is_iax_dev(idxd_dev);
}
static inline bool is_idxd_wq_dev(struct device *dev)
static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev)
{
return dev->type == &idxd_wq_device_type;
return idxd_dev->type == IDXD_DEV_WQ;
}
static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
......@@ -343,11 +419,16 @@ static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
return false;
}
static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
static inline bool is_idxd_wq_user(struct idxd_wq *wq)
{
return wq->type == IDXD_WQT_USER;
}
static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)
{
return wq->type == IDXD_WQT_KERNEL;
}
static inline bool wq_dedicated(struct idxd_wq *wq)
{
return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
......@@ -389,6 +470,24 @@ static inline int idxd_get_wq_portal_full_offset(int wq_id,
return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot);
}
#define IDXD_PORTAL_MASK (PAGE_SIZE - 1)
/*
* Even though this function can be accessed by multiple threads, it is safe to use.
* At worst the address gets used more than once before it gets incremented. We don't
hit a threshold until the iops rate reaches many millions per second. So the occasional
reuse of the same address is tolerable compared to using an atomic variable. This is
* safe on a system that has atomic load/store for 32bit integers. Given that this is an
* Intel iEP device, that should not be a problem.
*/
static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq)
{
int ofs = wq->portal_offset;
wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) & IDXD_PORTAL_MASK;
return wq->portal + ofs;
}
static inline void idxd_wq_get(struct idxd_wq *wq)
{
wq->client_count++;
......@@ -404,6 +503,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
return wq->client_count;
};
int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
struct module *module, const char *mod_name);
#define idxd_driver_register(driver) \
__idxd_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);
#define module_idxd_driver(__idxd_driver) \
module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister)
int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
......@@ -424,13 +533,20 @@ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
/* device control */
int idxd_register_idxd_drv(void);
void idxd_unregister_idxd_drv(void);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int drv_enable_wq(struct idxd_wq *wq);
int __drv_enable_wq(struct idxd_wq *wq);
void drv_disable_wq(struct idxd_wq *wq);
void __drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
void idxd_device_clear_state(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
......@@ -443,12 +559,11 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq, bool reset_config);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
......
......@@ -26,11 +26,16 @@
MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(IDXD);
static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");
#define DRV_NAME "idxd"
bool support_enqcmd;
......@@ -200,6 +205,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
struct idxd_wq *wq;
struct device *conf_dev;
int i, rc;
idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
......@@ -214,15 +220,17 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
goto err;
}
idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
conf_dev = wq_confdev(wq);
wq->id = i;
wq->idxd = idxd;
device_initialize(&wq->conf_dev);
wq->conf_dev.parent = &idxd->conf_dev;
wq->conf_dev.bus = &dsa_bus_type;
wq->conf_dev.type = &idxd_wq_device_type;
rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
device_initialize(wq_confdev(wq));
conf_dev->parent = idxd_confdev(idxd);
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_wq_device_type;
rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
if (rc < 0) {
put_device(&wq->conf_dev);
put_device(conf_dev);
goto err;
}
......@@ -233,7 +241,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
wq->max_batch_size = idxd->max_batch_size;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
put_device(&wq->conf_dev);
put_device(conf_dev);
rc = -ENOMEM;
goto err;
}
......@@ -243,8 +251,11 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
return 0;
err:
while (--i >= 0)
put_device(&idxd->wqs[i]->conf_dev);
while (--i >= 0) {
wq = idxd->wqs[i];
conf_dev = wq_confdev(wq);
put_device(conf_dev);
}
return rc;
}
......@@ -252,6 +263,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
{
struct idxd_engine *engine;
struct device *dev = &idxd->pdev->dev;
struct device *conf_dev;
int i, rc;
idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
......@@ -266,15 +278,17 @@ static int idxd_setup_engines(struct idxd_device *idxd)
goto err;
}
idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
conf_dev = engine_confdev(engine);
engine->id = i;
engine->idxd = idxd;
device_initialize(&engine->conf_dev);
engine->conf_dev.parent = &idxd->conf_dev;
engine->conf_dev.bus = &dsa_bus_type;
engine->conf_dev.type = &idxd_engine_device_type;
rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
device_initialize(conf_dev);
conf_dev->parent = idxd_confdev(idxd);
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_engine_device_type;
rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
if (rc < 0) {
put_device(&engine->conf_dev);
put_device(conf_dev);
goto err;
}
......@@ -284,14 +298,18 @@ static int idxd_setup_engines(struct idxd_device *idxd)
return 0;
err:
while (--i >= 0)
put_device(&idxd->engines[i]->conf_dev);
while (--i >= 0) {
engine = idxd->engines[i];
conf_dev = engine_confdev(engine);
put_device(conf_dev);
}
return rc;
}
static int idxd_setup_groups(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
struct device *conf_dev;
struct idxd_group *group;
int i, rc;
......@@ -307,28 +325,37 @@ static int idxd_setup_groups(struct idxd_device *idxd)
goto err;
}
idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
conf_dev = group_confdev(group);
group->id = i;
group->idxd = idxd;
device_initialize(&group->conf_dev);
group->conf_dev.parent = &idxd->conf_dev;
group->conf_dev.bus = &dsa_bus_type;
group->conf_dev.type = &idxd_group_device_type;
rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
device_initialize(conf_dev);
conf_dev->parent = idxd_confdev(idxd);
conf_dev->bus = &dsa_bus_type;
conf_dev->type = &idxd_group_device_type;
rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
if (rc < 0) {
put_device(&group->conf_dev);
put_device(conf_dev);
goto err;
}
idxd->groups[i] = group;
if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
group->tc_a = 1;
group->tc_b = 1;
} else {
group->tc_a = -1;
group->tc_b = -1;
}
}
return 0;
err:
while (--i >= 0)
put_device(&idxd->groups[i]->conf_dev);
while (--i >= 0) {
group = idxd->groups[i];
put_device(group_confdev(group));
}
return rc;
}
......@@ -337,11 +364,11 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
int i;
for (i = 0; i < idxd->max_groups; i++)
put_device(&idxd->groups[i]->conf_dev);
put_device(group_confdev(idxd->groups[i]));
for (i = 0; i < idxd->max_engines; i++)
put_device(&idxd->engines[i]->conf_dev);
put_device(engine_confdev(idxd->engines[i]));
for (i = 0; i < idxd->max_wqs; i++)
put_device(&idxd->wqs[i]->conf_dev);
put_device(wq_confdev(idxd->wqs[i]));
destroy_workqueue(idxd->wq);
}
......@@ -381,13 +408,13 @@ static int idxd_setup_internals(struct idxd_device *idxd)
err_wkq_create:
for (i = 0; i < idxd->max_groups; i++)
put_device(&idxd->groups[i]->conf_dev);
put_device(group_confdev(idxd->groups[i]));
err_group:
for (i = 0; i < idxd->max_engines; i++)
put_device(&idxd->engines[i]->conf_dev);
put_device(engine_confdev(idxd->engines[i]));
err_engine:
for (i = 0; i < idxd->max_wqs; i++)
put_device(&idxd->wqs[i]->conf_dev);
put_device(wq_confdev(idxd->wqs[i]));
err_wqs:
kfree(idxd->int_handles);
return rc;
......@@ -469,6 +496,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
struct device *dev = &pdev->dev;
struct device *conf_dev;
struct idxd_device *idxd;
int rc;
......@@ -476,19 +504,21 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
if (!idxd)
return NULL;
conf_dev = idxd_confdev(idxd);
idxd->pdev = pdev;
idxd->data = data;
idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
if (idxd->id < 0)
return NULL;
device_initialize(&idxd->conf_dev);
idxd->conf_dev.parent = dev;
idxd->conf_dev.bus = &dsa_bus_type;
idxd->conf_dev.type = idxd->data->dev_type;
rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
device_initialize(conf_dev);
conf_dev->parent = dev;
conf_dev->bus = &dsa_bus_type;
conf_dev->type = idxd->data->dev_type;
rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
if (rc < 0) {
put_device(&idxd->conf_dev);
put_device(conf_dev);
return NULL;
}
......@@ -639,15 +669,9 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev_dbg(dev, "Set DMA masks\n");
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc)
goto err;
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc)
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc)
goto err;
......@@ -668,8 +692,6 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_dev_register;
}
idxd->state = IDXD_DEV_CONF_READY;
dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
idxd->hw.version);
......@@ -680,7 +702,7 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err:
pci_iounmap(pdev, idxd->reg_base);
err_iomap:
put_device(&idxd->conf_dev);
put_device(idxd_confdev(idxd));
err_idxd_alloc:
pci_disable_device(pdev);
return rc;
......@@ -793,7 +815,7 @@ static void idxd_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
destroy_workqueue(idxd->wq);
perfmon_pmu_remove(idxd);
device_unregister(&idxd->conf_dev);
device_unregister(idxd_confdev(idxd));
}
static struct pci_driver idxd_pci_driver = {
......@@ -824,13 +846,17 @@ static int __init idxd_init_module(void)
perfmon_init();
err = idxd_register_bus_type();
err = idxd_driver_register(&idxd_drv);
if (err < 0)
return err;
goto err_idxd_driver_register;
err = idxd_register_driver();
err = idxd_driver_register(&idxd_dmaengine_drv);
if (err < 0)
goto err_idxd_driver_register;
goto err_idxd_dmaengine_driver_register;
err = idxd_driver_register(&idxd_user_drv);
if (err < 0)
goto err_idxd_user_driver_register;
err = idxd_cdev_register();
if (err)
......@@ -845,19 +871,23 @@ static int __init idxd_init_module(void)
err_pci_register:
idxd_cdev_remove();
err_cdev_register:
idxd_unregister_driver();
idxd_driver_unregister(&idxd_user_drv);
err_idxd_user_driver_register:
idxd_driver_unregister(&idxd_dmaengine_drv);
err_idxd_dmaengine_driver_register:
idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
idxd_unregister_bus_type();
return err;
}
module_init(idxd_init_module);
static void __exit idxd_exit_module(void)
{
idxd_unregister_driver();
idxd_driver_unregister(&idxd_user_drv);
idxd_driver_unregister(&idxd_dmaengine_drv);
idxd_driver_unregister(&idxd_drv);
pci_unregister_driver(&idxd_pci_driver);
idxd_cdev_remove();
idxd_unregister_bus_type();
perfmon_exit();
}
module_exit(idxd_exit_module);
......@@ -22,13 +22,6 @@ struct idxd_fault {
struct idxd_device *idxd;
};
static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
enum irq_work_type wtype,
int *processed, u64 data);
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
enum irq_work_type wtype,
int *processed, u64 data);
static void idxd_device_reinit(struct work_struct *work)
{
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
......@@ -51,7 +44,7 @@ static void idxd_device_reinit(struct work_struct *work)
rc = idxd_wq_enable(wq);
if (rc < 0) {
dev_warn(dev, "Unable to re-enable wq %s\n",
dev_name(&wq->conf_dev));
dev_name(wq_confdev(wq)));
}
}
}
......@@ -59,47 +52,7 @@ static void idxd_device_reinit(struct work_struct *work)
return;
out:
idxd_device_wqs_clear_state(idxd);
}
static void idxd_device_fault_work(struct work_struct *work)
{
struct idxd_fault *fault = container_of(work, struct idxd_fault, work);
struct idxd_irq_entry *ie;
int i;
int processed;
int irqcnt = fault->idxd->num_wq_irqs + 1;
for (i = 1; i < irqcnt; i++) {
ie = &fault->idxd->irq_entries[i];
irq_process_work_list(ie, IRQ_WORK_PROCESS_FAULT,
&processed, fault->addr);
if (processed)
break;
irq_process_pending_llist(ie, IRQ_WORK_PROCESS_FAULT,
&processed, fault->addr);
if (processed)
break;
}
kfree(fault);
}
static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
u64 fault_addr)
{
struct idxd_fault *fault;
fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
if (!fault)
return -ENOMEM;
fault->addr = fault_addr;
fault->idxd = idxd;
INIT_WORK(&fault->work, idxd_device_fault_work);
queue_work(idxd->wq, &fault->work);
return 0;
idxd_device_clear_state(idxd);
}
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
......@@ -111,7 +64,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
bool err = false;
if (cause & IDXD_INTC_ERR) {
spin_lock_bh(&idxd->dev_lock);
spin_lock(&idxd->dev_lock);
for (i = 0; i < 4; i++)
idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
IDXD_SWERR_OFFSET + i * sizeof(u64));
......@@ -136,7 +89,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
}
}
spin_unlock_bh(&idxd->dev_lock);
spin_unlock(&idxd->dev_lock);
val |= IDXD_INTC_ERR;
for (i = 0; i < 4; i++)
......@@ -168,15 +121,6 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
if (!err)
return 0;
/*
* This case should rarely happen and typically is due to software
* programming error by the driver.
*/
if (idxd->sw_err.valid &&
idxd->sw_err.desc_valid &&
idxd->sw_err.fault_addr)
idxd_device_schedule_fault_process(idxd, idxd->sw_err.fault_addr);
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
if (gensts.state == IDXD_DEVICE_STATE_HALT) {
idxd->state = IDXD_DEV_HALTED;
......@@ -189,15 +133,15 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
INIT_WORK(&idxd->work, idxd_device_reinit);
queue_work(idxd->wq, &idxd->work);
} else {
spin_lock_bh(&idxd->dev_lock);
spin_lock(&idxd->dev_lock);
idxd_wqs_quiesce(idxd);
idxd_wqs_unmap_portal(idxd);
idxd_device_wqs_clear_state(idxd);
idxd_device_clear_state(idxd);
dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n",
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
"FLR" : "system reset");
spin_unlock_bh(&idxd->dev_lock);
spin_unlock(&idxd->dev_lock);
return -ENXIO;
}
}
......@@ -228,127 +172,79 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
return IRQ_HANDLED;
}
static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
{
/*
* Completion address can be bad as well. Check fault address match for descriptor
* and completion address.
*/
if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
struct idxd_device *idxd = desc->wq->idxd;
struct device *dev = &idxd->pdev->dev;
dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
return true;
}
return false;
}
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
enum irq_work_type wtype,
int *processed, u64 data)
static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
{
struct idxd_desc *desc, *t;
struct llist_node *head;
int queued = 0;
unsigned long flags;
enum idxd_complete_type reason;
*processed = 0;
head = llist_del_all(&irq_entry->pending_llist);
if (!head)
goto out;
if (wtype == IRQ_WORK_NORMAL)
reason = IDXD_COMPLETE_NORMAL;
else
reason = IDXD_COMPLETE_DEV_FAIL;
return;
llist_for_each_entry_safe(desc, t, head, llnode) {
u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
if (status) {
if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
/*
* Check against the original status as ABORT is software defined
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
complete_desc(desc, IDXD_COMPLETE_ABORT);
(*processed)++;
continue;
}
if (unlikely(status != DSA_COMP_SUCCESS))
match_fault(desc, data);
complete_desc(desc, reason);
(*processed)++;
complete_desc(desc, IDXD_COMPLETE_NORMAL);
} else {
spin_lock_irqsave(&irq_entry->list_lock, flags);
spin_lock(&irq_entry->list_lock);
list_add_tail(&desc->list,
&irq_entry->work_list);
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
queued++;
spin_unlock(&irq_entry->list_lock);
}
}
out:
return queued;
}
static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
enum irq_work_type wtype,
int *processed, u64 data)
static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
{
int queued = 0;
unsigned long flags;
LIST_HEAD(flist);
struct idxd_desc *desc, *n;
enum idxd_complete_type reason;
*processed = 0;
if (wtype == IRQ_WORK_NORMAL)
reason = IDXD_COMPLETE_NORMAL;
else
reason = IDXD_COMPLETE_DEV_FAIL;
/*
* This lock protects list corruption from access of list outside of the irq handler
* thread.
*/
spin_lock_irqsave(&irq_entry->list_lock, flags);
spin_lock(&irq_entry->list_lock);
if (list_empty(&irq_entry->work_list)) {
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
return 0;
spin_unlock(&irq_entry->list_lock);
return;
}
list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
if (desc->completion->status) {
list_del(&desc->list);
(*processed)++;
list_add_tail(&desc->list, &flist);
} else {
queued++;
}
}
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
spin_unlock(&irq_entry->list_lock);
list_for_each_entry(desc, &flist, list) {
u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
/*
* Check against the original status as ABORT is software defined
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
complete_desc(desc, IDXD_COMPLETE_ABORT);
continue;
}
if (unlikely(status != DSA_COMP_SUCCESS))
match_fault(desc, data);
complete_desc(desc, reason);
complete_desc(desc, IDXD_COMPLETE_NORMAL);
}
return queued;
}
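The abort handling above relies on comparing the unmasked completion status; a minimal illustration of why, assuming DSA_COMP_STATUS_MASK covers only the low seven bits as in the idxd UAPI header (the helper below is hypothetical, not part of the diff):
/*
 * Illustration only: IDXD_COMP_DESC_ABORT is a software-defined 0xff, so
 * masking first would fold it to 0x7f and the abort marker would be
 * mistaken for an ordinary error status.
 */
static void example_status_check(u8 raw_status)
{
	u8 masked = raw_status & DSA_COMP_STATUS_MASK;

	/* 0xff & 0x7f == 0x7f, so only the raw value can flag an abort */
	if (raw_status == IDXD_COMP_DESC_ABORT)
		pr_debug("descriptor was aborted\n");
	else if (masked != DSA_COMP_SUCCESS)
		pr_debug("hardware error status %#x\n", masked);
}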
static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
irqreturn_t idxd_wq_thread(int irq, void *data)
{
int rc, processed, total = 0;
struct idxd_irq_entry *irq_entry = data;
/*
* There are two lists we are processing. The pending_llist is where
......@@ -367,31 +263,9 @@ static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
* and process the completed entries.
* 4. If the entry is still waiting on hardware, list_add_tail() to
* the work_list.
* 5. Repeat until no more descriptors.
*/
do {
rc = irq_process_work_list(irq_entry, IRQ_WORK_NORMAL,
&processed, 0);
total += processed;
if (rc != 0)
continue;
rc = irq_process_pending_llist(irq_entry, IRQ_WORK_NORMAL,
&processed, 0);
total += processed;
} while (rc != 0);
return total;
}
irqreturn_t idxd_wq_thread(int irq, void *data)
{
struct idxd_irq_entry *irq_entry = data;
int processed;
processed = idxd_desc_process(irq_entry);
if (processed == 0)
return IRQ_NONE;
irq_process_work_list(irq_entry);
irq_process_pending_llist(irq_entry);
return IRQ_HANDLED;
}
......@@ -7,6 +7,9 @@
#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
#define DEVICE_VERSION_1 0x100
#define DEVICE_VERSION_2 0x200
#define IDXD_MMIO_BAR 0
#define IDXD_WQ_BAR 2
#define IDXD_PORTAL_SIZE PAGE_SIZE
......@@ -349,6 +352,9 @@ union wqcfg {
} __packed;
#define WQCFG_PASID_IDX 2
#define WQCFG_OCCUP_IDX 6
#define WQCFG_OCCUP_MASK 0xffff
/*
* This macro calculates the offset into the WQCFG register
......
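The two occupancy defines above back the new wq occupancy readout; a minimal sketch of how they might be used, assuming the elided WQCFG_OFFSET() macro takes the device, the wq id and a dword index (the helper below is hypothetical, not part of the diff):
/* Sketch only: read the 16-bit occupancy field from the mapped WQCFG space. */
static u32 example_wq_occupancy(struct idxd_device *idxd, struct idxd_wq *wq)
{
	u32 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);

	return ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;
}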
......@@ -22,21 +22,13 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
desc->hw->pasid = idxd->pasid;
/*
* Descriptor completion vectors are 1...N for MSIX. We will round
* robin through the N vectors.
* On host, MSIX vector 0 is used for the misc interrupt. Therefore when we match
* vector 1:1 to the WQ id, we need to add 1.
*/
wq->vec_ptr = desc->vector = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
if (!idxd->int_handles) {
desc->hw->int_handle = wq->vec_ptr;
} else {
/*
* int_handles are only for descriptor completion. However for device
* MSIX enumeration, vec 0 is used for misc interrupts. Therefore even
* though we are rotating through 1...N for descriptor interrupts, we
* need to acquire the int_handles from 0..N-1.
*/
desc->hw->int_handle = idxd->int_handles[desc->vector - 1];
}
if (!idxd->int_handles)
desc->hw->int_handle = wq->id + 1;
else
desc->hw->int_handle = idxd->int_handles[wq->id];
return desc;
}
......@@ -67,7 +59,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
if (signal_pending_state(TASK_INTERRUPTIBLE, current))
break;
idx = sbitmap_queue_get(sbq, &cpu);
if (idx > 0)
if (idx >= 0)
break;
schedule();
}
......@@ -114,14 +106,13 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
{
struct idxd_desc *d, *t, *found = NULL;
struct llist_node *head;
unsigned long flags;
desc->completion->status = IDXD_COMP_DESC_ABORT;
/*
* Grab the list lock so it will block the irq thread handler. This allows the
* abort code to locate the descriptor need to be aborted.
*/
spin_lock_irqsave(&ie->list_lock, flags);
spin_lock(&ie->list_lock);
head = llist_del_all(&ie->pending_llist);
if (head) {
llist_for_each_entry_safe(d, t, head, llnode) {
......@@ -135,7 +126,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
if (!found)
found = list_abort_desc(wq, ie, desc);
spin_unlock_irqrestore(&ie->list_lock, flags);
spin_unlock(&ie->list_lock);
if (found)
complete_desc(found, IDXD_COMPLETE_ABORT);
......@@ -148,13 +139,17 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
void __iomem *portal;
int rc;
if (idxd->state != IDXD_DEV_ENABLED)
if (idxd->state != IDXD_DEV_ENABLED) {
idxd_free_desc(wq, desc);
return -EIO;
}
if (!percpu_ref_tryget_live(&wq->wq_active))
if (!percpu_ref_tryget_live(&wq->wq_active)) {
idxd_free_desc(wq, desc);
return -ENXIO;
}
portal = wq->portal;
portal = idxd_wq_portal_addr(wq);
/*
* The wmb() flushes writes to coherent DMA data before
......@@ -168,7 +163,7 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
* that we designated the descriptor to.
*/
if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
ie = &idxd->irq_entries[desc->vector];
ie = &idxd->irq_entries[wq->id + 1];
llist_add(&desc->llnode, &ie->pending_llist);
}
......@@ -183,8 +178,12 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
*/
rc = enqcmds(portal, desc->hw);
if (rc < 0) {
percpu_ref_put(&wq->wq_active);
/* abort operation frees the descriptor */
if (ie)
llist_abort_desc(wq, ie, desc);
else
idxd_free_desc(wq, desc);
return rc;
}
}
......
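With the changes above, idxd_submit_desc() frees the descriptor itself on every failure path; a hedged caller sketch under that contract (the wrapper below is hypothetical, the idxd helpers are the ones shown in this file, and the allocator is assumed to return an ERR_PTR on failure):
/*
 * Hypothetical caller: no idxd_free_desc() in the error path, because a
 * failed submit already released the descriptor.
 */
static int example_issue_desc(struct idxd_wq *wq)
{
	struct idxd_desc *desc;

	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* ... fill desc->hw with the operation here ... */

	return idxd_submit_desc(wq, desc);
}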
......@@ -4319,6 +4319,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
size_t count)
{
unsigned long val;
int err;
if (!count || count > 11)
return -EINVAL;
......@@ -4327,7 +4328,10 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
return -EFAULT;
/* Write a key */
sscanf(buf, "%lx", &val);
err = kstrtoul(buf, 16, &val);
if (err)
return err;
dcr_write(ppc440spe_mq_dcr_host, DCRN_MQ0_XORBA, val);
isync();
......@@ -4368,7 +4372,7 @@ static ssize_t poly_store(struct device_driver *dev, const char *buf,
size_t count)
{
unsigned long reg, val;
int err;
#ifdef CONFIG_440SP
/* 440SP uses default 0x14D polynomial only */
return -EINVAL;
......@@ -4378,7 +4382,9 @@ static ssize_t poly_store(struct device_driver *dev, const char *buf,
return -EINVAL;
/* e.g., 0x14D or 0x11D */
sscanf(buf, "%lx", &val);
err = kstrtoul(buf, 16, &val);
if (err)
return err;
if (val & ~0x1FF)
return -EINVAL;
......
# SPDX-License-Identifier: GPL-2.0-only
config AMD_PTDMA
tristate "AMD PassThru DMA Engine"
depends on X86_64 && PCI
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Enable support for the AMD PTDMA controller. This controller
provides DMA capabilities to perform high bandwidth memory to
memory and IO copy operations. It performs DMA transfer through
queue-based descriptor management. This DMA controller is intended
to be used with AMD Non-Transparent Bridge devices and not for
general purpose peripheral DMA.
# SPDX-License-Identifier: GPL-2.0-only
#
# AMD Passthru DMA driver
#
obj-$(CONFIG_AMD_PTDMA) += ptdma.o
ptdma-objs := ptdma-dev.o ptdma-dmaengine.o ptdma-debugfs.o
ptdma-$(CONFIG_PCI) += ptdma-pci.o
// SPDX-License-Identifier: GPL-2.0-only
/*
* AMD Passthrough DMA device driver
* -- Based on the CCP driver
*
* Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
*
* Author: Sanjay R Mehta <sanju.mehta@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
*/
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "ptdma.h"
/* DebugFS helpers */
#define RI_VERSION_NUM 0x0000003F
#define RI_NUM_VQM 0x00078000
#define RI_NVQM_SHIFT 15
static int pt_debugfs_info_show(struct seq_file *s, void *p)
{
struct pt_device *pt = s->private;
unsigned int regval;
seq_printf(s, "Device name: %s\n", dev_name(pt->dev));
seq_printf(s, " # Queues: %d\n", 1);
seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
regval = ioread32(pt->io_regs + CMD_PT_VERSION);
seq_printf(s, " Version: %d\n", regval & RI_VERSION_NUM);
seq_puts(s, " Engines:");
seq_puts(s, "\n");
seq_printf(s, " Queues: %d\n", (regval & RI_NUM_VQM) >> RI_NVQM_SHIFT);
return 0;
}
/*
* Return a formatted buffer containing the current
* statistics of the PTDMA queue
*/
static int pt_debugfs_stats_show(struct seq_file *s, void *p)
{
struct pt_device *pt = s->private;
seq_printf(s, "Total Interrupts Handled: %ld\n", pt->total_interrupts);
return 0;
}
static int pt_debugfs_queue_show(struct seq_file *s, void *p)
{
struct pt_cmd_queue *cmd_q = s->private;
unsigned int regval;
if (!cmd_q)
return 0;
seq_printf(s, " Pass-Thru: %ld\n", cmd_q->total_pt_ops);
regval = ioread32(cmd_q->reg_control + 0x000C);
seq_puts(s, " Enabled Interrupts:");
if (regval & INT_EMPTY_QUEUE)
seq_puts(s, " EMPTY");
if (regval & INT_QUEUE_STOPPED)
seq_puts(s, " STOPPED");
if (regval & INT_ERROR)
seq_puts(s, " ERROR");
if (regval & INT_COMPLETION)
seq_puts(s, " COMPLETION");
seq_puts(s, "\n");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(pt_debugfs_info);
DEFINE_SHOW_ATTRIBUTE(pt_debugfs_queue);
DEFINE_SHOW_ATTRIBUTE(pt_debugfs_stats);
void ptdma_debugfs_setup(struct pt_device *pt)
{
struct pt_cmd_queue *cmd_q;
struct dentry *debugfs_q_instance;
if (!debugfs_initialized())
return;
debugfs_create_file("info", 0400, pt->dma_dev.dbg_dev_root, pt,
&pt_debugfs_info_fops);
debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt,
&pt_debugfs_stats_fops);
cmd_q = &pt->cmd_q;
debugfs_q_instance =
debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
&pt_debugfs_queue_fops);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* AMD Passthru DMA device driver
* -- Based on the CCP driver
*
* Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
*
* Author: Sanjay R Mehta <sanju.mehta@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
*/
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "ptdma.h"
/* Human-readable error strings */
static char *pt_error_codes[] = {
"",
"ERR 01: ILLEGAL_ENGINE",
"ERR 03: ILLEGAL_FUNCTION_TYPE",
"ERR 04: ILLEGAL_FUNCTION_MODE",
"ERR 06: ILLEGAL_FUNCTION_SIZE",
"ERR 08: ILLEGAL_FUNCTION_RSVD",
"ERR 09: ILLEGAL_BUFFER_LENGTH",
"ERR 10: VLSB_FAULT",
"ERR 11: ILLEGAL_MEM_ADDR",
"ERR 12: ILLEGAL_MEM_SEL",
"ERR 13: ILLEGAL_CONTEXT_ID",
"ERR 15: 0xF Reserved",
"ERR 18: CMD_TIMEOUT",
"ERR 19: IDMA0_AXI_SLVERR",
"ERR 20: IDMA0_AXI_DECERR",
"ERR 21: 0x15 Reserved",
"ERR 22: IDMA1_AXI_SLAVE_FAULT",
"ERR 23: IDMA1_AIXI_DECERR",
"ERR 24: 0x18 Reserved",
"ERR 27: 0x1B Reserved",
"ERR 38: ODMA0_AXI_SLVERR",
"ERR 39: ODMA0_AXI_DECERR",
"ERR 40: 0x28 Reserved",
"ERR 41: ODMA1_AXI_SLVERR",
"ERR 42: ODMA1_AXI_DECERR",
"ERR 43: LSB_PARITY_ERR",
};
static void pt_log_error(struct pt_device *d, int e)
{
dev_err(d->dev, "PTDMA error: %s (0x%x)\n", pt_error_codes[e], e);
}
void pt_start_queue(struct pt_cmd_queue *cmd_q)
{
/* Turn on the run bit */
iowrite32(cmd_q->qcontrol | CMD_Q_RUN, cmd_q->reg_control);
}
void pt_stop_queue(struct pt_cmd_queue *cmd_q)
{
/* Turn off the run bit */
iowrite32(cmd_q->qcontrol & ~CMD_Q_RUN, cmd_q->reg_control);
}
static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd_q)
{
bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
u32 tail;
if (soc) {
desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
desc->dw0 &= ~DWORD0_SOC;
}
mutex_lock(&cmd_q->q_mutex);
/* Copy 32-byte command descriptor to hw queue. */
memcpy(q_desc, desc, 32);
cmd_q->qidx = (cmd_q->qidx + 1) % CMD_Q_LEN;
/* The data used by this command must be flushed to memory */
wmb();
/* Write the new tail address back to the queue register */
tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
iowrite32(tail, cmd_q->reg_control + 0x0004);
/* Turn the queue back on using our cached control register */
pt_start_queue(cmd_q);
mutex_unlock(&cmd_q->q_mutex);
return 0;
}
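The queue-index and tail arithmetic above is easiest to see with numbers; a worked sketch assuming a 32-entry ring of 32-byte descriptors (both sizes are assumptions, not taken from this excerpt):
/*
 * Worked example only: the index wraps before the tail is derived, so after
 * the last slot the new tail points back at the queue base address.
 */
static u32 example_next_tail(u32 qidx, u64 qdma_tail)
{
	const unsigned int q_len = 32;		/* assumed CMD_Q_LEN */
	const unsigned int desc_size = 32;	/* 32-byte descriptor */

	qidx = (qidx + 1) % q_len;		/* 31 wraps to 0 */
	return lower_32_bits(qdma_tail + qidx * desc_size);
}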
int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
struct pt_passthru_engine *pt_engine)
{
struct ptdma_desc desc;
cmd_q->cmd_error = 0;
cmd_q->total_pt_ops++;
memset(&desc, 0, sizeof(desc));
desc.dw0 = CMD_DESC_DW0_VAL;
desc.length = pt_engine->src_len;
desc.src_lo = lower_32_bits(pt_engine->src_dma);
desc.dw3.src_hi = upper_32_bits(pt_engine->src_dma);
desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);
return pt_core_execute_cmd(&desc, cmd_q);
}
static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
{
iowrite32(0, pt->cmd_q.reg_control + 0x000C);
}
static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
{
iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
}
static void pt_do_cmd_complete(unsigned long data)
{
struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;
struct pt_cmd *cmd = tdata->cmd;
struct pt_cmd_queue *cmd_q = &cmd->pt->cmd_q;
u32 tail;
if (cmd_q->cmd_error) {
/*
* Log the error and flush the queue by
* moving the head pointer
*/
tail = lower_32_bits(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
pt_log_error(cmd_q->pt, cmd_q->cmd_error);
iowrite32(tail, cmd_q->reg_control + 0x0008);
}
cmd->pt_cmd_callback(cmd->data, cmd->ret);
}
static irqreturn_t pt_core_irq_handler(int irq, void *data)
{
struct pt_device *pt = data;
struct pt_cmd_queue *cmd_q = &pt->cmd_q;
u32 status;
pt_core_disable_queue_interrupts(pt);
pt->total_interrupts++;
status = ioread32(cmd_q->reg_control + 0x0010);
if (status) {
cmd_q->int_status = status;
cmd_q->q_status = ioread32(cmd_q->reg_control + 0x0100);
cmd_q->q_int_status = ioread32(cmd_q->reg_control + 0x0104);
/* On error, only save the first error value */
if ((status & INT_ERROR) && !cmd_q->cmd_error)
cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
/* Acknowledge the interrupt */
iowrite32(status, cmd_q->reg_control + 0x0010);
pt_core_enable_queue_interrupts(pt);
pt_do_cmd_complete((ulong)&pt->tdata);
}
return IRQ_HANDLED;
}
int pt_core_init(struct pt_device *pt)
{
char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
struct pt_cmd_queue *cmd_q = &pt->cmd_q;
u32 dma_addr_lo, dma_addr_hi;
struct device *dev = pt->dev;
struct dma_pool *dma_pool;
int ret;
/* Allocate a dma pool for the queue */
snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q", dev_name(pt->dev));
dma_pool = dma_pool_create(dma_pool_name, dev,
PT_DMAPOOL_MAX_SIZE,
PT_DMAPOOL_ALIGN, 0);
if (!dma_pool)
return -ENOMEM;
/* ptdma core initialisation */
iowrite32(CMD_CONFIG_VHB_EN, pt->io_regs + CMD_CONFIG_OFFSET);
iowrite32(CMD_QUEUE_PRIO, pt->io_regs + CMD_QUEUE_PRIO_OFFSET);
iowrite32(CMD_TIMEOUT_DISABLE, pt->io_regs + CMD_TIMEOUT_OFFSET);
iowrite32(CMD_CLK_GATE_CONFIG, pt->io_regs + CMD_CLK_GATE_CTL_OFFSET);
iowrite32(CMD_CONFIG_REQID, pt->io_regs + CMD_REQID_CONFIG_OFFSET);
cmd_q->pt = pt;
cmd_q->dma_pool = dma_pool;
mutex_init(&cmd_q->q_mutex);
/* Page alignment satisfies our needs for N <= 128 */
cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
&cmd_q->qbase_dma,
GFP_KERNEL);
if (!cmd_q->qbase) {
dev_err(dev, "unable to allocate command queue\n");
ret = -ENOMEM;
goto e_dma_alloc;
}
cmd_q->qidx = 0;
/* Preset some register values */
cmd_q->reg_control = pt->io_regs + CMD_Q_STATUS_INCR;
/* Turn off the queues and disable interrupts until ready */
pt_core_disable_queue_interrupts(pt);
cmd_q->qcontrol = 0; /* Start with nothing */
iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
ioread32(cmd_q->reg_control + 0x0104);
ioread32(cmd_q->reg_control + 0x0100);
/* Clear the interrupt status */
iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
/* Request an irq */
ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
if (ret)
goto e_pool;
/* Update the device registers with queue information. */
cmd_q->qcontrol &= ~CMD_Q_SIZE;
cmd_q->qcontrol |= FIELD_PREP(CMD_Q_SIZE, QUEUE_SIZE_VAL);
cmd_q->qdma_tail = cmd_q->qbase_dma;
dma_addr_lo = lower_32_bits(cmd_q->qdma_tail);
iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0004);
iowrite32((u32)dma_addr_lo, cmd_q->reg_control + 0x0008);
dma_addr_hi = upper_32_bits(cmd_q->qdma_tail);
cmd_q->qcontrol |= (dma_addr_hi << 16);
iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
pt_core_enable_queue_interrupts(pt);
/* Register the DMA engine support */
ret = pt_dmaengine_register(pt);
if (ret)
goto e_dmaengine;
/* Set up debugfs entries */
ptdma_debugfs_setup(pt);
return 0;
e_dmaengine:
free_irq(pt->pt_irq, pt);
e_dma_alloc:
dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
e_pool:
dev_err(dev, "unable to allocate an IRQ\n");
dma_pool_destroy(pt->cmd_q.dma_pool);
return ret;
}
void pt_core_destroy(struct pt_device *pt)
{
struct device *dev = pt->dev;
struct pt_cmd_queue *cmd_q = &pt->cmd_q;
struct pt_cmd *cmd;
/* Unregister the DMA engine */
pt_dmaengine_unregister(pt);
/* Disable and clear interrupts */
pt_core_disable_queue_interrupts(pt);
/* Turn off the run bit */
pt_stop_queue(cmd_q);
/* Clear the interrupt status */
iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
ioread32(cmd_q->reg_control + 0x0104);
ioread32(cmd_q->reg_control + 0x0100);
free_irq(pt->pt_irq, pt);
dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
cmd_q->qbase_dma);
/* Flush the cmd queue */
while (!list_empty(&pt->cmd)) {
/* Invoke the callback directly with an error code */
cmd = list_first_entry(&pt->cmd, struct pt_cmd, entry);
list_del(&cmd->entry);
cmd->pt_cmd_callback(cmd->data, -ENODEV);
}
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* AMD Passthrough DMA device driver
* -- Based on the CCP driver
*
* Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
*
* Author: Sanjay R Mehta <sanju.mehta@amd.com>
* Author: Gary R Hook <gary.hook@amd.com>
*/
#include "ptdma.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
{
return container_of(dma_chan, struct pt_dma_chan, vc.chan);
}
static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct pt_dma_desc, vd);
}
static void pt_free_chan_resources(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
vchan_free_chan_resources(&chan->vc);
}
static void pt_synchronize(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
vchan_synchronize(&chan->vc);
}
static void pt_do_cleanup(struct virt_dma_desc *vd)
{
struct pt_dma_desc *desc = to_pt_desc(vd);
struct pt_device *pt = desc->pt;
kmem_cache_free(pt->dma_desc_cache, desc);
}
static int pt_dma_start_desc(struct pt_dma_desc *desc)
{
struct pt_passthru_engine *pt_engine;
struct pt_device *pt;
struct pt_cmd *pt_cmd;
struct pt_cmd_queue *cmd_q;
desc->issued_to_hw = 1;
pt_cmd = &desc->pt_cmd;
pt = pt_cmd->pt;
cmd_q = &pt->cmd_q;
pt_engine = &pt_cmd->passthru;
pt->tdata.cmd = pt_cmd;
/* Execute the command */
pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);
return 0;
}
static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
{
/* Get the next DMA descriptor on the active list */
struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);
return vd ? to_pt_desc(vd) : NULL;
}
static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
struct pt_dma_desc *desc)
{
struct dma_async_tx_descriptor *tx_desc;
struct virt_dma_desc *vd;
unsigned long flags;
/* Loop over descriptors until one is found with commands */
do {
if (desc) {
if (!desc->issued_to_hw) {
/* No errors, keep going */
if (desc->status != DMA_ERROR)
return desc;
}
tx_desc = &desc->vd.tx;
vd = &desc->vd;
} else {
tx_desc = NULL;
}
spin_lock_irqsave(&chan->vc.lock, flags);
if (desc) {
if (desc->status != DMA_ERROR)
desc->status = DMA_COMPLETE;
dma_cookie_complete(tx_desc);
dma_descriptor_unmap(tx_desc);
list_del(&desc->vd.node);
}
desc = pt_next_dma_desc(chan);
spin_unlock_irqrestore(&chan->vc.lock, flags);
if (tx_desc) {
dmaengine_desc_get_callback_invoke(tx_desc, NULL);
dma_run_dependencies(tx_desc);
vchan_vdesc_fini(vd);
}
} while (desc);
return NULL;
}
static void pt_cmd_callback(void *data, int err)
{
struct pt_dma_desc *desc = data;
struct dma_chan *dma_chan;
struct pt_dma_chan *chan;
int ret;
if (err == -EINPROGRESS)
return;
dma_chan = desc->vd.tx.chan;
chan = to_pt_chan(dma_chan);
if (err)
desc->status = DMA_ERROR;
while (true) {
/* Check for DMA descriptor completion */
desc = pt_handle_active_desc(chan, desc);
/* Don't submit cmd if no descriptor or DMA is paused */
if (!desc)
break;
ret = pt_dma_start_desc(desc);
if (!ret)
break;
desc->status = DMA_ERROR;
}
}
static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
unsigned long flags)
{
struct pt_dma_desc *desc;
desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
if (!desc)
return NULL;
vchan_tx_prep(&chan->vc, &desc->vd, flags);
desc->pt = chan->pt;
desc->issued_to_hw = 0;
desc->status = DMA_IN_PROGRESS;
return desc;
}
static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
dma_addr_t dst,
dma_addr_t src,
unsigned int len,
unsigned long flags)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_passthru_engine *pt_engine;
struct pt_dma_desc *desc;
struct pt_cmd *pt_cmd;
desc = pt_alloc_dma_desc(chan, flags);
if (!desc)
return NULL;
pt_cmd = &desc->pt_cmd;
pt_cmd->pt = chan->pt;
pt_engine = &pt_cmd->passthru;
pt_cmd->engine = PT_ENGINE_PASSTHRU;
pt_engine->src_dma = src;
pt_engine->dst_dma = dst;
pt_engine->src_len = len;
pt_cmd->pt_cmd_callback = pt_cmd_callback;
pt_cmd->data = desc;
desc->len = len;
return desc;
}
static struct dma_async_tx_descriptor *
pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
dma_addr_t src, size_t len, unsigned long flags)
{
struct pt_dma_desc *desc;
desc = pt_create_desc(dma_chan, dst, src, len, flags);
if (!desc)
return NULL;
return &desc->vd.tx;
}
static struct dma_async_tx_descriptor *
pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_dma_desc *desc;
desc = pt_alloc_dma_desc(chan, flags);
if (!desc)
return NULL;
return &desc->vd.tx;
}
static void pt_issue_pending(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_dma_desc *desc;
unsigned long flags;
spin_lock_irqsave(&chan->vc.lock, flags);
vchan_issue_pending(&chan->vc);
desc = pt_next_dma_desc(chan);
spin_unlock_irqrestore(&chan->vc.lock, flags);
/* If there was nothing active, start processing */
if (desc)
pt_cmd_callback(desc, 0);
}
static int pt_pause(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
unsigned long flags;
spin_lock_irqsave(&chan->vc.lock, flags);
pt_stop_queue(&chan->pt->cmd_q);
spin_unlock_irqrestore(&chan->vc.lock, flags);
return 0;
}
static int pt_resume(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
struct pt_dma_desc *desc = NULL;
unsigned long flags;
spin_lock_irqsave(&chan->vc.lock, flags);
pt_start_queue(&chan->pt->cmd_q);
desc = pt_next_dma_desc(chan);
spin_unlock_irqrestore(&chan->vc.lock, flags);
/* If there was something active, re-start */
if (desc)
pt_cmd_callback(desc, 0);
return 0;
}
static int pt_terminate_all(struct dma_chan *dma_chan)
{
struct pt_dma_chan *chan = to_pt_chan(dma_chan);
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&chan->vc.lock, flags);
vchan_get_all_descriptors(&chan->vc, &head);
spin_unlock_irqrestore(&chan->vc.lock, flags);
vchan_dma_desc_free_list(&chan->vc, &head);
vchan_free_chan_resources(&chan->vc);
return 0;
}
int pt_dmaengine_register(struct pt_device *pt)
{
struct pt_dma_chan *chan;
struct dma_device *dma_dev = &pt->dma_dev;
char *cmd_cache_name;
char *desc_cache_name;
int ret;
pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
GFP_KERNEL);
if (!pt->pt_dma_chan)
return -ENOMEM;
cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
"%s-dmaengine-cmd-cache",
dev_name(pt->dev));
if (!cmd_cache_name)
return -ENOMEM;
desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
"%s-dmaengine-desc-cache",
dev_name(pt->dev));
if (!desc_cache_name) {
ret = -ENOMEM;
goto err_cache;
}
pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
sizeof(struct pt_dma_desc), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!pt->dma_desc_cache) {
ret = -ENOMEM;
goto err_cache;
}
dma_dev->dev = pt->dev;
dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
dma_dev->directions = DMA_MEM_TO_MEM;
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
/*
* PTDMA is intended to be used with the AMD NTB devices, hence
* marking it as DMA_PRIVATE.
*/
dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
INIT_LIST_HEAD(&dma_dev->channels);
chan = pt->pt_dma_chan;
chan->pt = pt;
/* Set base and prep routines */
dma_dev->device_free_chan_resources = pt_free_chan_resources;
dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
dma_dev->device_issue_pending = pt_issue_pending;
dma_dev->device_tx_status = dma_cookie_status;
dma_dev->device_pause = pt_pause;
dma_dev->device_resume = pt_resume;
dma_dev->device_terminate_all = pt_terminate_all;
dma_dev->device_synchronize = pt_synchronize;
chan->vc.desc_free = pt_do_cleanup;
vchan_init(&chan->vc, dma_dev);
dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_reg;
return 0;
err_reg:
kmem_cache_destroy(pt->dma_desc_cache);
err_cache:
kmem_cache_destroy(pt->dma_cmd_cache);
return ret;
}
void pt_dmaengine_unregister(struct pt_device *pt)
{
struct dma_device *dma_dev = &pt->dma_dev;
dma_async_device_unregister(dma_dev);
kmem_cache_destroy(pt->dma_desc_cache);
kmem_cache_destroy(pt->dma_cmd_cache);
}
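Because the controller registers itself as a DMA_PRIVATE memcpy provider, clients reach it through the generic dmaengine API; an illustrative sketch that is not part of this driver (the wrapper name is hypothetical and dst/src are assumed to be pre-mapped DMA addresses):
/* Illustration only: request a memcpy channel, run one transfer, wait. */
static int example_ptdma_copy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -EIO;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	dma_sync_wait(chan, cookie);

	dma_release_channel(chan);
	return 0;
}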
......@@ -47,3 +47,12 @@ config RENESAS_USB_DMAC
help
This driver supports the USB-DMA controller found in the Renesas
SoCs.
config RZ_DMAC
tristate "Renesas RZ/G2L DMA Controller"
depends on ARCH_R9A07G044 || COMPILE_TEST
select RENESAS_DMA
select DMA_VIRTUAL_CHANNELS
help
This driver supports the general purpose DMA controller found in the
Renesas RZ/G2L SoC variants.
......@@ -15,3 +15,4 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
obj-$(CONFIG_RZ_DMAC) += rz-dmac.o
......@@ -466,7 +466,7 @@ static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan,
struct usb_dmac_desc *desc,
int sg_index)
unsigned int sg_index)
{
struct usb_dmac_sg *sg = desc->sg + sg_index;
u32 mem_addr = sg->mem_addr & 0xffffffff;
......
......@@ -1265,6 +1265,7 @@ static const struct of_device_id sprd_dma_match[] = {
{ .compatible = "sprd,sc9860-dma", },
{},
};
MODULE_DEVICE_TABLE(of, sprd_dma_match);
static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
......
......@@ -60,6 +60,7 @@
#define STM32_DMA_SCR_PSIZE_GET(n) ((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n) ((n & 0x3) << 6)
#define STM32_DMA_SCR_TRBUFF BIT(20) /* Bufferable transfer for USART/UART */
#define STM32_DMA_SCR_CT BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS BIT(15) /* Peripheral inc offset size */
......@@ -138,8 +139,9 @@
#define STM32_DMA_THRESHOLD_FTR_MASK GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n) ((n) & STM32_DMA_THRESHOLD_FTR_MASK)
#define STM32_DMA_DIRECT_MODE_MASK BIT(2)
#define STM32_DMA_DIRECT_MODE_GET(n) (((n) & STM32_DMA_DIRECT_MODE_MASK) \
>> 2)
#define STM32_DMA_DIRECT_MODE_GET(n) (((n) & STM32_DMA_DIRECT_MODE_MASK) >> 2)
#define STM32_DMA_ALT_ACK_MODE_MASK BIT(4)
#define STM32_DMA_ALT_ACK_MODE_GET(n) (((n) & STM32_DMA_ALT_ACK_MODE_MASK) >> 4)
enum stm32_dma_width {
STM32_DMA_BYTE,
......@@ -1252,6 +1254,8 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan,
chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
if (STM32_DMA_ALT_ACK_MODE_GET(cfg->features))
chan->chan_reg.dma_scr |= STM32_DMA_SCR_TRBUFF;
}
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
......
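The new ALT_ACK bit extends the feature word that a client passes in its fourth dma cell; a hedged sketch of how the bits defined above compose (illustration only, not part of the driver):
/*
 * Illustration only: bits 1:0 select the FIFO threshold, bit 2 requests
 * direct mode, bit 4 requests bufferable transfers (TRBUFF) for USART/UART.
 */
static u32 example_stm32_features(bool direct_mode, bool bufferable)
{
	u32 features = 0x1 & STM32_DMA_THRESHOLD_FTR_MASK;	/* threshold */

	if (direct_mode)
		features |= STM32_DMA_DIRECT_MODE_MASK;
	if (bufferable)
		features |= STM32_DMA_ALT_ACK_MODE_MASK;

	return features;
}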
......@@ -655,9 +655,8 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
return ret;
}
ret = pm_runtime_get_sync(tdc2dev(tdc));
ret = pm_runtime_resume_and_get(tdc2dev(tdc));
if (ret < 0) {
pm_runtime_put_noidle(tdc2dev(tdc));
free_irq(tdc->irq, tdc);
return ret;
}
......@@ -869,10 +868,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
pm_runtime_put_noidle(&pdev->dev);
if (ret < 0)
goto rpm_disable;
}
ret = tegra_adma_init(tdma);
if (ret)
......
......@@ -434,8 +434,7 @@ static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
struct zynqmp_dma_desc_sw *child, *next;
chan->desc_free_cnt++;
list_del(&sdesc->node);
list_add_tail(&sdesc->node, &chan->free_list);
list_move_tail(&sdesc->node, &chan->free_list);
list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
chan->desc_free_cnt++;
list_move_tail(&child->node, &chan->free_list);
......