Commit 4879b7ae authored by Linus Torvalds

Merge tag 'dmaengine-4.12-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:
 "This time again a smaller update consisting of:

   - support for TI DA8xx dma controller and updates to the cppi driver

   - updates on a bunch of drivers like xilinx, pl08x, stm32-dma, mv_xor,
     ioat, dmatest"

* tag 'dmaengine-4.12-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (35 commits)
  dmaengine: pl08x: remove lock documentation
  dmaengine: pl08x: fix pl08x_dma_chan_state documentation
  dmaengine: pl08x: Use the BIT() macro consistently
  dmaengine: pl080: Fix some missing kerneldoc
  dmaengine: pl080: Cut some unused defines
  dmaengine: dmatest: Add check for supported buffer count (sg_buffers)
  dmaengine: dmatest: Select DMA_ENGINE_RAID as its needed for the slave_sg test
  dmaengine: virt-dma: Convert to use list_for_each_entry_safe()
  dma-debug: use offset_in_page() macro
  dmaengine: mv_xor: use offset_in_page() macro
  dmaengine: dmatest: use offset_in_page() macro
  dmaengine: sun4i: fix invalid argument
  dmaengine: ioat: use setup_timer
  dmaengine: cppi41: Fix an Oops happening in cppi41_dma_probe()
  dmaengine: pl330: remove pdata based initialization
  dmaengine: cppi: fix build error due to bad variable
  dmaengine: imx-sdma: add 1ms delay to ensure SDMA channel is stopped
  dmaengine: cppi41: use managed functions devm_*()
  dmaengine: cppi41: fix cppi41_dma_tx_status() logic
  dmaengine: qcom_hidma: pause the channel on shutdown
  ...
parents ecc721a7 be13ec66
@@ -18,10 +18,26 @@ Required properties:
 - phy-names: Should be "usb-phy"
+- dmas: specifies the dma channels
+- dma-names: specifies the names of the channels. Use "rxN" for receive
+  and "txN" for transmit endpoints. N specifies the endpoint number.

 Optional properties:
 ~~~~~~~~~~~~~~~~~~~~
 - vbus-supply: Phandle to a regulator providing the USB bus power.

+DMA
+~~~
+- compatible: ti,da830-cppi41
+- reg: offset and length of the following register spaces: CPPI DMA Controller,
+  CPPI DMA Scheduler, Queue Manager
+- reg-names: "controller", "scheduler", "queuemgr"
+- #dma-cells: should be set to 2. The first number represents the
+  channel number (0 … 3 for endpoints 1 … 4).
+  The second number is 0 for RX and 1 for TX transfers.
+- #dma-channels: should be set to 4 representing the 4 endpoints.
+
 Example:
 	usb_phy: usb-phy {
 		compatible = "ti,da830-usb-phy";
@@ -30,7 +46,10 @@ Example:
 	};
 	usb0: usb@200000 {
 		compatible = "ti,da830-musb";
-		reg = <0x00200000 0x10000>;
+		reg = <0x00200000 0x1000>;
+		ranges;
+		#address-cells = <1>;
+		#size-cells = <1>;
 		interrupts = <58>;
 		interrupt-names = "mc";
@@ -39,5 +58,25 @@ Example:
 		phys = <&usb_phy 0>;
 		phy-names = "usb-phy";

+		dmas = <&cppi41dma 0 0 &cppi41dma 1 0
+			&cppi41dma 2 0 &cppi41dma 3 0
+			&cppi41dma 0 1 &cppi41dma 1 1
+			&cppi41dma 2 1 &cppi41dma 3 1>;
+		dma-names =
+			"rx1", "rx2", "rx3", "rx4",
+			"tx1", "tx2", "tx3", "tx4";
+
 		status = "okay";
+
+		cppi41dma: dma-controller@201000 {
+			compatible = "ti,da830-cppi41";
+			reg = <0x201000 0x1000
+			       0x202000 0x1000
+			       0x204000 0x4000>;
+			reg-names = "controller", "scheduler", "queuemgr";
+			interrupts = <58>;
+			#dma-cells = <2>;
+			#dma-channels = <4>;
+		};
 	};
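For context, a consumer driver would request these channels by the names listed in dma-names. A minimal sketch, assuming a device whose DT node carries the dmas/dma-names properties shown above (error handling trimmed):

    #include <linux/dmaengine.h>

    /* Request the endpoint-1 RX channel declared above. dma_request_chan()
     * resolves "rx1" through the node's dmas/dma-names properties and the
     * controller's #dma-cells translation. */
    struct dma_chan *chan = dma_request_chan(dev, "rx1");

    if (IS_ERR(chan))
            return PTR_ERR(chan);
    /* ... issue transfers ... */
    dma_release_channel(chan);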
@@ -10,7 +10,6 @@
  * published by the Free Software Foundation.
  */

-#include <linux/amba/pl330.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/interrupt.h>
...
@@ -514,12 +514,12 @@ config TIMB_DMA
 	  Enable support for the Timberdale FPGA DMA engine.

 config TI_CPPI41
-	tristate "AM33xx CPPI41 DMA support"
-	depends on ARCH_OMAP
+	tristate "CPPI 4.1 DMA support"
+	depends on (ARCH_OMAP || ARCH_DAVINCI_DA8XX)
 	select DMA_ENGINE
 	help
 	  The Communications Port Programming Interface (CPPI) 4.1 DMA engine
-	  is currently used by the USB driver on AM335x platforms.
+	  is currently used by the USB driver on AM335x and DA8xx platforms.

 config TI_DMA_CROSSBAR
 	bool
@@ -608,6 +608,7 @@ config ASYNC_TX_DMA
 config DMATEST
 	tristate "DMA Test client"
 	depends on DMA_ENGINE
+	select DMA_ENGINE_RAID
 	help
 	  Simple DMA test client. Say N unless you're debugging a
 	  DMA Device driver.
...
@@ -106,6 +106,7 @@ struct pl08x_driver_data;
 /**
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
+ * @config_offset: offset to the configuration register
  * @channels: the number of channels available in this variant
  * @signals: the number of request signals available from the hardware
  * @dualmaster: whether this version supports dual AHB masters or not.
@@ -145,6 +146,8 @@ struct pl08x_bus_data {
 /**
  * struct pl08x_phy_chan - holder for the physical channels
  * @id: physical index to this channel
+ * @base: memory base address for this physical channel
+ * @reg_config: configuration address for this physical channel
  * @lock: a lock to use when altering an instance of this struct
  * @serving: the virtual channel currently being served by this physical
  * channel
@@ -203,7 +206,7 @@ struct pl08x_txd {
 };

 /**
- * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel
  * states
  * @PL08X_CHAN_IDLE: the channel is idle
  * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
@@ -226,9 +229,8 @@ enum pl08x_dma_chan_state {
  * @phychan: the physical channel utilized by this channel, if there is one
  * @name: name of channel
  * @cd: channel platform data
- * @runtime_addr: address for RX/TX according to the runtime config
+ * @cfg: slave configuration
  * @at: active transaction on this channel
- * @lock: a lock for this channel data
  * @host: a pointer to the host (internal use)
  * @state: whether the channel is idle, paused, running etc
  * @slave: whether this channel is a device (slave) or for memcpy
@@ -262,7 +264,7 @@ struct pl08x_dma_chan {
  * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
  * fetches
  * @mem_buses: set to indicate memory transfers on AHB2.
- * @lock: a spinlock for this struct
+ * @lli_words: how many words are used in each LLI item for this variant
  */
 struct pl08x_driver_data {
 	struct dma_device slave;
@@ -417,7 +419,7 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
 	/* Enable the DMA channel */
 	/* Do not access config register until channel shows as disabled */
-	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
+	while (readl(pl08x->base + PL080_EN_CHAN) & BIT(phychan->id))
 		cpu_relax();

 	/* Do not access config register until channel shows as inactive */
@@ -484,8 +486,8 @@ static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
 	writel(val, ch->reg_config);

-	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
-	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
+	writel(BIT(ch->id), pl08x->base + PL080_ERR_CLEAR);
+	writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR);
 }

 static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -1834,7 +1836,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 		return IRQ_NONE;

 	for (i = 0; i < pl08x->vd->channels; i++) {
-		if (((1 << i) & err) || ((1 << i) & tc)) {
+		if ((BIT(i) & err) || (BIT(i) & tc)) {
 			/* Locate physical channel */
 			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
 			struct pl08x_dma_chan *plchan = phychan->serving;
@@ -1872,7 +1874,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			}

 			spin_unlock(&plchan->vc.lock);
-			mask |= (1 << i);
+			mask |= BIT(i);
 		}
 	}
...
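The (1 << x) to BIT(x) conversions are mechanical: BIT() is the kernel's standard bit helper, defined in the bitops headers as an unsigned long shift:

    #define BIT(nr)	(1UL << (nr))

    /* So BIT(phychan->id) == (1 << phychan->id) for id < 31. For bit 31,
     * BIT(31) also avoids shifting into the sign bit of a signed int,
     * which the literal (1 << 31) technically does. */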
@@ -535,6 +535,13 @@ static int dmatest_func(void *data)
 		total_tests++;

+		/* Check if buffer count fits into map count variable (u8) */
+		if ((src_cnt + dst_cnt) >= 255) {
+			pr_err("too many buffers (%d of 255 supported)\n",
+			       src_cnt + dst_cnt);
+			break;
+		}
+
 		if (1 << align > params->buf_size) {
 			pr_err("%u-byte buffer too small for %d-byte alignment\n",
 			       params->buf_size, 1 << align);
@@ -585,7 +592,7 @@ static int dmatest_func(void *data)
 		for (i = 0; i < src_cnt; i++) {
 			void *buf = thread->srcs[i];
 			struct page *pg = virt_to_page(buf);
-			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
+			unsigned long pg_off = offset_in_page(buf);

 			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
 						   um->len, DMA_TO_DEVICE);
@@ -605,7 +612,7 @@ static int dmatest_func(void *data)
 		for (i = 0; i < dst_cnt; i++) {
 			void *buf = thread->dsts[i];
 			struct page *pg = virt_to_page(buf);
-			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
+			unsigned long pg_off = offset_in_page(buf);

 			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
 					       DMA_BIDIRECTIONAL);
...
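offset_in_page() is the canonical spelling of the masking it replaces; from <linux/mm.h>:

    #define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

    /* For any kernel virtual address buf, this yields the byte offset of
     * buf within its page, in the range [0, PAGE_SIZE - 1] -- exactly the
     * offset argument dma_map_page() expects. */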
@@ -937,6 +937,21 @@ static int sdma_disable_channel(struct dma_chan *chan)
 	return 0;
 }

+static int sdma_disable_channel_with_delay(struct dma_chan *chan)
+{
+	sdma_disable_channel(chan);
+
+	/*
+	 * According to NXP R&D team a delay of one BD SDMA cost time
+	 * (maximum is 1ms) should be added after disable of the channel
+	 * bit, to ensure SDMA core has really been stopped after SDMA
+	 * clients call .device_terminate_all.
+	 */
+	mdelay(1);
+
+	return 0;
+}
+
 static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
 {
 	struct sdma_engine *sdma = sdmac->sdma;
@@ -1828,11 +1843,11 @@ static int sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
 	sdma->dma_device.device_config = sdma_config;
-	sdma->dma_device.device_terminate_all = sdma_disable_channel;
+	sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
 	sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
 	sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
 	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
 	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
...
@@ -760,9 +760,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
 	dma_cookie_init(&ioat_chan->dma_chan);
 	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
 	ioat_dma->idx[idx] = ioat_chan;
-	init_timer(&ioat_chan->timer);
-	ioat_chan->timer.function = ioat_timer_event;
-	ioat_chan->timer.data = data;
+	setup_timer(&ioat_chan->timer, ioat_timer_event, data);
 	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
 }
...
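setup_timer() collapses the three-step open-coded initialization into one call; in kernels of this vintage the macro behaves roughly like:

    /* Rough equivalent of setup_timer(timer, fn, data): */
    init_timer(timer);
    (timer)->function = (fn);
    (timer)->data = (data);

so the replacement is behavior-preserving.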
@@ -960,7 +960,7 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}

 	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
-			       (size_t)src & ~PAGE_MASK, PAGE_SIZE,
+			       offset_in_page(src), PAGE_SIZE,
 			       DMA_TO_DEVICE);
 	unmap->addr[0] = src_dma;
@@ -972,7 +972,7 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	unmap->to_cnt = 1;

 	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
-				(size_t)dest & ~PAGE_MASK, PAGE_SIZE,
+				offset_in_page(dest), PAGE_SIZE,
 				DMA_FROM_DEVICE);
 	unmap->addr[1] = dest_dma;
@@ -1580,11 +1580,6 @@ static int mv_xor_probe(struct platform_device *pdev)
 			int irq;

 			cd = &pdata->channels[i];
-			if (!cd) {
-				ret = -ENODEV;
-				goto err_channel_add;
-			}

 			irq = platform_get_irq(pdev, i);
 			if (irq < 0) {
 				ret = irq;
...
@@ -22,7 +22,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 #include <linux/amba/bus.h>
-#include <linux/amba/pl330.h>
 #include <linux/scatterlist.h>
 #include <linux/of.h>
 #include <linux/of_dma.h>
@@ -2077,18 +2076,6 @@ static void pl330_tasklet(unsigned long data)
 	}
 }

-bool pl330_filter(struct dma_chan *chan, void *param)
-{
-	u8 *peri_id;
-
-	if (chan->device->dev->driver != &pl330_driver.drv)
-		return false;
-
-	peri_id = chan->private;
-	return *peri_id == (unsigned long)param;
-}
-EXPORT_SYMBOL(pl330_filter);
-
 static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
 						struct of_dma *ofdma)
 {
@@ -2833,7 +2820,6 @@ static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
 static int
 pl330_probe(struct amba_device *adev, const struct amba_id *id)
 {
-	struct dma_pl330_platdata *pdat;
 	struct pl330_config *pcfg;
 	struct pl330_dmac *pl330;
 	struct dma_pl330_chan *pch, *_p;
@@ -2843,8 +2829,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	int num_chan;
 	struct device_node *np = adev->dev.of_node;

-	pdat = dev_get_platdata(&adev->dev);
-
 	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
 	if (ret)
 		return ret;
@@ -2857,7 +2841,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pd = &pl330->ddma;
 	pd->dev = &adev->dev;

-	pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
+	pl330->mcbufsz = 0;

 	/* get quirk */
 	for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
@@ -2901,10 +2885,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	INIT_LIST_HEAD(&pd->channels);

 	/* Initialize channel parameters */
-	if (pdat)
-		num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan);
-	else
-		num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);
+	num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);

 	pl330->num_peripherals = num_chan;
@@ -2916,11 +2897,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	for (i = 0; i < num_chan; i++) {
 		pch = &pl330->peripherals[i];

-		if (!adev->dev.of_node)
-			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
-		else
-			pch->chan.private = adev->dev.of_node;
+		pch->chan.private = adev->dev.of_node;

 		INIT_LIST_HEAD(&pch->submitted_list);
 		INIT_LIST_HEAD(&pch->work_list);
 		INIT_LIST_HEAD(&pch->completed_list);
@@ -2933,15 +2911,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		list_add_tail(&pch->chan.device_node, &pd->channels);
 	}

-	if (pdat) {
-		pd->cap_mask = pdat->cap_mask;
-	} else {
-		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
-		if (pcfg->num_peri) {
-			dma_cap_set(DMA_SLAVE, pd->cap_mask);
-			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
-			dma_cap_set(DMA_PRIVATE, pd->cap_mask);
-		}
-	}
+	dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+	if (pcfg->num_peri) {
+		dma_cap_set(DMA_SLAVE, pd->cap_mask);
+		dma_cap_set(DMA_CYCLIC, pd->cap_mask);
+		dma_cap_set(DMA_PRIVATE, pd->cap_mask);
+	}

 	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
...
@@ -865,6 +865,20 @@ static int hidma_probe(struct platform_device *pdev)
 	return rc;
 }

+static void hidma_shutdown(struct platform_device *pdev)
+{
+	struct hidma_dev *dmadev = platform_get_drvdata(pdev);
+
+	dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");
+
+	pm_runtime_get_sync(dmadev->ddev.dev);
+	if (hidma_ll_disable(dmadev->lldev))
+		dev_warn(dmadev->ddev.dev, "channel did not stop\n");
+	pm_runtime_mark_last_busy(dmadev->ddev.dev);
+	pm_runtime_put_autosuspend(dmadev->ddev.dev);
+}
+
 static int hidma_remove(struct platform_device *pdev)
 {
 	struct hidma_dev *dmadev = platform_get_drvdata(pdev);
@@ -908,6 +922,7 @@ MODULE_DEVICE_TABLE(of, hidma_match);
 static struct platform_driver hidma_driver = {
 	.probe = hidma_probe,
 	.remove = hidma_remove,
+	.shutdown = hidma_shutdown,
 	.driver = {
 		.name = "hidma",
 		.of_match_table = hidma_match,
...
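The .shutdown hook is invoked by the platform bus core at system shutdown or reboot, and the pm_runtime_get_sync()/pm_runtime_put_autosuspend() pair guarantees the device is powered while hidma_ll_disable() pauses the channel. The same shape applies to any runtime-PM managed DMA driver; a generic sketch with hypothetical names:

    /* Sketch only: foo_dev and foo_hw_quiesce() are placeholders. */
    static void foo_shutdown(struct platform_device *pdev)
    {
            struct foo_dev *fd = platform_get_drvdata(pdev);

            pm_runtime_get_sync(&pdev->dev);   /* device must be powered */
            foo_hw_quiesce(fd);                /* stop DMA before reboot */
            pm_runtime_mark_last_busy(&pdev->dev);
            pm_runtime_put_autosuspend(&pdev->dev);
    }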
@@ -499,6 +499,9 @@ int hidma_ll_enable(struct hidma_lldev *lldev)
 	lldev->trch_state = HIDMA_CH_ENABLED;
 	lldev->evch_state = HIDMA_CH_ENABLED;

+	/* enable irqs */
+	writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
 	return 0;
 }
@@ -596,6 +599,9 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
 	lldev->trch_state = HIDMA_CH_SUSPENDED;
 	lldev->evch_state = HIDMA_CH_SUSPENDED;

+	/* disable interrupts */
+	writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
 	return 0;
 }
...
@@ -344,13 +344,19 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
 		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

 	if (desc->hwdescs.use) {
-		struct rcar_dmac_xfer_chunk *chunk;
+		struct rcar_dmac_xfer_chunk *chunk =
+			list_first_entry(&desc->chunks,
+					 struct rcar_dmac_xfer_chunk, node);

 		dev_dbg(chan->chan.device->dev,
 			"chan%u: queue desc %p: %u@%pad\n",
 			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
+				     chunk->src_addr >> 32);
+		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
+				     chunk->dst_addr >> 32);
 		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
 				     desc->hwdescs.dma >> 32);
 #endif
@@ -368,8 +374,6 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
 		 * should. Initialize it manually with the destination address
 		 * of the first chunk.
 		 */
-		chunk = list_first_entry(&desc->chunks,
-					 struct rcar_dmac_xfer_chunk, node);
 		rcar_dmac_chan_write(chan, RCAR_DMADAR,
 				     chunk->dst_addr & 0xffffffff);
@@ -855,8 +859,12 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
 	unsigned int nchunks = 0;
 	unsigned int max_chunk_size;
 	unsigned int full_size = 0;
-	bool highmem = false;
+	bool cross_boundary = false;
 	unsigned int i;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	u32 high_dev_addr;
+	u32 high_mem_addr;
+#endif

 	desc = rcar_dmac_desc_get(chan);
 	if (!desc)
@@ -882,6 +890,16 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,

 		full_size += len;

+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		if (i == 0) {
+			high_dev_addr = dev_addr >> 32;
+			high_mem_addr = mem_addr >> 32;
+		}
+
+		if ((dev_addr >> 32 != high_dev_addr) ||
+		    (mem_addr >> 32 != high_mem_addr))
+			cross_boundary = true;
+#endif
 		while (len) {
 			unsigned int size = min(len, max_chunk_size);

@@ -890,18 +908,14 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
 			 * Prevent individual transfers from crossing 4GB
 			 * boundaries.
 			 */
-			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
+			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) {
 				size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
-			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
+				cross_boundary = true;
+			}
+			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) {
 				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
-
-			/*
-			 * Check if either of the source or destination address
-			 * can't be expressed in 32 bits. If so we can't use
-			 * hardware descriptor lists.
-			 */
-			if (dev_addr >> 32 || mem_addr >> 32)
-				highmem = true;
+				cross_boundary = true;
+			}
 #endif

 			chunk = rcar_dmac_xfer_chunk_get(chan);
@@ -943,13 +957,11 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
 	 * Use hardware descriptor lists if possible when more than one chunk
 	 * needs to be transferred (otherwise they don't make much sense).
 	 *
-	 * The highmem check currently covers the whole transfer. As an
-	 * optimization we could use descriptor lists for consecutive lowmem
-	 * chunks and direct manual mode for highmem chunks. Whether the
-	 * performance improvement would be significant enough compared to the
-	 * additional complexity remains to be investigated.
+	 * Source/Destination address should be located in same 4GiB region
+	 * in the 40bit address space when it uses Hardware descriptor,
+	 * and cross_boundary is checking it.
 	 */
-	desc->hwdescs.use = !highmem && nchunks > 1;
+	desc->hwdescs.use = !cross_boundary && nchunks > 1;
 	if (desc->hwdescs.use) {
 		if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
 			desc->hwdescs.use = false;
...
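A worked example of the rcar-dmac clamping arithmetic, with illustrative values:

    /* dev_addr = 0x1FFFFF000, size = 0x2000:
     *   dev_addr >> 32              == 0x1
     *   (dev_addr + size - 1) >> 32 == 0x2   -> chunk would cross 4GiB
     *   size = ALIGN(dev_addr, 1ULL << 32) - dev_addr
     *        = 0x200000000 - 0x1FFFFF000 == 0x1000
     * The chunk is clamped to end exactly at the boundary, and
     * cross_boundary is set so hardware descriptor lists are not used. */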
@@ -1008,7 +1008,7 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,

 	c = dma_get_slave_channel(&chan->vchan.chan);
 	if (!c) {
-		dev_err(dev, "No more channel avalaible\n");
+		dev_err(dev, "No more channels available\n");
 		return NULL;
 	}
...
@@ -238,7 +238,7 @@ static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
 	}

 	spin_lock_irqsave(&priv->lock, flags);
-	for_each_clear_bit_from(i, &priv->pchans_used, max) {
+	for_each_clear_bit_from(i, priv->pchans_used, max) {
 		pchan = &pchans[i];
 		pchan->vchan = vchan;
 		set_bit(i, priv->pchans_used);
...
@@ -86,7 +86,7 @@ EXPORT_SYMBOL_GPL(vchan_find_desc);
 static void vchan_complete(unsigned long arg)
 {
 	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
-	struct virt_dma_desc *vd;
+	struct virt_dma_desc *vd, *_vd;
 	struct dmaengine_desc_callback cb;
 	LIST_HEAD(head);
@@ -103,8 +103,7 @@ static void vchan_complete(unsigned long arg)
 	dmaengine_desc_callback_invoke(&cb, NULL);

-	while (!list_empty(&head)) {
-		vd = list_first_entry(&head, struct virt_dma_desc, node);
+	list_for_each_entry_safe(vd, _vd, &head, node) {
 		dmaengine_desc_get_callback(&vd->tx, &cb);

 		list_del(&vd->node);
@@ -119,9 +118,9 @@ static void vchan_complete(unsigned long arg)
 void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 {
-	while (!list_empty(head)) {
-		struct virt_dma_desc *vd = list_first_entry(head,
-			struct virt_dma_desc, node);
+	struct virt_dma_desc *vd, *_vd;
+
+	list_for_each_entry_safe(vd, _vd, head, node) {
 		if (dmaengine_desc_test_reuse(&vd->tx)) {
 			list_move_tail(&vd->node, &vc->desc_allocated);
 		} else {
...
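The switch to list_for_each_entry_safe() matters because both loop bodies unlink vd from the list; the _safe variant fetches the successor into _vd before the body runs, so deletion cannot break the traversal:

    /* Not safe: the next pointer is read from vd after list_del(). */
    list_for_each_entry(vd, &head, node)
            list_del(&vd->node);

    /* Safe: _vd already holds the successor when the body executes. */
    list_for_each_entry_safe(vd, _vd, &head, node)
            list_del(&vd->node);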
@@ -331,6 +331,7 @@ struct xilinx_dma_tx_descriptor {
 * @seg_v: Statically allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @start_transfer: Differentiate b/w DMA IP's transfer
+ * @stop_transfer: Differentiate b/w DMA IP's quiesce
 */
 struct xilinx_dma_chan {
 	struct xilinx_dma_device *xdev;
@@ -361,6 +362,7 @@ struct xilinx_dma_chan {
 	struct xilinx_axidma_tx_segment *seg_v;
 	struct xilinx_axidma_tx_segment *cyclic_seg_v;
 	void (*start_transfer)(struct xilinx_dma_chan *chan);
+	int (*stop_transfer)(struct xilinx_dma_chan *chan);
 	u16 tdest;
 };
@@ -946,26 +948,32 @@ static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
 }

 /**
- * xilinx_dma_halt - Halt DMA channel
+ * xilinx_dma_stop_transfer - Halt DMA channel
 * @chan: Driver specific DMA channel
 */
-static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
+static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
 {
-	int err;
 	u32 val;

 	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

 	/* Wait for the hardware to halt */
-	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
-				      (val & XILINX_DMA_DMASR_HALTED), 0,
-				      XILINX_DMA_LOOP_COUNT);
-
-	if (err) {
-		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
-			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
-		chan->err = true;
-	}
+	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+				       val & XILINX_DMA_DMASR_HALTED, 0,
+				       XILINX_DMA_LOOP_COUNT);
+}
+
+/**
+ * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
+ * @chan: Driver specific DMA channel
+ */
+static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
+{
+	u32 val;
+
+	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+				       val & XILINX_DMA_DMASR_IDLE, 0,
+				       XILINX_DMA_LOOP_COUNT);
 }

 /**
@@ -1653,7 +1661,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 {
 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 	struct xilinx_dma_tx_descriptor *desc;
-	struct xilinx_cdma_tx_segment *segment, *prev;
+	struct xilinx_cdma_tx_segment *segment;
 	struct xilinx_cdma_desc_hw *hw;

 	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
@@ -1680,21 +1688,11 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 		hw->dest_addr_msb = upper_32_bits(dma_dst);
 	}

-	/* Fill the previous next descriptor with current */
-	prev = list_last_entry(&desc->segments,
-			       struct xilinx_cdma_tx_segment, node);
-	prev->hw.next_desc = segment->phys;
-
 	/* Insert the segment into the descriptor segments list. */
 	list_add_tail(&segment->node, &desc->segments);

-	prev = segment;
-
-	/* Link the last hardware descriptor with the first. */
-	segment = list_first_entry(&desc->segments,
-				   struct xilinx_cdma_tx_segment, node);
 	desc->async_tx.phys = segment->phys;
-	prev->hw.next_desc = segment->phys;
+	hw->next_desc = segment->phys;

 	return &desc->async_tx;
@@ -2003,12 +2001,17 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 {
 	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 	u32 reg;
+	int err;

 	if (chan->cyclic)
 		xilinx_dma_chan_reset(chan);

-	/* Halt the DMA engine */
-	xilinx_dma_halt(chan);
+	err = chan->stop_transfer(chan);
+	if (err) {
+		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
+			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
+		chan->err = true;
+	}

 	/* Remove and free all of the descriptors in the lists */
 	xilinx_dma_free_descriptors(chan);
@@ -2397,12 +2400,16 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		return err;
 	}

-	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		chan->start_transfer = xilinx_dma_start_transfer;
-	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
+		chan->stop_transfer = xilinx_dma_stop_transfer;
+	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 		chan->start_transfer = xilinx_cdma_start_transfer;
-	else
+		chan->stop_transfer = xilinx_cdma_stop_transfer;
+	} else {
 		chan->start_transfer = xilinx_vdma_start_transfer;
+		chan->stop_transfer = xilinx_dma_stop_transfer;
+	}

 	/* Initialize the tasklet */
 	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
...
@@ -38,24 +38,16 @@
 #define PL080_SOFT_LSREQ			(0x2C)

 #define PL080_CONFIG				(0x30)
-#define PL080_CONFIG_M2_BE			(1 << 2)
-#define PL080_CONFIG_M1_BE			(1 << 1)
-#define PL080_CONFIG_ENABLE			(1 << 0)
+#define PL080_CONFIG_M2_BE			BIT(2)
+#define PL080_CONFIG_M1_BE			BIT(1)
+#define PL080_CONFIG_ENABLE			BIT(0)

 #define PL080_SYNC				(0x34)

 /* Per channel configuration registers */
-
-#define PL080_Cx_STRIDE				(0x20)
 #define PL080_Cx_BASE(x)			((0x100 + (x * 0x20)))
-#define PL080_Cx_SRC_ADDR(x)			((0x100 + (x * 0x20)))
-#define PL080_Cx_DST_ADDR(x)			((0x104 + (x * 0x20)))
-#define PL080_Cx_LLI(x)				((0x108 + (x * 0x20)))
-#define PL080_Cx_CONTROL(x)			((0x10C + (x * 0x20)))
-#define PL080_Cx_CONFIG(x)			((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONTROL2(x)			((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONFIG(x)			((0x114 + (x * 0x20)))

 #define PL080_CH_SRC_ADDR			(0x00)
 #define PL080_CH_DST_ADDR			(0x04)
 #define PL080_CH_LLI				(0x08)
@@ -66,18 +58,18 @@
 #define PL080_LLI_ADDR_MASK			(0x3fffffff << 2)
 #define PL080_LLI_ADDR_SHIFT			(2)
-#define PL080_LLI_LM_AHB2			(1 << 0)
+#define PL080_LLI_LM_AHB2			BIT(0)

-#define PL080_CONTROL_TC_IRQ_EN			(1 << 31)
+#define PL080_CONTROL_TC_IRQ_EN			BIT(31)
 #define PL080_CONTROL_PROT_MASK			(0x7 << 28)
 #define PL080_CONTROL_PROT_SHIFT		(28)
-#define PL080_CONTROL_PROT_CACHE		(1 << 30)
-#define PL080_CONTROL_PROT_BUFF			(1 << 29)
-#define PL080_CONTROL_PROT_SYS			(1 << 28)
-#define PL080_CONTROL_DST_INCR			(1 << 27)
-#define PL080_CONTROL_SRC_INCR			(1 << 26)
-#define PL080_CONTROL_DST_AHB2			(1 << 25)
-#define PL080_CONTROL_SRC_AHB2			(1 << 24)
+#define PL080_CONTROL_PROT_CACHE		BIT(30)
+#define PL080_CONTROL_PROT_BUFF			BIT(29)
+#define PL080_CONTROL_PROT_SYS			BIT(28)
+#define PL080_CONTROL_DST_INCR			BIT(27)
+#define PL080_CONTROL_SRC_INCR			BIT(26)
+#define PL080_CONTROL_DST_AHB2			BIT(25)
+#define PL080_CONTROL_SRC_AHB2			BIT(24)
 #define PL080_CONTROL_DWIDTH_MASK		(0x7 << 21)
 #define PL080_CONTROL_DWIDTH_SHIFT		(21)
 #define PL080_CONTROL_SWIDTH_MASK		(0x7 << 18)
@@ -103,20 +95,20 @@
 #define PL080_WIDTH_16BIT			(0x1)
 #define PL080_WIDTH_32BIT			(0x2)

-#define PL080N_CONFIG_ITPROT			(1 << 20)
-#define PL080N_CONFIG_SECPROT			(1 << 19)
-#define PL080_CONFIG_HALT			(1 << 18)
-#define PL080_CONFIG_ACTIVE			(1 << 17)  /* RO */
-#define PL080_CONFIG_LOCK			(1 << 16)
-#define PL080_CONFIG_TC_IRQ_MASK		(1 << 15)
-#define PL080_CONFIG_ERR_IRQ_MASK		(1 << 14)
+#define PL080N_CONFIG_ITPROT			BIT(20)
+#define PL080N_CONFIG_SECPROT			BIT(19)
+#define PL080_CONFIG_HALT			BIT(18)
+#define PL080_CONFIG_ACTIVE			BIT(17)  /* RO */
+#define PL080_CONFIG_LOCK			BIT(16)
+#define PL080_CONFIG_TC_IRQ_MASK		BIT(15)
+#define PL080_CONFIG_ERR_IRQ_MASK		BIT(14)
 #define PL080_CONFIG_FLOW_CONTROL_MASK		(0x7 << 11)
 #define PL080_CONFIG_FLOW_CONTROL_SHIFT		(11)
 #define PL080_CONFIG_DST_SEL_MASK		(0xf << 6)
 #define PL080_CONFIG_DST_SEL_SHIFT		(6)
 #define PL080_CONFIG_SRC_SEL_MASK		(0xf << 1)
 #define PL080_CONFIG_SRC_SEL_SHIFT		(1)
-#define PL080_CONFIG_ENABLE			(1 << 0)
+#define PL080_CONFIG_ENABLE			BIT(0)

 #define PL080_FLOW_MEM2MEM			(0x0)
 #define PL080_FLOW_MEM2PER			(0x1)
...
-/* linux/include/linux/amba/pl330.h
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- *	Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef	__AMBA_PL330_H_
-#define	__AMBA_PL330_H_
-
-#include <linux/dmaengine.h>
-
-struct dma_pl330_platdata {
-	/*
-	 * Number of valid peripherals connected to DMAC.
-	 * This may be different from the value read from
-	 * CR0, as the PL330 implementation might have 'holes'
-	 * in the peri list or the peri could also be reached
-	 * from another DMAC which the platform prefers.
-	 */
-	u8 nr_valid_peri;
-	/* Array of valid peripherals */
-	u8 *peri_id;
-	/* Operational capabilities */
-	dma_cap_mask_t cap_mask;
-	/* Bytes to allocate for MC buffer */
-	unsigned mcbuf_sz;
-};
-
-extern bool pl330_filter(struct dma_chan *chan, void *param);
-#endif	/* __AMBA_PL330_H_ */
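With every pl330 user probing from the device tree, this platdata header and the exported pl330_filter() lose their last consumers. Roughly, the channel-request pattern moves from the legacy filter API to DT lookup:

    /* Legacy, platdata-based (removed): */
    dma_cap_mask_t mask;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);
    chan = dma_request_channel(mask, pl330_filter, (void *)peri_id);

    /* DT-based (current): resolved through of_dma_pl330_xlate() */
    chan = dma_request_chan(dev, "tx");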
@@ -1498,7 +1498,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 	entry->type      = dma_debug_coherent;
 	entry->dev       = dev;
 	entry->pfn       = page_to_pfn(virt_to_page(virt));
-	entry->offset    = (size_t) virt & ~PAGE_MASK;
+	entry->offset    = offset_in_page(virt);
 	entry->size      = size;
 	entry->dev_addr  = dma_addr;
 	entry->direction = DMA_BIDIRECTIONAL;
@@ -1514,7 +1514,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 		.type           = dma_debug_coherent,
 		.dev            = dev,
 		.pfn            = page_to_pfn(virt_to_page(virt)),
-		.offset         = (size_t) virt & ~PAGE_MASK,
+		.offset         = offset_in_page(virt),
 		.dev_addr       = addr,
 		.size           = size,
 		.direction      = DMA_BIDIRECTIONAL,
...