Commit 12ff47e7 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (37 commits)
  Improve slave/cyclic DMA engine documentation
  dmaengine: pl08x: handle the rest of enums in pl08x_width
  DMA: PL08x: cleanup selection of burst size
  DMA: PL08x: avoid recalculating cctl at each prepare
  DMA: PL08x: cleanup selection of buswidth
  DMA: PL08x: constify plchan->cd and plat->slave_channels
  DMA: PL08x: separately store source/destination cctl
  DMA: PL08x: separately store source/destination slave address
  DMA: PL08x: clean up LLI debugging
  DMA: PL08x: select LLI bus only once per LLI setup
  DMA: PL08x: remove unused constants
  ARM: mxs-dma: reset after disable channel
  dma: intel_mid_dma: remove redundant pci_set_drvdata calls
  dma: mxs-dma: fix unterminated platform_device_id table
  dmaengine: pl330: make platform data optional
  dmaengine: imx-sdma: return proper error if kzalloc fails
  pch_dma: Fix CTL register access issue
  dmaengine: mxs-dma: skip request_irq for NO_IRQ
  dmaengine/coh901318: fix slave submission semantics
  dmaengine/ste_dma40: allow memory buswidth/burst to be configured
  ...

Fix trivial whitespace conflict in drivers/dma/mv_xor.c
parents 73bcbac1 1ae105aa
Documentation/dmaengine.txt (diff collapsed)
drivers/dma/TODO
@@ -9,6 +9,5 @@ TODO for slave dma
 	- mxs-dma.c
 	- dw_dmac
 	- intel_mid_dma
-	- ste_dma40
 4. Check other subsystems for dma drivers and merge/move to dmaengine
 5. Remove dma_slave_config's dma direction.
drivers/dma/amba-pl08x.c (diff collapsed)
drivers/dma/at_hdmac.c
@@ -1216,7 +1216,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	atdma->dma_common.cap_mask = pdata->cap_mask;
 	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
 
-	size = io->end - io->start + 1;
+	size = resource_size(io);
 	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
 		err = -EBUSY;
 		goto err_kfree;
@@ -1362,7 +1362,7 @@ static int __exit at_dma_remove(struct platform_device *pdev)
 	atdma->regs = NULL;
 
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(io->start, io->end - io->start + 1);
+	release_mem_region(io->start, resource_size(io));
 
 	kfree(atdma);
......
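This at_hdmac hunk, and the ipu_idmac and mv_xor hunks further down, all replace open-coded `end - start + 1` arithmetic with the resource_size() helper from <linux/ioport.h>, which computes exactly that and avoids the classic off-by-one of forgetting the `+ 1`. A minimal sketch of the pattern; map_region() is a hypothetical wrapper, not kernel API:

    #include <linux/io.h>		/* ioremap() */
    #include <linux/ioport.h>	/* struct resource, resource_size() */

    /* resource_size() returns res->end - res->start + 1, so the 4 KiB
     * region [0x1000, 0x1fff] correctly reports 0x1000 bytes. */
    static void __iomem *map_region(struct resource *res)
    {
    	return ioremap(res->start, resource_size(res));
    }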
drivers/dma/coh901318.c
@@ -41,6 +41,8 @@ struct coh901318_desc {
 	struct coh901318_lli *lli;
 	enum dma_data_direction dir;
 	unsigned long flags;
+	u32 head_config;
+	u32 head_ctrl;
 };
 
 struct coh901318_base {
@@ -661,6 +663,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
 		coh901318_desc_submit(cohc, cohd);
 
+		/* Program the transaction head */
+		coh901318_set_conf(cohc, cohd->head_config);
+		coh901318_set_ctrl(cohc, cohd->head_ctrl);
 		coh901318_prep_linked_list(cohc, cohd->lli);
 
 		/* start dma job on this channel */
@@ -1091,8 +1096,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	} else
 		goto err_direction;
 
-	coh901318_set_conf(cohc, config);
-
 	/* The dma only supports transmitting packages up to
 	 * MAX_DMA_PACKET_SIZE. Calculate to total number of
 	 * dma elemts required to send the entire sg list
@@ -1129,16 +1132,18 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	if (ret)
 		goto err_lli_fill;
 
-	/*
-	 * Set the default ctrl for the channel to the one from the lli,
-	 * things may have changed due to odd buffer alignment etc.
-	 */
-	coh901318_set_ctrl(cohc, lli->control);
 
 	COH_DBG(coh901318_list_print(cohc, lli));
 
 	/* Pick a descriptor to handle this transfer */
 	cohd = coh901318_desc_get(cohc);
+	cohd->head_config = config;
+	/*
+	 * Set the default head ctrl for the channel to the one from the
+	 * lli, things may have changed due to odd buffer alignment
+	 * etc.
+	 */
+	cohd->head_ctrl = lli->control;
 	cohd->dir = direction;
 	cohd->flags = flags;
 	cohd->desc.tx_submit = coh901318_tx_submit;
......
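This restores correct submission semantics: prep_slave_sg() may run while a previous job is still active on the channel, so writing the configuration and control registers there could corrupt the transfer in flight. The values are instead captured in the descriptor and only written in queue_start(), when the descriptor actually becomes the transaction head. A condensed sketch of the pattern, with invented names rather than the driver's real API:

    #include <linux/io.h>
    #include <linux/types.h>

    struct my_chan { void __iomem *base; };
    struct my_desc { u32 head_conf; u32 head_ctrl; };

    #define MY_CONF 0x00
    #define MY_CTRL 0x04

    /* prep: compute register values but do not touch the hardware,
     * since an earlier job may still be running on the channel. */
    static void my_prep(struct my_desc *d, u32 conf, u32 ctrl)
    {
    	d->head_conf = conf;
    	d->head_ctrl = ctrl;
    }

    /* queue_start: the descriptor has reached the head of the queue,
     * so programming the channel is now safe. */
    static void my_queue_start(struct my_chan *c, struct my_desc *d)
    {
    	writel(d->head_conf, c->base + MY_CONF);
    	writel(d->head_ctrl, c->base + MY_CTRL);
    }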
drivers/dma/dmaengine.c
@@ -510,8 +510,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 					dma_chan_name(chan));
 				list_del_rcu(&device->global_node);
 			} else if (err)
-				pr_err("dmaengine: failed to get %s: (%d)\n",
-					dma_chan_name(chan), err);
+				pr_debug("dmaengine: failed to get %s: (%d)\n",
+					 dma_chan_name(chan), err);
 			else
 				break;
 			if (--device->privatecnt == 0)
......
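Demoting the message from pr_err() to pr_debug() reflects that __dma_request_channel() tries each registered DMA device in turn, so failing to get a channel from one device is expected rather than an error; the caller simply receives NULL when nothing matched. A hedged client-side sketch; my_filter and its parameter are hypothetical:

    #include <linux/dmaengine.h>

    /* Hypothetical filter: a real one matches board-specific request
     * lines carried in 'param'. */
    static bool my_filter(struct dma_chan *chan, void *param)
    {
    	return true;
    }

    static struct dma_chan *my_get_slave_chan(void *my_filter_param)
    {
    	dma_cap_mask_t mask;

    	dma_cap_zero(mask);
    	dma_cap_set(DMA_SLAVE, mask);

    	/* NULL simply means nothing matched; callers fall back
    	 * (e.g. to PIO) rather than treating this as fatal. */
    	return dma_request_channel(mask, my_filter, my_filter_param);
    }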
drivers/dma/ep93xx_dma.c
@@ -902,7 +902,7 @@ static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
  *
  * Returns a valid DMA descriptor or %NULL in case of failure.
  */
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
 ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
 			   dma_addr_t src, size_t len, unsigned long flags)
 {
......
drivers/dma/imx-sdma.c
@@ -1305,8 +1305,10 @@ static int __init sdma_probe(struct platform_device *pdev)
 		goto err_request_irq;
 
 	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
-	if (!sdma->script_addrs)
+	if (!sdma->script_addrs) {
+		ret = -ENOMEM;
 		goto err_alloc;
+	}
 
 	if (of_id)
 		pdev->id_entry = of_id->data;
......
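Before this fix, an allocation failure jumped to err_alloc with ret still holding 0 from the last successful call, so probe reported success while leaving the driver half-initialized. The shape of the bug and its fix, in a hedged sketch with invented names:

    #include <linux/slab.h>

    static int my_setup(void)
    {
    	return 0;	/* stands in for earlier probe steps */
    }

    static int my_probe(void)
    {
    	void *buf;
    	int ret;

    	ret = my_setup();
    	if (ret)
    		goto err;

    	buf = kzalloc(16, GFP_KERNEL);
    	if (!buf) {
    		/* without this assignment, ret would still be 0 here
    		 * and probe would report success for a failed alloc */
    		ret = -ENOMEM;
    		goto err;
    	}
    	kfree(buf);
    	return 0;
    err:
    	return ret;
    }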
drivers/dma/intel_mid_dma.c
@@ -1351,7 +1351,6 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
 			return -EAGAIN;
 	}
 	device->state = SUSPENDED;
-	pci_set_drvdata(pci, device);
 	pci_save_state(pci);
 	pci_disable_device(pci);
 	pci_set_power_state(pci, PCI_D3hot);
@@ -1380,7 +1379,6 @@ int dma_resume(struct pci_dev *pci)
 	}
 	device->state = RUNNING;
 	iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
-	pci_set_drvdata(pci, device);
 	return 0;
 }
......
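The driver-data pointer stored with pci_set_drvdata() at probe time persists for the life of the binding, so re-storing the same pointer in suspend/resume is a no-op; both paths already fetched device via pci_get_drvdata(). A minimal sketch of the intended usage, with hypothetical names:

    #include <linux/pci.h>
    #include <linux/slab.h>

    struct my_dev { int state; };

    static int my_probe(struct pci_dev *pci, const struct pci_device_id *id)
    {
    	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

    	if (!dev)
    		return -ENOMEM;
    	pci_set_drvdata(pci, dev);	/* stored once, here */
    	return 0;
    }

    static int my_suspend(struct pci_dev *pci, pm_message_t state)
    {
    	struct my_dev *dev = pci_get_drvdata(pci);

    	dev->state = 1;	/* the pointer is still valid; no need to
    			 * call pci_set_drvdata() again */
    	return 0;
    }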
drivers/dma/ipu/ipu_idmac.c
@@ -1706,16 +1706,14 @@ static int __init ipu_probe(struct platform_device *pdev)
 		ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base);
 
 	/* Remap IPU common registers */
-	ipu_data.reg_ipu = ioremap(mem_ipu->start,
-				   mem_ipu->end - mem_ipu->start + 1);
+	ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
 	if (!ipu_data.reg_ipu) {
 		ret = -ENOMEM;
 		goto err_ioremap_ipu;
 	}
 
 	/* Remap Image Converter and Image DMA Controller registers */
-	ipu_data.reg_ic = ioremap(mem_ic->start,
-				  mem_ic->end - mem_ic->start + 1);
+	ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
 	if (!ipu_data.reg_ic) {
 		ret = -ENOMEM;
 		goto err_ioremap_ic;
......
drivers/dma/mv_xor.c
@@ -1304,7 +1304,8 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
 	if (!res)
 		return -ENODEV;
 
-	msp->xor_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
 	if (!msp->xor_base)
 		return -EBUSY;
......
drivers/dma/mxs-dma.c
@@ -327,10 +327,12 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
 
 	memset(mxs_chan->ccw, 0, PAGE_SIZE);
 
-	ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
-				0, "mxs-dma", mxs_dma);
-	if (ret)
-		goto err_irq;
+	if (mxs_chan->chan_irq != NO_IRQ) {
+		ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
+					0, "mxs-dma", mxs_dma);
+		if (ret)
+			goto err_irq;
+	}
 
 	ret = clk_enable(mxs_dma->clk);
 	if (ret)
@@ -535,6 +537,7 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
 		mxs_dma_disable_chan(mxs_chan);
+		mxs_dma_reset_chan(mxs_chan);
 		break;
 	case DMA_PAUSE:
 		mxs_dma_pause_chan(mxs_chan);
@@ -707,6 +710,8 @@ static struct platform_device_id mxs_dma_type[] = {
 	}, {
 		.name = "mxs-dma-apbx",
 		.driver_data = MXS_DMA_APBX,
+	}, {
+		/* end of list */
 	}
 };
......
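Two of the mxs-dma changes are defensive: DMA_TERMINATE_ALL now resets the channel after disabling it so later reuse starts from a clean state, and the platform_device_id table gains its terminating entry. The latter matters because the platform bus matches devices by walking the array until it reaches an entry with an empty name; without the sentinel, the walk runs off the end of the array. Roughly how the core's lookup behaves (a sketch modeled on drivers/base/platform.c, not a verbatim copy):

    #include <linux/platform_device.h>
    #include <linux/string.h>

    static const struct platform_device_id *
    my_match_id(const struct platform_device_id *id, struct platform_device *pdev)
    {
    	while (id->name[0]) {	/* the empty sentinel stops the walk */
    		if (strcmp(pdev->name, id->name) == 0)
    			return id;
    		id++;
    	}
    	return NULL;
    }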
drivers/dma/pch_dma.c
@@ -45,7 +45,8 @@
 #define DMA_STATUS_MASK_BITS		0x3
 #define DMA_STATUS_SHIFT_BITS		16
 #define DMA_STATUS_IRQ(x)		(0x1 << (x))
-#define DMA_STATUS_ERR(x)		(0x1 << ((x) + 8))
+#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
+#define DMA_STATUS2_ERR(x)		(0x1 << (x))
 
 #define DMA_DESC_WIDTH_SHIFT_BITS	12
 #define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
@@ -61,6 +62,9 @@
 #define MAX_CHAN_NR			8
 
+#define DMA_MASK_CTL0_MODE	0x33333333
+#define DMA_MASK_CTL2_MODE	0x00003333
+
 static unsigned int init_nr_desc_per_channel = 64;
 module_param(init_nr_desc_per_channel, uint, 0644);
 MODULE_PARM_DESC(init_nr_desc_per_channel,
@@ -133,6 +137,7 @@ struct pch_dma {
 #define PCH_DMA_CTL3	0x0C
 #define PCH_DMA_STS0	0x10
 #define PCH_DMA_STS1	0x14
+#define PCH_DMA_STS2	0x18
 
 #define dma_readl(pd, name) \
 	readl((pd)->membase + PCH_DMA_##name)
@@ -183,13 +188,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable)
 {
 	struct pch_dma *pd = to_pd(chan->device);
 	u32 val;
+	int pos;
+
+	if (chan->chan_id < 8)
+		pos = chan->chan_id;
+	else
+		pos = chan->chan_id + 8;
 
 	val = dma_readl(pd, CTL2);
 
 	if (enable)
-		val |= 0x1 << chan->chan_id;
+		val |= 0x1 << pos;
 	else
-		val &= ~(0x1 << chan->chan_id);
+		val &= ~(0x1 << pos);
 
 	dma_writel(pd, CTL2, val);
@@ -202,10 +213,17 @@ static void pdc_set_dir(struct dma_chan *chan)
 	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 	struct pch_dma *pd = to_pd(chan->device);
 	u32 val;
+	u32 mask_mode;
+	u32 mask_ctl;
 
 	if (chan->chan_id < 8) {
 		val = dma_readl(pd, CTL0);
+		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
+					(DMA_CTL0_BITS_PER_CH * chan->chan_id);
+		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+				       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+		val &= mask_mode;
 		if (pd_chan->dir == DMA_TO_DEVICE)
 			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
 				       DMA_CTL0_DIR_SHIFT_BITS);
@@ -213,18 +231,24 @@ static void pdc_set_dir(struct dma_chan *chan)
 			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
 					 DMA_CTL0_DIR_SHIFT_BITS));
+		val |= mask_ctl;
 		dma_writel(pd, CTL0, val);
 	} else {
 		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
 		val = dma_readl(pd, CTL3);
+		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
+					(DMA_CTL0_BITS_PER_CH * ch);
+		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+						 (DMA_CTL0_BITS_PER_CH * ch));
+		val &= mask_mode;
 		if (pd_chan->dir == DMA_TO_DEVICE)
 			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
 				       DMA_CTL0_DIR_SHIFT_BITS);
 		else
 			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
 					 DMA_CTL0_DIR_SHIFT_BITS));
+		val |= mask_ctl;
 
 		dma_writel(pd, CTL3, val);
 	}
@@ -236,33 +260,37 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
 {
 	struct pch_dma *pd = to_pd(chan->device);
 	u32 val;
+	u32 mask_ctl;
+	u32 mask_dir;
 
 	if (chan->chan_id < 8) {
+		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+			   (DMA_CTL0_BITS_PER_CH * chan->chan_id));
+		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\
+				 DMA_CTL0_DIR_SHIFT_BITS);
 		val = dma_readl(pd, CTL0);
-		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
-			(DMA_CTL0_BITS_PER_CH * chan->chan_id));
+		val &= mask_dir;
 		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
+		val |= mask_ctl;
 		dma_writel(pd, CTL0, val);
 	} else {
 		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
+		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
+						 (DMA_CTL0_BITS_PER_CH * ch));
+		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\
+				 DMA_CTL0_DIR_SHIFT_BITS);
 		val = dma_readl(pd, CTL3);
-		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
-			(DMA_CTL0_BITS_PER_CH * ch));
+		val &= mask_dir;
 		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
+		val |= mask_ctl;
 		dma_writel(pd, CTL3, val);
 	}
 
 	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
 		chan->chan_id, val);
 }
 
-static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
+static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
 {
 	struct pch_dma *pd = to_pd(pd_chan->chan.device);
 	u32 val;
@@ -272,9 +300,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
 			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
 }
 
+static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
+{
+	struct pch_dma *pd = to_pd(pd_chan->chan.device);
+	u32 val;
+
+	val = dma_readl(pd, STS2);
+	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
+			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
+}
+
 static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
 {
-	if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE)
+	u32 sts;
+
+	if (pd_chan->chan.chan_id < 8)
+		sts = pdc_get_status0(pd_chan);
+	else
+		sts = pdc_get_status2(pd_chan);
+
+	if (sts == DMA_STATUS_IDLE)
 		return true;
 	else
 		return false;
@@ -495,11 +541,11 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
 		list_add_tail(&desc->desc_node, &tmp_list);
 	}
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock_irq(&pd_chan->lock);
 	list_splice(&tmp_list, &pd_chan->free_list);
 	pd_chan->descs_allocated = i;
 	pd_chan->completed_cookie = chan->cookie = 1;
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock_irq(&pd_chan->lock);
 
 	pdc_enable_irq(chan, 1);
@@ -517,10 +563,10 @@ static void pd_free_chan_resources(struct dma_chan *chan)
 	BUG_ON(!list_empty(&pd_chan->active_list));
 	BUG_ON(!list_empty(&pd_chan->queue));
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock_irq(&pd_chan->lock);
 	list_splice_init(&pd_chan->free_list, &tmp_list);
 	pd_chan->descs_allocated = 0;
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock_irq(&pd_chan->lock);
 
 	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
 		pci_pool_free(pd->pool, desc, desc->txd.phys);
@@ -536,10 +582,10 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 	dma_cookie_t last_completed;
 	int ret;
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock_irq(&pd_chan->lock);
 	last_completed = pd_chan->completed_cookie;
 	last_used = chan->cookie;
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock_irq(&pd_chan->lock);
 
 	ret = dma_async_is_complete(cookie, last_completed, last_used);
@@ -654,7 +700,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	if (cmd != DMA_TERMINATE_ALL)
 		return -ENXIO;
 
-	spin_lock_bh(&pd_chan->lock);
+	spin_lock_irq(&pd_chan->lock);
 
 	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
 
@@ -664,7 +710,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	list_for_each_entry_safe(desc, _d, &list, desc_node)
 		pdc_chain_complete(pd_chan, desc);
 
-	spin_unlock_bh(&pd_chan->lock);
+	spin_unlock_irq(&pd_chan->lock);
 
 	return 0;
 }
@@ -693,30 +739,45 @@ static irqreturn_t pd_irq(int irq, void *devid)
 	struct pch_dma *pd = (struct pch_dma *)devid;
 	struct pch_dma_chan *pd_chan;
 	u32 sts0;
+	u32 sts2;
 	int i;
-	int ret = IRQ_NONE;
+	int ret0 = IRQ_NONE;
+	int ret2 = IRQ_NONE;
 
 	sts0 = dma_readl(pd, STS0);
+	sts2 = dma_readl(pd, STS2);
 
 	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);
 
 	for (i = 0; i < pd->dma.chancnt; i++) {
 		pd_chan = &pd->channels[i];
 
-		if (sts0 & DMA_STATUS_IRQ(i)) {
-			if (sts0 & DMA_STATUS_ERR(i))
-				set_bit(0, &pd_chan->err_status);
+		if (i < 8) {
+			if (sts0 & DMA_STATUS_IRQ(i)) {
+				if (sts0 & DMA_STATUS0_ERR(i))
+					set_bit(0, &pd_chan->err_status);
 
-			tasklet_schedule(&pd_chan->tasklet);
-			ret = IRQ_HANDLED;
-		}
+				tasklet_schedule(&pd_chan->tasklet);
+				ret0 = IRQ_HANDLED;
+			}
+		} else {
+			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
+				if (sts2 & DMA_STATUS2_ERR(i))
+					set_bit(0, &pd_chan->err_status);
+
+				tasklet_schedule(&pd_chan->tasklet);
+				ret2 = IRQ_HANDLED;
+			}
+		}
 	}
 
 	/* clear interrupt bits in status register */
-	dma_writel(pd, STS0, sts0);
+	if (ret0)
+		dma_writel(pd, STS0, sts0);
+	if (ret2)
+		dma_writel(pd, STS2, sts2);
 
-	return ret;
+	return ret0 | ret2;
 }
 
 #ifdef CONFIG_PM
......
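The net effect of the pch_dma changes: channels 0-7 keep using CTL0/STS0, while channels 8-11 use CTL3 for mode/direction and STS2 for status, with their enable bits in CTL2 placed at chan_id + 8. The new mode masks also make the read-modify-write of one channel's control field leave its neighbours untouched. A condensed sketch of the routing, using the register offsets above but hypothetical helper names:

    /* Hypothetical helpers condensing the selection logic above. */
    static inline u32 my_ctl_reg(int chan_id)
    {
    	return chan_id < 8 ? PCH_DMA_CTL0 : PCH_DMA_CTL3;
    }

    static inline u32 my_sts_reg(int chan_id)
    {
    	return chan_id < 8 ? PCH_DMA_STS0 : PCH_DMA_STS2;
    }

    /* Enable bits inside CTL2: channels 0-7 at bits 0-7, channels
     * 8-11 shifted up to bits 16-19 (chan_id + 8). */
    static inline int my_ctl2_bit(int chan_id)
    {
    	return chan_id < 8 ? chan_id : chan_id + 8;
    }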
drivers/dma/pl330.c
@@ -82,7 +82,7 @@ struct dma_pl330_dmac {
 	spinlock_t pool_lock;
 
 	/* Peripheral channels connected to this DMAC */
-	struct dma_pl330_chan peripherals[0]; /* keep at end */
+	struct dma_pl330_chan *peripherals; /* keep at end */
 };
 
 struct dma_pl330_desc {
@@ -451,8 +451,13 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
 	desc->txd.cookie = 0;
 	async_tx_ack(&desc->txd);
 
-	desc->req.rqtype = peri->rqtype;
-	desc->req.peri = peri->peri_id;
+	if (peri) {
+		desc->req.rqtype = peri->rqtype;
+		desc->req.peri = peri->peri_id;
+	} else {
+		desc->req.rqtype = MEMTOMEM;
+		desc->req.peri = 0;
+	}
 
 	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
@@ -529,10 +534,10 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 	struct pl330_info *pi;
 	int burst;
 
-	if (unlikely(!pch || !len || !peri))
+	if (unlikely(!pch || !len))
 		return NULL;
 
-	if (peri->rqtype != MEMTOMEM)
+	if (peri && peri->rqtype != MEMTOMEM)
 		return NULL;
 
 	pi = &pch->dmac->pif;
@@ -577,7 +582,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	int i, burst_size;
 	dma_addr_t addr;
 
-	if (unlikely(!pch || !sgl || !sg_len))
+	if (unlikely(!pch || !sgl || !sg_len || !peri))
 		return NULL;
 
 	/* Make sure the direction is consistent */
@@ -666,17 +671,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	struct dma_device *pd;
 	struct resource *res;
 	int i, ret, irq;
+	int num_chan;
 
 	pdat = adev->dev.platform_data;
 
-	if (!pdat || !pdat->nr_valid_peri) {
-		dev_err(&adev->dev, "platform data missing\n");
-		return -ENODEV;
-	}
-
 	/* Allocate a new DMAC and its Channels */
-	pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
-				+ sizeof(*pdmac), GFP_KERNEL);
+	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
 	if (!pdmac) {
 		dev_err(&adev->dev, "unable to allocate mem\n");
 		return -ENOMEM;
@@ -685,7 +685,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pi = &pdmac->pif;
 	pi->dev = &adev->dev;
 	pi->pl330_data = NULL;
-	pi->mcbufsz = pdat->mcbuf_sz;
+	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
 
 	res = &adev->res;
 	request_mem_region(res->start, resource_size(res), "dma-pl330");
@@ -717,27 +717,35 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	INIT_LIST_HEAD(&pd->channels);
 
 	/* Initialize channel parameters */
-	for (i = 0; i < pdat->nr_valid_peri; i++) {
-		struct dma_pl330_peri *peri = &pdat->peri[i];
-		pch = &pdmac->peripherals[i];
+	num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
+	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
 
-		switch (peri->rqtype) {
-		case MEMTOMEM:
+	for (i = 0; i < num_chan; i++) {
+		pch = &pdmac->peripherals[i];
+		if (pdat) {
+			struct dma_pl330_peri *peri = &pdat->peri[i];
+
+			switch (peri->rqtype) {
+			case MEMTOMEM:
+				dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+				break;
+			case MEMTODEV:
+			case DEVTOMEM:
+				dma_cap_set(DMA_SLAVE, pd->cap_mask);
+				break;
+			default:
+				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
+				continue;
+			}
+			pch->chan.private = peri;
+		} else {
 			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
-			break;
-		case MEMTODEV:
-		case DEVTOMEM:
-			dma_cap_set(DMA_SLAVE, pd->cap_mask);
-			break;
-		default:
-			dev_err(&adev->dev, "DEVTODEV Not Supported\n");
-			continue;
+			pch->chan.private = NULL;
 		}
 
 		INIT_LIST_HEAD(&pch->work_list);
 		spin_lock_init(&pch->lock);
 		pch->pl330_chid = NULL;
-		pch->chan.private = peri;
 		pch->chan.device = pd;
 		pch->chan.chan_id = i;
 		pch->dmac = pdmac;
......
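With platform data now optional, a pl330 that probes without it advertises only DMA_MEMCPY and leaves chan.private NULL, which pl330_get_desc() then maps to a MEMTOMEM request. A hedged sketch of what a generic memcpy client could look like on top of that; error handling is abbreviated and the completion callback is omitted:

    #include <linux/dmaengine.h>

    /* Sketch only: request any DMA_MEMCPY-capable channel (no filter
     * needed) and issue one copy between two DMA addresses. */
    static int my_dma_copy(dma_addr_t dst, dma_addr_t src, size_t len)
    {
    	dma_cap_mask_t mask;
    	struct dma_chan *chan;
    	struct dma_async_tx_descriptor *tx;

    	dma_cap_zero(mask);
    	dma_cap_set(DMA_MEMCPY, mask);

    	chan = dma_request_channel(mask, NULL, NULL);
    	if (!chan)
    		return -ENODEV;

    	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
    						  DMA_PREP_INTERRUPT);
    	if (!tx) {
    		dma_release_channel(chan);
    		return -ENOMEM;
    	}

    	tx->tx_submit(tx);		/* queue the descriptor */
    	dma_async_issue_pending(chan);	/* kick the engine */
    	return 0;
    }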
drivers/dma/ste_dma40.c (diff collapsed)
drivers/dma/ste_dma40_ll.h
@@ -184,9 +184,6 @@
 #define D40_DREG_PERIPHID0	0xFE0
 #define D40_DREG_PERIPHID1	0xFE4
 #define D40_DREG_PERIPHID2	0xFE8
-#define D40_DREG_PERIPHID2_REV_POS 4
-#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
-#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
 #define D40_DREG_PERIPHID3	0xFEC
 #define D40_DREG_CELLID0	0xFF0
 #define D40_DREG_CELLID1	0xFF4
......
include/linux/amba/pl08x.h
@@ -172,8 +172,11 @@ struct pl08x_dma_chan {
 	int phychan_hold;
 	struct tasklet_struct tasklet;
 	char *name;
-	struct pl08x_channel_data *cd;
-	dma_addr_t runtime_addr;
+	const struct pl08x_channel_data *cd;
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	u32 src_cctl;
+	u32 dst_cctl;
 	enum dma_data_direction runtime_direction;
 	dma_cookie_t lc;
 	struct list_head pend_list;
@@ -202,7 +205,7 @@ struct pl08x_dma_chan {
  * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
  */
 struct pl08x_platform_data {
-	struct pl08x_channel_data *slave_channels;
+	const struct pl08x_channel_data *slave_channels;
 	unsigned int num_slave_channels;
 	struct pl08x_channel_data memcpy_channel;
 	int (*get_signal)(struct pl08x_dma_chan *);
......
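Constifying cd and slave_channels lets board files keep their channel tables in read-only memory. A hypothetical board snippet under that assumption; the field values are illustrative, not from any real platform:

    #include <linux/amba/pl08x.h>
    #include <linux/kernel.h>	/* ARRAY_SIZE() */

    static const struct pl08x_channel_data board_slave_channels[] = {
    	{
    		.bus_id = "uart0_tx",	/* illustrative values */
    		.min_signal = 0,
    		.max_signal = 0,
    		.periph_buses = PL08X_AHB1,
    	},
    };

    static struct pl08x_platform_data pl08x_pd = {
    	.slave_channels = board_slave_channels,	/* now a const table */
    	.num_slave_channels = ARRAY_SIZE(board_slave_channels),
    };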