Commit 9da5bb24 authored by Linus Torvalds

Merge tag 'dmaengine-fix-5.3-rc5' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "Fixes in dmaengine drivers for:

   - dw-edma: endianness, __iomem type and stack usage

   - ste_dma40: unneeded variable and null-pointer dereference

   - tegra210-adma: unused function

   - omap-dma: off-by-one fix"

* tag 'dmaengine-fix-5.3-rc5' of git://git.infradead.org/users/vkoul/slave-dma:
  omap-dma/omap_vout_vrfb: fix off-by-one fi value
  dmaengine: stm32-mdma: Fix a possible null-pointer dereference in stm32_mdma_irq_handler()
  dmaengine: tegra210-adma: Fix unused function warnings
  dmaengine: ste_dma40: fix unneeded variable warning
  dmaengine: dw-edma: fix endianess confusion
  dmaengine: dw-edma: fix __iomem type confusion
  dmaengine: dw-edma: fix unnecessary stack usage
parents cfa0bb2a d555c343
@@ -50,7 +50,7 @@ struct dw_edma_burst {
 struct dw_edma_region {
 	phys_addr_t paddr;
-	dma_addr_t vaddr;
+	void __iomem *vaddr;
 	size_t sz;
 };
...
@@ -130,19 +130,19 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 	chip->id = pdev->devfn;
 	chip->irq = pdev->irq;
 
-	dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar];
+	dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
 	dw->rg_region.vaddr += pdata->rg_off;
 	dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
 	dw->rg_region.paddr += pdata->rg_off;
 	dw->rg_region.sz = pdata->rg_sz;
 
-	dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar];
+	dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
 	dw->ll_region.vaddr += pdata->ll_off;
 	dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
 	dw->ll_region.paddr += pdata->ll_off;
 	dw->ll_region.sz = pdata->ll_sz;
 
-	dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar];
+	dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
 	dw->dt_region.vaddr += pdata->dt_off;
 	dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
 	dw->dt_region.paddr += pdata->dt_off;
@@ -158,17 +158,17 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 	pci_dbg(pdev, "Mode:\t%s\n",
 		dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
 
-	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
 		pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
-		&dw->rg_region.vaddr, &dw->rg_region.paddr);
+		dw->rg_region.vaddr, &dw->rg_region.paddr);
 
-	pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+	pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
 		pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
-		&dw->ll_region.vaddr, &dw->ll_region.paddr);
+		dw->ll_region.vaddr, &dw->ll_region.paddr);
 
-	pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+	pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
 		pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
-		&dw->dt_region.vaddr, &dw->dt_region.paddr);
+		dw->dt_region.vaddr, &dw->dt_region.paddr);
 
 	pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
...
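A side note on the %pa to %p switch in the pci_dbg() calls above: %pa expects a pointer to a phys_addr_t/dma_addr_t and dereferences it, while %p prints a pointer value directly (hashed by default). Since vaddr is now a void __iomem * rather than a dma_addr_t, it is passed by value and printed with %p. A minimal sketch of the distinction, using a hypothetical region struct and logging helper that only mirror the driver's fields:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical region descriptor, loosely modelled on struct dw_edma_region. */
struct example_region {
	phys_addr_t paddr;	/* printed with %pa, passed by address */
	void __iomem *vaddr;	/* printed with %p, passed by value    */
	size_t sz;
};

static void example_log_region(struct device *dev, struct example_region *r)
{
	/*
	 * %pa dereferences its argument, so it must be given &r->paddr;
	 * %p takes the pointer itself.
	 */
	dev_dbg(dev, "region: sz=0x%zx, addr(v=%p, p=%pa)\n",
		r->sz, r->vaddr, &r->paddr);
}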
@@ -25,7 +25,7 @@ enum dw_edma_control {
 static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
 {
-	return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
+	return dw->rg_region.vaddr;
 }
 
 #define SET(dw, name, value) \
@@ -192,13 +192,12 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
 static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 {
 	struct dw_edma_burst *child;
-	struct dw_edma_v0_lli *lli;
-	struct dw_edma_v0_llp *llp;
+	struct dw_edma_v0_lli __iomem *lli;
+	struct dw_edma_v0_llp __iomem *llp;
 	u32 control = 0, i = 0;
-	u64 sar, dar, addr;
 	int j;
 
-	lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;
+	lli = chunk->ll_region.vaddr;
 
 	if (chunk->cb)
 		control = DW_EDMA_V0_CB;
@@ -214,17 +213,15 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 		/* Transfer size */
 		SET_LL(&lli[i].transfer_size, child->sz);
 		/* SAR - low, high */
-		sar = cpu_to_le64(child->sar);
-		SET_LL(&lli[i].sar_low, lower_32_bits(sar));
-		SET_LL(&lli[i].sar_high, upper_32_bits(sar));
+		SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
+		SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
 		/* DAR - low, high */
-		dar = cpu_to_le64(child->dar);
-		SET_LL(&lli[i].dar_low, lower_32_bits(dar));
-		SET_LL(&lli[i].dar_high, upper_32_bits(dar));
+		SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
+		SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
 		i++;
 	}
 
-	llp = (struct dw_edma_v0_llp *)&lli[i];
+	llp = (void __iomem *)&lli[i];
 	control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
 	if (!chunk->cb)
 		control |= DW_EDMA_V0_CB;
@@ -232,9 +229,8 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 	/* Channel control */
 	SET_LL(&llp->control, control);
 	/* Linked list - low, high */
-	addr = cpu_to_le64(chunk->ll_region.paddr);
-	SET_LL(&llp->llp_low, lower_32_bits(addr));
-	SET_LL(&llp->llp_high, upper_32_bits(addr));
+	SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
+	SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
 }
 
 void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -242,7 +238,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 	struct dw_edma_chan *chan = chunk->chan;
 	struct dw_edma *dw = chan->chip->dw;
 	u32 tmp;
-	u64 llp;
 
 	dw_edma_v0_core_write_chunk(chunk);
@@ -262,9 +257,10 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 		SET_CH(dw, chan->dir, chan->id, ch_control1,
 		       (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
 		/* Linked list - low, high */
-		llp = cpu_to_le64(chunk->ll_region.paddr);
-		SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
-		SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
+		SET_CH(dw, chan->dir, chan->id, llp_low,
+		       lower_32_bits(chunk->ll_region.paddr));
+		SET_CH(dw, chan->dir, chan->id, llp_high,
+		       upper_32_bits(chunk->ll_region.paddr));
 	}
 	/* Doorbell */
 	SET_RW(dw, chan->dir, doorbell,
...
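A note on the endianness part of the dw-edma changes above: the SET_LL()/SET_CH() accessors end up in writel(), and writel() already converts from CPU byte order to the little-endian order the device expects, so pre-swapping with cpu_to_le64() and then splitting with lower_32_bits()/upper_32_bits() would both double-swap and pick the wrong halves on big-endian hosts. A minimal sketch of the idea, using a hypothetical helper rather than the driver's macros:

#include <linux/io.h>
#include <linux/kernel.h>

/*
 * Hypothetical helper: program a 64-bit address into a pair of
 * little-endian 32-bit MMIO registers.
 */
static void example_write_addr64(void __iomem *lo, void __iomem *hi, u64 addr)
{
	/*
	 * Keep 'addr' in CPU byte order; writel() performs the
	 * little-endian conversion itself. Swapping beforehand with
	 * cpu_to_le64() would double-swap on big-endian machines and
	 * make lower_32_bits()/upper_32_bits() split the wrong value.
	 */
	writel(lower_32_bits(addr), lo);
	writel(upper_32_bits(addr), hi);
}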
@@ -14,7 +14,7 @@
 #include "dw-edma-core.h"
 
 #define REGS_ADDR(name) \
-	((dma_addr_t *)&regs->name)
+	((void __force *)&regs->name)
 #define REGISTER(name) \
 	{ #name, REGS_ADDR(name) }
@@ -40,36 +40,37 @@
 static struct dentry *base_dir;
 static struct dw_edma *dw;
-static struct dw_edma_v0_regs *regs;
+static struct dw_edma_v0_regs __iomem *regs;
 
 static struct {
-	void *start;
-	void *end;
+	void __iomem *start;
+	void __iomem *end;
 } lim[2][EDMA_V0_MAX_NR_CH];
 
 struct debugfs_entries {
-	char name[24];
+	const char *name;
 	dma_addr_t *reg;
 };
 
 static int dw_edma_debugfs_u32_get(void *data, u64 *val)
 {
+	void __iomem *reg = (void __force __iomem *)data;
 	if (dw->mode == EDMA_MODE_LEGACY &&
-	    data >= (void *)&regs->type.legacy.ch) {
-		void *ptr = (void *)&regs->type.legacy.ch;
+	    reg >= (void __iomem *)&regs->type.legacy.ch) {
+		void __iomem *ptr = &regs->type.legacy.ch;
 		u32 viewport_sel = 0;
 		unsigned long flags;
 		u16 ch;
 
 		for (ch = 0; ch < dw->wr_ch_cnt; ch++)
-			if (lim[0][ch].start >= data && data < lim[0][ch].end) {
-				ptr += (data - lim[0][ch].start);
+			if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
+				ptr += (reg - lim[0][ch].start);
 				goto legacy_sel_wr;
 			}
 
 		for (ch = 0; ch < dw->rd_ch_cnt; ch++)
-			if (lim[1][ch].start >= data && data < lim[1][ch].end) {
-				ptr += (data - lim[1][ch].start);
+			if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
				ptr += (reg - lim[1][ch].start);
 				goto legacy_sel_rd;
 			}
@@ -86,7 +87,7 @@ static int dw_edma_debugfs_u32_get(void *data, u64 *val)
 		raw_spin_unlock_irqrestore(&dw->lock, flags);
 	} else {
-		*val = readl(data);
+		*val = readl(reg);
 	}
 
 	return 0;
@@ -105,7 +106,7 @@ static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
 	}
 }
 
-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs,
+static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
 				    struct dentry *dir)
 {
 	int nr_entries;
@@ -288,7 +289,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
 	if (!dw)
 		return;
 
-	regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
+	regs = dw->rg_region.vaddr;
 	if (!regs)
 		return;
...
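The debugfs changes above are mostly about carrying the __iomem annotation all the way through, so sparse can check that MMIO registers are only accessed via readl()/writel() and that any cast away from __iomem is an explicit __force. A minimal illustration of the annotation with hypothetical names, not the driver's types:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical register block living behind a PCI BAR mapping. */
struct example_regs {
	u32 ctrl;
	u32 status;
};

static u32 example_read_status(struct example_regs __iomem *regs)
{
	/*
	 * 'regs' stays __iomem end to end; sparse warns if it is
	 * dereferenced directly or silently cast to a plain pointer,
	 * which is the class of warning the debugfs code now avoids.
	 */
	return readl(&regs->status);
}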
@@ -142,7 +142,7 @@ enum d40_events {
  * when the DMA hw is powered off.
  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
  */
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
 	D40_DREG_LCPA,
 	D40_DREG_LCLA,
 	D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
 
-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
 	D40_CHAN_REG_SSCFG,
 	D40_CHAN_REG_SSELT,
 	D40_CHAN_REG_SSPTR,
...
@@ -1366,7 +1366,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
 	chan = &dmadev->chan[id];
 	if (!chan) {
-		dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+		dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
 		goto exit;
 	}
...
@@ -712,7 +712,7 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
 	return chan;
 }
 
-static int tegra_adma_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
 {
 	struct tegra_adma *tdma = dev_get_drvdata(dev);
 	struct tegra_adma_chan_regs *ch_reg;
@@ -744,7 +744,7 @@ static int tegra_adma_runtime_suspend(struct device *dev)
 	return 0;
 }
 
-static int tegra_adma_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
 {
 	struct tegra_adma *tdma = dev_get_drvdata(dev);
 	struct tegra_adma_chan_regs *ch_reg;
...
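The ste_dma40 and tegra210-adma hunks take the same approach to "defined but not used" warnings: the register tables and runtime-PM callbacks are only referenced when power management is enabled, so __maybe_unused keeps !CONFIG_PM builds warning-free without #ifdef clutter. A minimal sketch of the pattern for a hypothetical driver:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int __maybe_unused example_runtime_suspend(struct device *dev)
{
	/* Only referenced via SET_RUNTIME_PM_OPS() when CONFIG_PM=y. */
	return 0;
}

static int __maybe_unused example_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
};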
@@ -1234,7 +1234,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
 	if (src_icg) {
 		d->ccr |= CCR_SRC_AMODE_DBLIDX;
 		d->ei = 1;
-		d->fi = src_icg;
+		d->fi = src_icg + 1;
 	} else if (xt->src_inc) {
 		d->ccr |= CCR_SRC_AMODE_POSTINC;
 		d->fi = 0;
@@ -1249,7 +1249,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
 	if (dst_icg) {
 		d->ccr |= CCR_DST_AMODE_DBLIDX;
 		sg->ei = 1;
-		sg->fi = dst_icg;
+		sg->fi = dst_icg + 1;
 	} else if (xt->dst_inc) {
 		d->ccr |= CCR_DST_AMODE_POSTINC;
 		sg->fi = 0;
...
@@ -253,8 +253,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
 	 */
 	pixsize = vout->bpp * vout->vrfb_bpp;
-	dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) -
-		   (vout->pix.width * vout->bpp)) + 1;
+	dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;
 
 	xt->src_start = vout->buf_phy_addr[vb->i];
 	xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
...
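On the omap-dma/omap_vout_vrfb pair above: as the two hunks imply, the "+ 1" that omap_vout_prepare_vrfb() used to fold into dst_icg now lives in omap_dma_prep_dma_interleaved(), so the dmaengine user passes the raw inter-chunk gap and the omap-dma driver derives the hardware frame index (icg + 1) itself. A rough sketch of that contract with hypothetical helpers, not the drivers' exact math:

#include <linux/types.h>

/* Gap between consecutive chunks, in bytes; the caller adds no "+ 1". */
static inline size_t example_icg(size_t line_bytes, size_t payload_bytes)
{
	return line_bytes - payload_bytes;
}

/* The "+ 1" is applied exactly once, on the driver side. */
static inline u32 example_frame_index(size_t icg)
{
	return icg + 1;
}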