Commit e0c1d538 authored by Gustavo Pimentel, committed by Vinod Koul

dmaengine: dw-edma: Add support for the HDMA feature

Add support for the HDMA feature.

This new feature enables the current eDMA IP to use a deeper prefetch
of the linked list, which reduces the algorithm execution latency
observed when loading the elements of the list, resulting in more
stable and higher data transfer rates.
Signed-off-by: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
Link: https://lore.kernel.org/r/5f40f89ef7d6255a12d5b23f34e6e59dcd28861e.1613674948.git.gustavo.pimentel@synopsys.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent b79f1751
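
For context, the sketch below (not part of this commit) shows how a vendor glue driver could opt into the new map format. struct dw_edma_pcie_data, the .mf and .irqs fields, and the EDMA_MF_* values all come from the hunks that follow; the acme_hdma_data instance and its configuration are hypothetical.

/*
 * Hypothetical glue-driver data, modeled on snps_edda_data below.
 * Picking EDMA_MF_HDMA_COMPAT instead of EDMA_MF_EDMA_UNROLL makes
 * dw_edma_v0_core_start() also enable the per-channel power bits
 * (ch0_pwr_en..ch7_pwr_en) added by this patch. The BAR/offset
 * fields are omitted here and would be filled in for real hardware.
 */
static const struct dw_edma_pcie_data acme_hdma_data = {
	/* Other */
	.mf		= EDMA_MF_HDMA_COMPAT,
	.irqs		= 1,
};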
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -21,9 +21,10 @@ enum dw_edma_dir {
 	EDMA_DIR_READ
 };
 
-enum dw_edma_mode {
-	EDMA_MODE_LEGACY = 0,
-	EDMA_MODE_UNROLL
+enum dw_edma_map_format {
+	EDMA_MF_EDMA_LEGACY = 0x0,
+	EDMA_MF_EDMA_UNROLL = 0x1,
+	EDMA_MF_HDMA_COMPAT = 0x5
 };
 
 enum dw_edma_request {
@@ -123,8 +124,7 @@ struct dw_edma {
 	struct dw_edma_irq		*irq;
 	int				nr_irqs;
 
-	u32				version;
-	enum dw_edma_mode		mode;
+	enum dw_edma_map_format		mf;
 
 	struct dw_edma_chan		*chan;
 	const struct dw_edma_core_ops	*ops;
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -30,8 +30,7 @@ struct dw_edma_pcie_data {
 	off_t				dt_off;
 	size_t				dt_sz;
 	/* Other */
-	u32				version;
-	enum dw_edma_mode		mode;
+	enum dw_edma_map_format		mf;
 	u8				irqs;
 };
 
@@ -49,8 +48,7 @@ static const struct dw_edma_pcie_data snps_edda_data = {
 	.dt_off				= 0x00800000,	/* 8 Mbytes */
 	.dt_sz				= 0x03800000,	/* 56 Mbytes */
 	/* Other */
-	.version			= 0,
-	.mode				= EDMA_MODE_UNROLL,
+	.mf				= EDMA_MF_EDMA_UNROLL,
 	.irqs				= 1,
 };
 
@@ -69,8 +67,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 	const struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
 	struct device *dev = &pdev->dev;
 	struct dw_edma_chip *chip;
-	int err, nr_irqs;
 	struct dw_edma *dw;
+	int err, nr_irqs;
 
 	/* Enable PCI device */
 	err = pcim_enable_device(pdev);
@@ -157,16 +155,19 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 	dw->dt_region.paddr += pdata->dt_off;
 	dw->dt_region.sz = pdata->dt_sz;
 
-	dw->version = pdata->version;
-	dw->mode = pdata->mode;
+	dw->mf = pdata->mf;
 	dw->nr_irqs = nr_irqs;
 	dw->ops = &dw_edma_pcie_core_ops;
 
 	/* Debug info */
-	pci_dbg(pdev, "Version:\t%u\n", dw->version);
-
-	pci_dbg(pdev, "Mode:\t%s\n",
-		dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
+	if (dw->mf == EDMA_MF_EDMA_LEGACY)
+		pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", dw->mf);
+	else if (dw->mf == EDMA_MF_EDMA_UNROLL)
+		pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", dw->mf);
+	else if (dw->mf == EDMA_MF_HDMA_COMPAT)
+		pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", dw->mf);
+	else
+		pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", dw->mf);
 
 	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
 		pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -96,7 +96,7 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
 static inline struct dw_edma_v0_ch_regs __iomem *
 __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
 {
-	if (dw->mode == EDMA_MODE_LEGACY)
+	if (dw->mf == EDMA_MF_EDMA_LEGACY)
 		return &(__dw_regs(dw)->type.legacy.ch);
 
 	if (dir == EDMA_DIR_WRITE)
@@ -108,7 +108,7 @@ __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
 static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
 			     u32 value, void __iomem *addr)
 {
-	if (dw->mode == EDMA_MODE_LEGACY) {
+	if (dw->mf == EDMA_MF_EDMA_LEGACY) {
 		u32 viewport_sel;
 		unsigned long flags;
 
@@ -133,7 +133,7 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
 {
 	u32 value;
 
-	if (dw->mode == EDMA_MODE_LEGACY) {
+	if (dw->mf == EDMA_MF_EDMA_LEGACY) {
 		u32 viewport_sel;
 		unsigned long flags;
 
@@ -365,6 +365,42 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 	if (first) {
 		/* Enable engine */
 		SET_RW_32(dw, chan->dir, engine_en, BIT(0));
+		if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+			switch (chan->id) {
+			case 0:
+				SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
+					      BIT(0));
+				break;
+			case 1:
+				SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en,
+					      BIT(0));
+				break;
+			case 2:
+				SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en,
+					      BIT(0));
+				break;
+			case 3:
+				SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en,
+					      BIT(0));
+				break;
+			case 4:
+				SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en,
+					      BIT(0));
+				break;
+			case 5:
+				SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en,
+					      BIT(0));
+				break;
+			case 6:
+				SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en,
+					      BIT(0));
+				break;
+			case 7:
+				SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en,
+					      BIT(0));
+				break;
+			}
+		}
 		/* Interrupt unmask - done, abort */
 		tmp = GET_RW_32(dw, chan->dir, int_mask);
 		tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -55,7 +55,7 @@ struct debugfs_entries {
 static int dw_edma_debugfs_u32_get(void *data, u64 *val)
 {
 	void __iomem *reg = (void __force __iomem *)data;
-	if (dw->mode == EDMA_MODE_LEGACY &&
+	if (dw->mf == EDMA_MF_EDMA_LEGACY &&
 	    reg >= (void __iomem *)&regs->type.legacy.ch) {
 		void __iomem *ptr = &regs->type.legacy.ch;
 		u32 viewport_sel = 0;
@@ -174,7 +174,7 @@ static void dw_edma_debugfs_regs_wr(struct dentry *dir)
 	nr_entries = ARRAY_SIZE(debugfs_regs);
 	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
 
-	if (dw->mode == EDMA_MODE_UNROLL) {
+	if (dw->mf == EDMA_MF_HDMA_COMPAT) {
 		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
 		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
 					   regs_dir);
@@ -243,7 +243,7 @@ static void dw_edma_debugfs_regs_rd(struct dentry *dir)
 	nr_entries = ARRAY_SIZE(debugfs_regs);
 	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
 
-	if (dw->mode == EDMA_MODE_UNROLL) {
+	if (dw->mf == EDMA_MF_HDMA_COMPAT) {
 		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
 		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
 					   regs_dir);
@@ -297,8 +297,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
 	if (!base_dir)
 		return;
 
-	debugfs_create_u32("version", 0444, base_dir, &dw->version);
-	debugfs_create_u32("mode", 0444, base_dir, &dw->mode);
+	debugfs_create_u32("mf", 0444, base_dir, &dw->mf);
 	debugfs_create_u16("wr_ch_cnt", 0444, base_dir, &dw->wr_ch_cnt);
 	debugfs_create_u16("rd_ch_cnt", 0444, base_dir, &dw->rd_ch_cnt);