Commit 6c5e6a39 authored by Vinod Koul's avatar Vinod Koul

Merge tag 'ux500-dma40' of git://git.linaro.org/people/fabiobaltieri/linux.git

Pull ste_dma40 fixes from Fabio
Signed-off-by: default avatarVinod Koul <vinod.koul@intel.com>
parents 77bcc497 da2ac56a
...@@ -53,6 +53,8 @@ ...@@ -53,6 +53,8 @@
#define D40_ALLOC_PHY (1 << 30) #define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0 #define D40_ALLOC_LOG_FREE 0
/*
 * Max of two values. Both arguments are evaluated twice, so pass only
 * side-effect-free expressions. A statement-expression form is not an
 * option here because the result must be an integer constant expression
 * (it is used to size the reg_val_backup_v4[] array in struct d40_base).
 */
#define MAX(a, b) (((a) < (b)) ? (b) : (a))
/** /**
* enum 40_command - The different commands and/or statuses. * enum 40_command - The different commands and/or statuses.
* *
...@@ -100,8 +102,19 @@ static u32 d40_backup_regs[] = { ...@@ -100,8 +102,19 @@ static u32 d40_backup_regs[] = {
#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs) #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */ /*
static u32 d40_backup_regs_v3[] = { * since 9540 and 8540 has the same HW revision
 * use v4a for 9540 or earlier
* use v4b for 8540 or later
* HW revision:
* DB8500ed has revision 0
* DB8500v1 has revision 2
* DB8500v2 has revision 3
* AP9540v1 has revision 4
* DB8540v1 has revision 4
* TODO: Check if all these registers have to be saved/restored on dma40 v4a
*/
static u32 d40_backup_regs_v4a[] = {
D40_DREG_PSEG1, D40_DREG_PSEG1,
D40_DREG_PSEG2, D40_DREG_PSEG2,
D40_DREG_PSEG3, D40_DREG_PSEG3,
...@@ -120,7 +133,32 @@ static u32 d40_backup_regs_v3[] = { ...@@ -120,7 +133,32 @@ static u32 d40_backup_regs_v3[] = {
D40_DREG_RCEG4, D40_DREG_RCEG4,
}; };
#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3) #define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)
/*
 * Registers that exist only on the v4b variant of the controller
 * (selected when the platform reports 14 physical channels, i.e. the
 * DB8540 family per the HW-revision note above). They are saved to
 * reg_val_backup_v4[] before power off and restored afterwards via
 * gen_dmac.backup / gen_dmac.backup_size.
 */
static u32 d40_backup_regs_v4b[] = {
D40_DREG_CPSEG1,
D40_DREG_CPSEG2,
D40_DREG_CPSEG3,
D40_DREG_CPSEG4,
D40_DREG_CPSEG5,
D40_DREG_CPCEG1,
D40_DREG_CPCEG2,
D40_DREG_CPCEG3,
D40_DREG_CPCEG4,
D40_DREG_CPCEG5,
D40_DREG_CRSEG1,
D40_DREG_CRSEG2,
D40_DREG_CRSEG3,
D40_DREG_CRSEG4,
D40_DREG_CRSEG5,
D40_DREG_CRCEG1,
D40_DREG_CRCEG2,
D40_DREG_CRCEG3,
D40_DREG_CRCEG4,
D40_DREG_CRCEG5,
};
/* Number of entries in d40_backup_regs_v4b[] */
#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
static u32 d40_backup_regs_chan[] = { static u32 d40_backup_regs_chan[] = {
D40_CHAN_REG_SSCFG, D40_CHAN_REG_SSCFG,
...@@ -133,6 +171,102 @@ static u32 d40_backup_regs_chan[] = { ...@@ -133,6 +171,102 @@ static u32 d40_backup_regs_chan[] = {
D40_CHAN_REG_SDLNK, D40_CHAN_REG_SDLNK,
}; };
/**
* struct d40_interrupt_lookup - lookup table for interrupt handler
*
* @src: Interrupt mask register.
* @clr: Interrupt clear register.
* @is_error: true if this is an error interrupt.
* @offset: start delta in the lookup_log_chans in d40_base. If equals to
* D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
*/
struct d40_interrupt_lookup {
u32 src;
u32 clr;
bool is_error;
int offset;
};
/*
 * Interrupt lookup table for the v4a controller variant: one row per
 * {status, clear} register pair, covering 4 x 32 logical channels plus
 * the physical channels (offset == D40_PHY_CHAN). Rows with
 * is_error == true map the corresponding error-interrupt registers.
 * Installed into gen_dmac.il for use by d40_handle_interrupt().
 */
static struct d40_interrupt_lookup il_v4a[] = {
{D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
{D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
{D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
{D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
{D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
{D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
{D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
};
/*
 * Interrupt lookup table for the v4b controller variant. Same layout as
 * il_v4a but with the v4b register names and 5 x 32 logical channels
 * (offsets 0..128) plus the physical channels (D40_PHY_CHAN).
 * Installed into gen_dmac.il for use by d40_handle_interrupt().
 */
static struct d40_interrupt_lookup il_v4b[] = {
{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false, 0},
{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true, 0},
{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true, 32},
{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true, 64},
{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true, 96},
{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true, 128},
{D40_DREG_CPCTIS, D40_DREG_CPCICR, false, D40_PHY_CHAN},
{D40_DREG_CPCEIS, D40_DREG_CPCICR, true, D40_PHY_CHAN},
};
/**
* struct d40_reg_val - simple lookup struct
*
* @reg: The register.
* @val: The value that belongs to the register in reg.
*/
struct d40_reg_val {
unsigned int reg;
unsigned int val;
};
/*
 * One-time init writes for the v4a controller, applied in order by
 * d40_hw_init() via gen_dmac.init_reg / init_reg_size. Marked
 * __initdata since it is only needed during probe.
 */
static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
/* Clock every part of the DMA block from start */
{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
/* Interrupts on all logical channels */
{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};
/*
 * One-time init writes for the v4b controller — v4b counterpart of
 * dma_init_reg_v4a, applied in order by d40_hw_init() via
 * gen_dmac.init_reg / init_reg_size. Marked __initdata since it is only
 * needed during probe.
 */
static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
/* Clock every part of the DMA block from start */
{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
/* Interrupts on all logical channels */
{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};
/** /**
* struct d40_lli_pool - Structure for keeping LLIs in memory * struct d40_lli_pool - Structure for keeping LLIs in memory
* *
...@@ -221,6 +355,7 @@ struct d40_lcla_pool { ...@@ -221,6 +355,7 @@ struct d40_lcla_pool {
* @allocated_dst: Same as for src but is dst. * @allocated_dst: Same as for src but is dst.
* allocated_dst and allocated_src uses the D40_ALLOC* defines as well as * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
* event line number. * event line number.
* @use_soft_lli: To mark if the linked lists of channel are managed by SW.
*/ */
struct d40_phy_res { struct d40_phy_res {
spinlock_t lock; spinlock_t lock;
...@@ -228,6 +363,7 @@ struct d40_phy_res { ...@@ -228,6 +363,7 @@ struct d40_phy_res {
int num; int num;
u32 allocated_src; u32 allocated_src;
u32 allocated_dst; u32 allocated_dst;
bool use_soft_lli;
}; };
struct d40_base; struct d40_base;
...@@ -248,6 +384,7 @@ struct d40_base; ...@@ -248,6 +384,7 @@ struct d40_base;
* @client: Cliented owned descriptor list. * @client: Cliented owned descriptor list.
* @pending_queue: Submitted jobs, to be issued by issue_pending() * @pending_queue: Submitted jobs, to be issued by issue_pending()
* @active: Active descriptor. * @active: Active descriptor.
* @done: Completed jobs
* @queue: Queued jobs. * @queue: Queued jobs.
* @prepare_queue: Prepared jobs. * @prepare_queue: Prepared jobs.
* @dma_cfg: The client configuration of this dma channel. * @dma_cfg: The client configuration of this dma channel.
...@@ -273,6 +410,7 @@ struct d40_chan { ...@@ -273,6 +410,7 @@ struct d40_chan {
struct list_head client; struct list_head client;
struct list_head pending_queue; struct list_head pending_queue;
struct list_head active; struct list_head active;
struct list_head done;
struct list_head queue; struct list_head queue;
struct list_head prepare_queue; struct list_head prepare_queue;
struct stedma40_chan_cfg dma_cfg; struct stedma40_chan_cfg dma_cfg;
...@@ -288,6 +426,38 @@ struct d40_chan { ...@@ -288,6 +426,38 @@ struct d40_chan {
enum dma_transfer_direction runtime_direction; enum dma_transfer_direction runtime_direction;
}; };
/**
* struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
* controller
*
* @backup: the pointer to the registers address array for backup
* @backup_size: the size of the registers address array for backup
* @realtime_en: the realtime enable register
* @realtime_clear: the realtime clear register
* @high_prio_en: the high priority enable register
* @high_prio_clear: the high priority clear register
* @interrupt_en: the interrupt enable register
* @interrupt_clear: the interrupt clear register
* @il: the pointer to struct d40_interrupt_lookup
* @il_size: the size of d40_interrupt_lookup array
* @init_reg: the pointer to the struct d40_reg_val
* @init_reg_size: the size of d40_reg_val array
*/
struct d40_gen_dmac {
u32 *backup;
u32 backup_size;
u32 realtime_en;
u32 realtime_clear;
u32 high_prio_en;
u32 high_prio_clear;
u32 interrupt_en;
u32 interrupt_clear;
struct d40_interrupt_lookup *il;
u32 il_size;
struct d40_reg_val *init_reg;
u32 init_reg_size;
};
/** /**
* struct d40_base - The big global struct, one for each probe'd instance. * struct d40_base - The big global struct, one for each probe'd instance.
* *
...@@ -326,11 +496,13 @@ struct d40_chan { ...@@ -326,11 +496,13 @@ struct d40_chan {
* @desc_slab: cache for descriptors. * @desc_slab: cache for descriptors.
* @reg_val_backup: Here the values of some hardware registers are stored * @reg_val_backup: Here the values of some hardware registers are stored
* before the DMA is powered off. They are restored when the power is back on. * before the DMA is powered off. They are restored when the power is back on.
* @reg_val_backup_v3: Backup of registers that only exits on dma40 v3 and * @reg_val_backup_v4: Backup of registers that only exits on dma40 v3 and
* later. * later
* @reg_val_backup_chan: Backup data for standard channel parameter registers. * @reg_val_backup_chan: Backup data for standard channel parameter registers.
* @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
* @initialized: true if the dma has been initialized * @initialized: true if the dma has been initialized
* @gen_dmac: the struct for generic registers values to represent u8500/8540
* DMA controller
*/ */
struct d40_base { struct d40_base {
spinlock_t interrupt_lock; spinlock_t interrupt_lock;
...@@ -344,6 +516,7 @@ struct d40_base { ...@@ -344,6 +516,7 @@ struct d40_base {
int irq; int irq;
int num_phy_chans; int num_phy_chans;
int num_log_chans; int num_log_chans;
struct device_dma_parameters dma_parms;
struct dma_device dma_both; struct dma_device dma_both;
struct dma_device dma_slave; struct dma_device dma_slave;
struct dma_device dma_memcpy; struct dma_device dma_memcpy;
...@@ -361,37 +534,11 @@ struct d40_base { ...@@ -361,37 +534,11 @@ struct d40_base {
resource_size_t lcpa_size; resource_size_t lcpa_size;
struct kmem_cache *desc_slab; struct kmem_cache *desc_slab;
u32 reg_val_backup[BACKUP_REGS_SZ]; u32 reg_val_backup[BACKUP_REGS_SZ];
u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3]; u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)];
u32 *reg_val_backup_chan; u32 *reg_val_backup_chan;
u16 gcc_pwr_off_mask; u16 gcc_pwr_off_mask;
bool initialized; bool initialized;
}; struct d40_gen_dmac gen_dmac;
/**
* struct d40_interrupt_lookup - lookup table for interrupt handler
*
* @src: Interrupt mask register.
* @clr: Interrupt clear register.
* @is_error: true if this is an error interrupt.
* @offset: start delta in the lookup_log_chans in d40_base. If equals to
* D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
*/
struct d40_interrupt_lookup {
u32 src;
u32 clr;
bool is_error;
int offset;
};
/**
* struct d40_reg_val - simple lookup struct
*
* @reg: The register.
* @val: The value that belongs to the register in reg.
*/
struct d40_reg_val {
unsigned int reg;
unsigned int val;
}; };
static struct device *chan2dev(struct d40_chan *d40c) static struct device *chan2dev(struct d40_chan *d40c)
...@@ -494,19 +641,18 @@ static int d40_lcla_alloc_one(struct d40_chan *d40c, ...@@ -494,19 +641,18 @@ static int d40_lcla_alloc_one(struct d40_chan *d40c,
unsigned long flags; unsigned long flags;
int i; int i;
int ret = -EINVAL; int ret = -EINVAL;
int p;
spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
/* /*
* Allocate both src and dst at the same time, therefore the half * Allocate both src and dst at the same time, therefore the half
* start on 1 since 0 can't be used since zero is used as end marker. * start on 1 since 0 can't be used since zero is used as end marker.
*/ */
for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
if (!d40c->base->lcla_pool.alloc_map[p + i]) { int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
d40c->base->lcla_pool.alloc_map[p + i] = d40d;
if (!d40c->base->lcla_pool.alloc_map[idx]) {
d40c->base->lcla_pool.alloc_map[idx] = d40d;
d40d->lcla_alloc++; d40d->lcla_alloc++;
ret = i; ret = i;
break; break;
...@@ -531,10 +677,10 @@ static int d40_lcla_free_all(struct d40_chan *d40c, ...@@ -531,10 +677,10 @@ static int d40_lcla_free_all(struct d40_chan *d40c,
spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;
D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) {
d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL; d40c->base->lcla_pool.alloc_map[idx] = NULL;
d40d->lcla_alloc--; d40d->lcla_alloc--;
if (d40d->lcla_alloc == 0) { if (d40d->lcla_alloc == 0) {
ret = 0; ret = 0;
...@@ -611,6 +757,11 @@ static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) ...@@ -611,6 +757,11 @@ static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
} }
/*
 * Queue @desc on the channel's list of completed descriptors, to be
 * reaped later by dma_tasklet()/d40_term_all().
 * NOTE(review): presumably called with d40c->lock held, like the other
 * list manipulations on this channel — confirm at the call sites.
 */
static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
list_add_tail(&desc->node, &d40c->done);
}
static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{ {
struct d40_lcla_pool *pool = &chan->base->lcla_pool; struct d40_lcla_pool *pool = &chan->base->lcla_pool;
...@@ -634,7 +785,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) ...@@ -634,7 +785,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
* can't link back to the one in LCPA space * can't link back to the one in LCPA space
*/ */
if (linkback || (lli_len - lli_current > 1)) { if (linkback || (lli_len - lli_current > 1)) {
/*
* If the channel is expected to use only soft_lli don't
* allocate a lcla. This is to avoid a HW issue that exists
* in some controller during a peripheral to memory transfer
* that uses linked lists.
*/
if (!(chan->phy_chan->use_soft_lli &&
chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
curr_lcla = d40_lcla_alloc_one(chan, desc); curr_lcla = d40_lcla_alloc_one(chan, desc);
first_lcla = curr_lcla; first_lcla = curr_lcla;
} }
...@@ -771,6 +931,14 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) ...@@ -771,6 +931,14 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
return d; return d;
} }
/* Return the oldest completed descriptor on @d40c, or NULL if none. */
static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
	return list_empty(&d40c->done) ?
		NULL : list_first_entry(&d40c->done, struct d40_desc, node);
}
static int d40_psize_2_burst_size(bool is_log, int psize) static int d40_psize_2_burst_size(bool is_log, int psize)
{ {
if (is_log) { if (is_log) {
...@@ -874,10 +1042,10 @@ static void d40_save_restore_registers(struct d40_base *base, bool save) ...@@ -874,10 +1042,10 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
save); save);
/* Save/Restore registers only existing on dma40 v3 and later */ /* Save/Restore registers only existing on dma40 v3 and later */
if (base->rev >= 3) if (base->gen_dmac.backup)
dma40_backup(base->virtbase, base->reg_val_backup_v3, dma40_backup(base->virtbase, base->reg_val_backup_v4,
d40_backup_regs_v3, base->gen_dmac.backup,
ARRAY_SIZE(d40_backup_regs_v3), base->gen_dmac.backup_size,
save); save);
} }
#else #else
...@@ -961,6 +1129,12 @@ static void d40_term_all(struct d40_chan *d40c) ...@@ -961,6 +1129,12 @@ static void d40_term_all(struct d40_chan *d40c)
struct d40_desc *d40d; struct d40_desc *d40d;
struct d40_desc *_d; struct d40_desc *_d;
/* Release completed descriptors */
while ((d40d = d40_first_done(d40c))) {
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
}
/* Release active descriptors */ /* Release active descriptors */
while ((d40d = d40_first_active_get(d40c))) { while ((d40d = d40_first_active_get(d40c))) {
d40_desc_remove(d40d); d40_desc_remove(d40d);
...@@ -1398,6 +1572,9 @@ static void dma_tc_handle(struct d40_chan *d40c) ...@@ -1398,6 +1572,9 @@ static void dma_tc_handle(struct d40_chan *d40c)
pm_runtime_put_autosuspend(d40c->base->dev); pm_runtime_put_autosuspend(d40c->base->dev);
} }
d40_desc_remove(d40d);
d40_desc_done(d40c, d40d);
d40c->pending_tx++; d40c->pending_tx++;
tasklet_schedule(&d40c->tasklet); tasklet_schedule(&d40c->tasklet);
...@@ -1413,10 +1590,14 @@ static void dma_tasklet(unsigned long data) ...@@ -1413,10 +1590,14 @@ static void dma_tasklet(unsigned long data)
spin_lock_irqsave(&d40c->lock, flags); spin_lock_irqsave(&d40c->lock, flags);
/* Get first active entry from list */ /* Get first entry from the done list */
d40d = d40_first_done(d40c);
if (d40d == NULL) {
/* Check if we have reached here for cyclic job */
d40d = d40_first_active_get(d40c); d40d = d40_first_active_get(d40c);
if (d40d == NULL) if (d40d == NULL || !d40d->cyclic)
goto err; goto err;
}
if (!d40d->cyclic) if (!d40d->cyclic)
dma_cookie_complete(&d40d->txd); dma_cookie_complete(&d40d->txd);
...@@ -1438,15 +1619,13 @@ static void dma_tasklet(unsigned long data) ...@@ -1438,15 +1619,13 @@ static void dma_tasklet(unsigned long data)
if (async_tx_test_ack(&d40d->txd)) { if (async_tx_test_ack(&d40d->txd)) {
d40_desc_remove(d40d); d40_desc_remove(d40d);
d40_desc_free(d40c, d40d); d40_desc_free(d40c, d40d);
} else { } else if (!d40d->is_in_client_list) {
if (!d40d->is_in_client_list) {
d40_desc_remove(d40d); d40_desc_remove(d40d);
d40_lcla_free_all(d40c, d40d); d40_lcla_free_all(d40c, d40d);
list_add_tail(&d40d->node, &d40c->client); list_add_tail(&d40d->node, &d40c->client);
d40d->is_in_client_list = true; d40d->is_in_client_list = true;
} }
} }
}
d40c->pending_tx--; d40c->pending_tx--;
...@@ -1469,53 +1648,51 @@ static void dma_tasklet(unsigned long data) ...@@ -1469,53 +1648,51 @@ static void dma_tasklet(unsigned long data)
static irqreturn_t d40_handle_interrupt(int irq, void *data) static irqreturn_t d40_handle_interrupt(int irq, void *data)
{ {
static const struct d40_interrupt_lookup il[] = {
{D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
{D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
{D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
{D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
{D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
{D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
{D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
};
int i; int i;
u32 regs[ARRAY_SIZE(il)];
u32 idx; u32 idx;
u32 row; u32 row;
long chan = -1; long chan = -1;
struct d40_chan *d40c; struct d40_chan *d40c;
unsigned long flags; unsigned long flags;
struct d40_base *base = data; struct d40_base *base = data;
u32 regs[base->gen_dmac.il_size];
struct d40_interrupt_lookup *il = base->gen_dmac.il;
u32 il_size = base->gen_dmac.il_size;
spin_lock_irqsave(&base->interrupt_lock, flags); spin_lock_irqsave(&base->interrupt_lock, flags);
/* Read interrupt status of both logical and physical channels */ /* Read interrupt status of both logical and physical channels */
for (i = 0; i < ARRAY_SIZE(il); i++) for (i = 0; i < il_size; i++)
regs[i] = readl(base->virtbase + il[i].src); regs[i] = readl(base->virtbase + il[i].src);
for (;;) { for (;;) {
chan = find_next_bit((unsigned long *)regs, chan = find_next_bit((unsigned long *)regs,
BITS_PER_LONG * ARRAY_SIZE(il), chan + 1); BITS_PER_LONG * il_size, chan + 1);
/* No more set bits found? */ /* No more set bits found? */
if (chan == BITS_PER_LONG * ARRAY_SIZE(il)) if (chan == BITS_PER_LONG * il_size)
break; break;
row = chan / BITS_PER_LONG; row = chan / BITS_PER_LONG;
idx = chan & (BITS_PER_LONG - 1); idx = chan & (BITS_PER_LONG - 1);
/* ACK interrupt */
writel(1 << idx, base->virtbase + il[row].clr);
if (il[row].offset == D40_PHY_CHAN) if (il[row].offset == D40_PHY_CHAN)
d40c = base->lookup_phy_chans[idx]; d40c = base->lookup_phy_chans[idx];
else else
d40c = base->lookup_log_chans[il[row].offset + idx]; d40c = base->lookup_log_chans[il[row].offset + idx];
if (!d40c) {
/*
* No error because this can happen if something else
* in the system is using the channel.
*/
continue;
}
/* ACK interrupt */
writel(1 << idx, base->virtbase + il[row].clr);
spin_lock(&d40c->lock); spin_lock(&d40c->lock);
if (!il[row].is_error) if (!il[row].is_error)
...@@ -1710,10 +1887,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) ...@@ -1710,10 +1887,12 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
int i; int i;
int j; int j;
int log_num; int log_num;
int num_phy_chans;
bool is_src; bool is_src;
bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
phys = d40c->base->phy_res; phys = d40c->base->phy_res;
num_phy_chans = d40c->base->num_phy_chans;
if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
dev_type = d40c->dma_cfg.src_dev_type; dev_type = d40c->dma_cfg.src_dev_type;
...@@ -1734,12 +1913,19 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) ...@@ -1734,12 +1913,19 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
if (!is_log) { if (!is_log) {
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
/* Find physical half channel */ /* Find physical half channel */
for (i = 0; i < d40c->base->num_phy_chans; i++) { if (d40c->dma_cfg.use_fixed_channel) {
i = d40c->dma_cfg.phy_channel;
if (d40_alloc_mask_set(&phys[i], is_src, if (d40_alloc_mask_set(&phys[i], is_src,
0, is_log, 0, is_log,
first_phy_user)) first_phy_user))
goto found_phy; goto found_phy;
} else {
for (i = 0; i < num_phy_chans; i++) {
if (d40_alloc_mask_set(&phys[i], is_src,
0, is_log,
first_phy_user))
goto found_phy;
}
} }
} else } else
for (j = 0; j < d40c->base->num_phy_chans; j += 8) { for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
...@@ -1954,7 +2140,6 @@ static bool d40_is_paused(struct d40_chan *d40c) ...@@ -1954,7 +2140,6 @@ static bool d40_is_paused(struct d40_chan *d40c)
} }
static u32 stedma40_residue(struct dma_chan *chan) static u32 stedma40_residue(struct dma_chan *chan)
{ {
struct d40_chan *d40c = struct d40_chan *d40c =
...@@ -2030,7 +2215,6 @@ d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, ...@@ -2030,7 +2215,6 @@ d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
return ret < 0 ? ret : 0; return ret < 0 ? ret : 0;
} }
static struct d40_desc * static struct d40_desc *
d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
unsigned int sg_len, unsigned long dma_flags) unsigned int sg_len, unsigned long dma_flags)
...@@ -2056,7 +2240,6 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, ...@@ -2056,7 +2240,6 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
goto err; goto err;
} }
desc->lli_current = 0; desc->lli_current = 0;
desc->txd.flags = dma_flags; desc->txd.flags = dma_flags;
desc->txd.tx_submit = d40_tx_submit; desc->txd.tx_submit = d40_tx_submit;
...@@ -2105,7 +2288,6 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, ...@@ -2105,7 +2288,6 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
return NULL; return NULL;
} }
spin_lock_irqsave(&chan->lock, flags); spin_lock_irqsave(&chan->lock, flags);
desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
...@@ -2179,11 +2361,26 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) ...@@ -2179,11 +2361,26 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
{ {
bool realtime = d40c->dma_cfg.realtime; bool realtime = d40c->dma_cfg.realtime;
bool highprio = d40c->dma_cfg.high_priority; bool highprio = d40c->dma_cfg.high_priority;
u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1; u32 rtreg;
u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1;
u32 event = D40_TYPE_TO_EVENT(dev_type); u32 event = D40_TYPE_TO_EVENT(dev_type);
u32 group = D40_TYPE_TO_GROUP(dev_type); u32 group = D40_TYPE_TO_GROUP(dev_type);
u32 bit = 1 << event; u32 bit = 1 << event;
u32 prioreg;
struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
/*
* Due to a hardware bug, in some cases a logical channel triggered by
* a high priority destination event line can generate extra packet
* transactions.
*
* The workaround is to not set the high priority level for the
* destination event lines that trigger logical channels.
*/
if (!src && chan_is_logical(d40c))
highprio = false;
prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
/* Destination event lines are stored in the upper halfword */ /* Destination event lines are stored in the upper halfword */
if (!src) if (!src)
...@@ -2287,7 +2484,6 @@ static void d40_free_chan_resources(struct dma_chan *chan) ...@@ -2287,7 +2484,6 @@ static void d40_free_chan_resources(struct dma_chan *chan)
return; return;
} }
spin_lock_irqsave(&d40c->lock, flags); spin_lock_irqsave(&d40c->lock, flags);
err = d40_free_dma(d40c); err = d40_free_dma(d40c);
...@@ -2330,12 +2526,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan, ...@@ -2330,12 +2526,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
} }
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, static struct dma_async_tx_descriptor *
struct scatterlist *sgl, d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, unsigned int sg_len, enum dma_transfer_direction direction,
enum dma_transfer_direction direction, unsigned long dma_flags, void *context)
unsigned long dma_flags,
void *context)
{ {
if (!is_slave_direction(direction)) if (!is_slave_direction(direction))
return NULL; return NULL;
...@@ -2577,6 +2771,14 @@ static int d40_set_runtime_config(struct dma_chan *chan, ...@@ -2577,6 +2771,14 @@ static int d40_set_runtime_config(struct dma_chan *chan,
return -EINVAL; return -EINVAL;
} }
if (src_maxburst > 16) {
src_maxburst = 16;
dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
} else if (dst_maxburst > 16) {
dst_maxburst = 16;
src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
}
ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
src_addr_width, src_addr_width,
src_maxburst); src_maxburst);
...@@ -2659,6 +2861,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, ...@@ -2659,6 +2861,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
d40c->log_num = D40_PHY_CHAN; d40c->log_num = D40_PHY_CHAN;
INIT_LIST_HEAD(&d40c->done);
INIT_LIST_HEAD(&d40c->active); INIT_LIST_HEAD(&d40c->active);
INIT_LIST_HEAD(&d40c->queue); INIT_LIST_HEAD(&d40c->queue);
INIT_LIST_HEAD(&d40c->pending_queue); INIT_LIST_HEAD(&d40c->pending_queue);
...@@ -2773,8 +2976,6 @@ static int dma40_pm_suspend(struct device *dev) ...@@ -2773,8 +2976,6 @@ static int dma40_pm_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev); struct platform_device *pdev = to_platform_device(dev);
struct d40_base *base = platform_get_drvdata(pdev); struct d40_base *base = platform_get_drvdata(pdev);
int ret = 0; int ret = 0;
if (!pm_runtime_suspended(dev))
return -EBUSY;
if (base->lcpa_regulator) if (base->lcpa_regulator)
ret = regulator_disable(base->lcpa_regulator); ret = regulator_disable(base->lcpa_regulator);
...@@ -2882,6 +3083,13 @@ static int __init d40_phy_res_init(struct d40_base *base) ...@@ -2882,6 +3083,13 @@ static int __init d40_phy_res_init(struct d40_base *base)
num_phy_chans_avail--; num_phy_chans_avail--;
} }
/* Mark soft_lli channels */
for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
int chan = base->plat_data->soft_lli_chans[i];
base->phy_res[chan].use_soft_lli = true;
}
dev_info(base->dev, "%d of %d physical DMA channels available\n", dev_info(base->dev, "%d of %d physical DMA channels available\n",
num_phy_chans_avail, base->num_phy_chans); num_phy_chans_avail, base->num_phy_chans);
...@@ -2975,14 +3183,21 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) ...@@ -2975,14 +3183,21 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
* ? has revision 1 * ? has revision 1
* DB8500v1 has revision 2 * DB8500v1 has revision 2
* DB8500v2 has revision 3 * DB8500v2 has revision 3
* AP9540v1 has revision 4
* DB8540v1 has revision 4
*/ */
rev = AMBA_REV_BITS(pid); rev = AMBA_REV_BITS(pid);
plat_data = pdev->dev.platform_data;
/* The number of physical channels on this HW */ /* The number of physical channels on this HW */
if (plat_data->num_of_phy_chans)
num_phy_chans = plat_data->num_of_phy_chans;
else
num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n",
rev, res->start); rev, res->start, num_phy_chans);
if (rev < 2) { if (rev < 2) {
d40_err(&pdev->dev, "hardware revision: %d is not supported", d40_err(&pdev->dev, "hardware revision: %d is not supported",
...@@ -2990,8 +3205,6 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) ...@@ -2990,8 +3205,6 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
goto failure; goto failure;
} }
plat_data = pdev->dev.platform_data;
/* Count the number of logical channels in use */ /* Count the number of logical channels in use */
for (i = 0; i < plat_data->dev_len; i++) for (i = 0; i < plat_data->dev_len; i++)
if (plat_data->dev_rx[i] != 0) if (plat_data->dev_rx[i] != 0)
...@@ -3022,6 +3235,36 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) ...@@ -3022,6 +3235,36 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
base->log_chans = &base->phy_chans[num_phy_chans]; base->log_chans = &base->phy_chans[num_phy_chans];
if (base->plat_data->num_of_phy_chans == 14) {
base->gen_dmac.backup = d40_backup_regs_v4b;
base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
base->gen_dmac.il = il_v4b;
base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
base->gen_dmac.init_reg = dma_init_reg_v4b;
base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
} else {
if (base->rev >= 3) {
base->gen_dmac.backup = d40_backup_regs_v4a;
base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
}
base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
base->gen_dmac.realtime_en = D40_DREG_RSEG1;
base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
base->gen_dmac.il = il_v4a;
base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
base->gen_dmac.init_reg = dma_init_reg_v4a;
base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
}
base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
GFP_KERNEL); GFP_KERNEL);
if (!base->phy_res) if (!base->phy_res)
...@@ -3093,31 +3336,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) ...@@ -3093,31 +3336,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
static void __init d40_hw_init(struct d40_base *base) static void __init d40_hw_init(struct d40_base *base)
{ {
static struct d40_reg_val dma_init_reg[] = {
/* Clock every part of the DMA block from start */
{ .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
/* Interrupts on all logical channels */
{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};
int i; int i;
u32 prmseo[2] = {0, 0}; u32 prmseo[2] = {0, 0};
u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
u32 pcmis = 0; u32 pcmis = 0;
u32 pcicr = 0; u32 pcicr = 0;
struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
u32 reg_size = base->gen_dmac.init_reg_size;
for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++) for (i = 0; i < reg_size; i++)
writel(dma_init_reg[i].val, writel(dma_init_reg[i].val,
base->virtbase + dma_init_reg[i].reg); base->virtbase + dma_init_reg[i].reg);
...@@ -3150,11 +3377,14 @@ static void __init d40_hw_init(struct d40_base *base) ...@@ -3150,11 +3377,14 @@ static void __init d40_hw_init(struct d40_base *base)
writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
/* Write which interrupt to enable */ /* Write which interrupt to enable */
writel(pcmis, base->virtbase + D40_DREG_PCMIS); writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
/* Write which interrupt to clear */ /* Write which interrupt to clear */
writel(pcicr, base->virtbase + D40_DREG_PCICR); writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
/* These are __initdata and cannot be accessed after init */
base->gen_dmac.init_reg = NULL;
base->gen_dmac.init_reg_size = 0;
} }
static int __init d40_lcla_allocate(struct d40_base *base) static int __init d40_lcla_allocate(struct d40_base *base)
...@@ -3362,6 +3592,13 @@ static int __init d40_probe(struct platform_device *pdev) ...@@ -3362,6 +3592,13 @@ static int __init d40_probe(struct platform_device *pdev)
if (err) if (err)
goto failure; goto failure;
base->dev->dma_parms = &base->dma_parms;
err = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
if (err) {
d40_err(&pdev->dev, "Failed to set dma max seg size\n");
goto failure;
}
d40_hw_init(base); d40_hw_init(base);
dev_info(base->dev, "initialized\n"); dev_info(base->dev, "initialized\n");
...@@ -3397,7 +3634,7 @@ static int __init d40_probe(struct platform_device *pdev) ...@@ -3397,7 +3634,7 @@ static int __init d40_probe(struct platform_device *pdev)
release_mem_region(base->phy_start, release_mem_region(base->phy_start,
base->phy_size); base->phy_size);
if (base->clk) { if (base->clk) {
clk_disable(base->clk); clk_disable_unprepare(base->clk);
clk_put(base->clk); clk_put(base->clk);
} }
......
...@@ -102,17 +102,18 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg, ...@@ -102,17 +102,18 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS; src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS; dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
/* Set the priority bit to high for the physical channel */
if (cfg->high_priority) {
src |= 1 << D40_SREG_CFG_PRI_POS;
dst |= 1 << D40_SREG_CFG_PRI_POS;
}
} else { } else {
/* Logical channel */ /* Logical channel */
dst |= 1 << D40_SREG_CFG_LOG_GIM_POS; dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
src |= 1 << D40_SREG_CFG_LOG_GIM_POS; src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
} }
if (cfg->high_priority) {
src |= 1 << D40_SREG_CFG_PRI_POS;
dst |= 1 << D40_SREG_CFG_PRI_POS;
}
if (cfg->src_info.big_endian) if (cfg->src_info.big_endian)
src |= 1 << D40_SREG_CFG_LBE_POS; src |= 1 << D40_SREG_CFG_LBE_POS;
if (cfg->dst_info.big_endian) if (cfg->dst_info.big_endian)
...@@ -250,7 +251,7 @@ d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size, ...@@ -250,7 +251,7 @@ d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
return lli; return lli;
err: err:
return NULL; return NULL;
} }
...@@ -331,10 +332,10 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, ...@@ -331,10 +332,10 @@ void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
{ {
d40_log_lli_link(lli_dst, lli_src, next, flags); d40_log_lli_link(lli_dst, lli_src, next, flags);
writel(lli_src->lcsp02, &lcpa[0].lcsp0); writel_relaxed(lli_src->lcsp02, &lcpa[0].lcsp0);
writel(lli_src->lcsp13, &lcpa[0].lcsp1); writel_relaxed(lli_src->lcsp13, &lcpa[0].lcsp1);
writel(lli_dst->lcsp02, &lcpa[0].lcsp2); writel_relaxed(lli_dst->lcsp02, &lcpa[0].lcsp2);
writel(lli_dst->lcsp13, &lcpa[0].lcsp3); writel_relaxed(lli_dst->lcsp13, &lcpa[0].lcsp3);
} }
void d40_log_lli_lcla_write(struct d40_log_lli *lcla, void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
...@@ -344,10 +345,10 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla, ...@@ -344,10 +345,10 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
{ {
d40_log_lli_link(lli_dst, lli_src, next, flags); d40_log_lli_link(lli_dst, lli_src, next, flags);
writel(lli_src->lcsp02, &lcla[0].lcsp02); writel_relaxed(lli_src->lcsp02, &lcla[0].lcsp02);
writel(lli_src->lcsp13, &lcla[0].lcsp13); writel_relaxed(lli_src->lcsp13, &lcla[0].lcsp13);
writel(lli_dst->lcsp02, &lcla[1].lcsp02); writel_relaxed(lli_dst->lcsp02, &lcla[1].lcsp02);
writel(lli_dst->lcsp13, &lcla[1].lcsp13); writel_relaxed(lli_dst->lcsp13, &lcla[1].lcsp13);
} }
static void d40_log_fill_lli(struct d40_log_lli *lli, static void d40_log_fill_lli(struct d40_log_lli *lli,
......
...@@ -125,7 +125,7 @@ ...@@ -125,7 +125,7 @@
#define D40_DREG_GCC 0x000 #define D40_DREG_GCC 0x000
#define D40_DREG_GCC_ENA 0x1 #define D40_DREG_GCC_ENA 0x1
/* This assumes that there are only 4 event groups */ /* This assumes that there are only 4 event groups */
#define D40_DREG_GCC_ENABLE_ALL 0xff01 #define D40_DREG_GCC_ENABLE_ALL 0x3ff01
#define D40_DREG_GCC_EVTGRP_POS 8 #define D40_DREG_GCC_EVTGRP_POS 8
#define D40_DREG_GCC_SRC 0 #define D40_DREG_GCC_SRC 0
#define D40_DREG_GCC_DST 1 #define D40_DREG_GCC_DST 1
...@@ -148,14 +148,31 @@ ...@@ -148,14 +148,31 @@
#define D40_DREG_LCPA 0x020 #define D40_DREG_LCPA 0x020
#define D40_DREG_LCLA 0x024 #define D40_DREG_LCLA 0x024
#define D40_DREG_SSEG1 0x030
#define D40_DREG_SSEG2 0x034
#define D40_DREG_SSEG3 0x038
#define D40_DREG_SSEG4 0x03C
#define D40_DREG_SCEG1 0x040
#define D40_DREG_SCEG2 0x044
#define D40_DREG_SCEG3 0x048
#define D40_DREG_SCEG4 0x04C
#define D40_DREG_ACTIVE 0x050 #define D40_DREG_ACTIVE 0x050
#define D40_DREG_ACTIVO 0x054 #define D40_DREG_ACTIVO 0x054
#define D40_DREG_FSEB1 0x058 #define D40_DREG_CIDMOD 0x058
#define D40_DREG_FSEB2 0x05C #define D40_DREG_TCIDV 0x05C
#define D40_DREG_PCMIS 0x060 #define D40_DREG_PCMIS 0x060
#define D40_DREG_PCICR 0x064 #define D40_DREG_PCICR 0x064
#define D40_DREG_PCTIS 0x068 #define D40_DREG_PCTIS 0x068
#define D40_DREG_PCEIS 0x06C #define D40_DREG_PCEIS 0x06C
#define D40_DREG_SPCMIS 0x070
#define D40_DREG_SPCICR 0x074
#define D40_DREG_SPCTIS 0x078
#define D40_DREG_SPCEIS 0x07C
#define D40_DREG_LCMIS0 0x080 #define D40_DREG_LCMIS0 0x080
#define D40_DREG_LCMIS1 0x084 #define D40_DREG_LCMIS1 0x084
#define D40_DREG_LCMIS2 0x088 #define D40_DREG_LCMIS2 0x088
...@@ -172,6 +189,33 @@ ...@@ -172,6 +189,33 @@
#define D40_DREG_LCEIS1 0x0B4 #define D40_DREG_LCEIS1 0x0B4
#define D40_DREG_LCEIS2 0x0B8 #define D40_DREG_LCEIS2 0x0B8
#define D40_DREG_LCEIS3 0x0BC #define D40_DREG_LCEIS3 0x0BC
#define D40_DREG_SLCMIS1 0x0C0
#define D40_DREG_SLCMIS2 0x0C4
#define D40_DREG_SLCMIS3 0x0C8
#define D40_DREG_SLCMIS4 0x0CC
#define D40_DREG_SLCICR1 0x0D0
#define D40_DREG_SLCICR2 0x0D4
#define D40_DREG_SLCICR3 0x0D8
#define D40_DREG_SLCICR4 0x0DC
#define D40_DREG_SLCTIS1 0x0E0
#define D40_DREG_SLCTIS2 0x0E4
#define D40_DREG_SLCTIS3 0x0E8
#define D40_DREG_SLCTIS4 0x0EC
#define D40_DREG_SLCEIS1 0x0F0
#define D40_DREG_SLCEIS2 0x0F4
#define D40_DREG_SLCEIS3 0x0F8
#define D40_DREG_SLCEIS4 0x0FC
#define D40_DREG_FSESS1 0x100
#define D40_DREG_FSESS2 0x104
#define D40_DREG_FSEBS1 0x108
#define D40_DREG_FSEBS2 0x10C
#define D40_DREG_PSEG1 0x110 #define D40_DREG_PSEG1 0x110
#define D40_DREG_PSEG2 0x114 #define D40_DREG_PSEG2 0x114
#define D40_DREG_PSEG3 0x118 #define D40_DREG_PSEG3 0x118
...@@ -188,6 +232,86 @@ ...@@ -188,6 +232,86 @@
#define D40_DREG_RCEG2 0x144 #define D40_DREG_RCEG2 0x144
#define D40_DREG_RCEG3 0x148 #define D40_DREG_RCEG3 0x148
#define D40_DREG_RCEG4 0x14C #define D40_DREG_RCEG4 0x14C
#define D40_DREG_PREFOT 0x15C
#define D40_DREG_EXTCFG 0x160
#define D40_DREG_CPSEG1 0x200
#define D40_DREG_CPSEG2 0x204
#define D40_DREG_CPSEG3 0x208
#define D40_DREG_CPSEG4 0x20C
#define D40_DREG_CPSEG5 0x210
#define D40_DREG_CPCEG1 0x220
#define D40_DREG_CPCEG2 0x224
#define D40_DREG_CPCEG3 0x228
#define D40_DREG_CPCEG4 0x22C
#define D40_DREG_CPCEG5 0x230
#define D40_DREG_CRSEG1 0x240
#define D40_DREG_CRSEG2 0x244
#define D40_DREG_CRSEG3 0x248
#define D40_DREG_CRSEG4 0x24C
#define D40_DREG_CRSEG5 0x250
#define D40_DREG_CRCEG1 0x260
#define D40_DREG_CRCEG2 0x264
#define D40_DREG_CRCEG3 0x268
#define D40_DREG_CRCEG4 0x26C
#define D40_DREG_CRCEG5 0x270
#define D40_DREG_CFSESS1 0x280
#define D40_DREG_CFSESS2 0x284
#define D40_DREG_CFSESS3 0x288
#define D40_DREG_CFSEBS1 0x290
#define D40_DREG_CFSEBS2 0x294
#define D40_DREG_CFSEBS3 0x298
#define D40_DREG_CLCMIS1 0x300
#define D40_DREG_CLCMIS2 0x304
#define D40_DREG_CLCMIS3 0x308
#define D40_DREG_CLCMIS4 0x30C
#define D40_DREG_CLCMIS5 0x310
#define D40_DREG_CLCICR1 0x320
#define D40_DREG_CLCICR2 0x324
#define D40_DREG_CLCICR3 0x328
#define D40_DREG_CLCICR4 0x32C
#define D40_DREG_CLCICR5 0x330
#define D40_DREG_CLCTIS1 0x340
#define D40_DREG_CLCTIS2 0x344
#define D40_DREG_CLCTIS3 0x348
#define D40_DREG_CLCTIS4 0x34C
#define D40_DREG_CLCTIS5 0x350
#define D40_DREG_CLCEIS1 0x360
#define D40_DREG_CLCEIS2 0x364
#define D40_DREG_CLCEIS3 0x368
#define D40_DREG_CLCEIS4 0x36C
#define D40_DREG_CLCEIS5 0x370
#define D40_DREG_CPCMIS 0x380
#define D40_DREG_CPCICR 0x384
#define D40_DREG_CPCTIS 0x388
#define D40_DREG_CPCEIS 0x38C
#define D40_DREG_SCCIDA1 0xE80
#define D40_DREG_SCCIDA2 0xE90
#define D40_DREG_SCCIDA3 0xEA0
#define D40_DREG_SCCIDA4 0xEB0
#define D40_DREG_SCCIDA5 0xEC0
#define D40_DREG_SCCIDB1 0xE84
#define D40_DREG_SCCIDB2 0xE94
#define D40_DREG_SCCIDB3 0xEA4
#define D40_DREG_SCCIDB4 0xEB4
#define D40_DREG_SCCIDB5 0xEC4
#define D40_DREG_PRSCCIDA 0xF80
#define D40_DREG_PRSCCIDB 0xF84
#define D40_DREG_STFU 0xFC8 #define D40_DREG_STFU 0xFC8
#define D40_DREG_ICFG 0xFCC #define D40_DREG_ICFG 0xFCC
#define D40_DREG_PERIPHID0 0xFE0 #define D40_DREG_PERIPHID0 0xFE0
......
...@@ -147,6 +147,16 @@ struct stedma40_chan_cfg { ...@@ -147,6 +147,16 @@ struct stedma40_chan_cfg {
* @memcpy_conf_log: default configuration of logical channel memcpy * @memcpy_conf_log: default configuration of logical channel memcpy
* @disabled_channels: A vector, ending with -1, that marks physical channels * @disabled_channels: A vector, ending with -1, that marks physical channels
* that are for different reasons not available for the driver. * that are for different reasons not available for the driver.
* @soft_lli_chans: A vector, that marks physical channels will use LLI by SW
* which avoids HW bug that exists in some versions of the controller.
* SoftLLI introduces relink overhead that could impact performace for
* certain use cases.
* @num_of_soft_lli_chans: The number of channels that needs to be configured
* to use SoftLLI.
* @use_esram_lcla: flag for mapping the lcla into esram region
* @num_of_phy_chans: The number of physical channels implemented in HW.
* 0 means reading the number of channels from DMA HW but this is only valid
* for 'multiple of 4' channels, like 8.
*/ */
struct stedma40_platform_data { struct stedma40_platform_data {
u32 dev_len; u32 dev_len;
...@@ -157,7 +167,10 @@ struct stedma40_platform_data { ...@@ -157,7 +167,10 @@ struct stedma40_platform_data {
struct stedma40_chan_cfg *memcpy_conf_phy; struct stedma40_chan_cfg *memcpy_conf_phy;
struct stedma40_chan_cfg *memcpy_conf_log; struct stedma40_chan_cfg *memcpy_conf_log;
int disabled_channels[STEDMA40_MAX_PHYS]; int disabled_channels[STEDMA40_MAX_PHYS];
int *soft_lli_chans;
int num_of_soft_lli_chans;
bool use_esram_lcla; bool use_esram_lcla;
int num_of_phy_chans;
}; };
#ifdef CONFIG_STE_DMA40 #ifdef CONFIG_STE_DMA40
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment