Commit bb5a471d authored by Vinod Koul

Merge branch 'fixes' into next

parents b3794956 bacdcb66
@@ -1707,6 +1707,14 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
if (!sdma->script_number)
sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
/ sizeof(s32)) {
dev_err(sdma->dev,
"SDMA script number %d not match with firmware.\n",
sdma->script_number);
return;
}
for (i = 0; i < sdma->script_number; i++)
if (addr_arr[i] > 0)
saddr_arr[i] = addr_arr[i];
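For reference, a minimal standalone sketch (not the imx-sdma driver itself; struct members and names are illustrative) of the guard this hunk adds: a firmware-reported script count is checked against the number of s32 slots in the address struct before any entries are copied.

```c
/* Standalone sketch: validate a firmware-reported script count against the
 * number of int32_t slots in the address struct, then copy only the valid
 * entries. Names are illustrative, not the driver's. */
#include <stdint.h>
#include <stdio.h>

struct script_start_addrs {
	int32_t ap_2_ap_addr;
	int32_t ap_2_bp_addr;
	int32_t bp_2_ap_addr;
	/* ... more per-script entries ... */
};

#define MAX_SCRIPTS (sizeof(struct script_start_addrs) / sizeof(int32_t))

static int add_scripts(struct script_start_addrs *dst,
		       const int32_t *addr_arr, size_t script_number)
{
	int32_t *slots = (int32_t *)dst;
	size_t i;

	/* Reject counts larger than the struct can describe, as the hunk
	 * above does, instead of writing past the end of 'dst'. */
	if (script_number > MAX_SCRIPTS) {
		fprintf(stderr, "script number %zu does not match firmware\n",
			script_number);
		return -1;
	}

	for (i = 0; i < script_number; i++)
		if (addr_arr[i] > 0)
			slots[i] = addr_arr[i];
	return 0;
}

int main(void)
{
	struct script_start_addrs addrs = { 0 };
	int32_t fw[] = { 0x1c0, 0, 0x2a0 };

	return add_scripts(&addrs, fw, sizeof(fw) / sizeof(fw[0])) ? 1 : 0;
}
```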
...
@@ -694,6 +694,25 @@ static int bam_dma_terminate_all(struct dma_chan *chan)
/* remove all transactions, including active transaction */
spin_lock_irqsave(&bchan->vc.lock, flag);
/*
* If we have transactions queued, then some might be committed to the
* hardware in the desc fifo. The only way to reset the desc fifo is
* to do a hardware reset (either by pipe or the entire block).
* bam_chan_init_hw() will trigger a pipe reset, and also reinit the
* pipe. If the pipe is left disabled (default state after pipe reset)
* and is accessed by a connected hardware engine, a fatal error in
* the BAM will occur. There is a small window where this could happen
* with bam_chan_init_hw(), but it is assumed that the caller has
* stopped activity on any attached hardware engine. Make sure to do
* this first so that the BAM hardware doesn't cause memory corruption
* by accessing freed resources.
*/
if (!list_empty(&bchan->desc_list)) {
async_desc = list_first_entry(&bchan->desc_list,
struct bam_async_desc, desc_node);
bam_chan_init_hw(bchan, async_desc->dir);
}
list_for_each_entry_safe(async_desc, tmp,
&bchan->desc_list, desc_node) {
list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
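A minimal standalone sketch of the ordering the comment above calls for, using a toy channel/descriptor model rather than the BAM driver's types: reset the hardware first so it can no longer touch descriptor memory, and only then walk the list and free the descriptors.

```c
/* Standalone sketch: if any descriptor may still be committed to hardware,
 * reset the pipe first so the engine stops accessing descriptor memory,
 * then release the descriptors. All names are illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct desc {
	struct desc *next;
	int id;
};

struct chan {
	struct desc *head;	/* descriptors possibly seen by hardware */
};

static void chan_hw_reset(struct chan *c)
{
	/* Stand-in for bam_chan_init_hw(): after this returns the engine
	 * must not issue any further accesses to descriptor memory. */
	printf("pipe reset (head=%p)\n", (void *)c->head);
}

static void chan_terminate_all(struct chan *c)
{
	struct desc *d, *next;

	/* Reset first: freeing while the engine could still read the FIFO
	 * would let it access freed memory. */
	if (c->head)
		chan_hw_reset(c);

	for (d = c->head; d; d = next) {
		next = d->next;
		printf("freeing desc %d\n", d->id);
		free(d);
	}
	c->head = NULL;
}

int main(void)
{
	struct chan c = { 0 };
	struct desc *d = malloc(sizeof(*d));

	d->next = NULL;
	d->id = 1;
	c.head = d;
	chan_terminate_all(&c);
	return 0;
}
```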
...
@@ -134,6 +134,10 @@
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0
#define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0)
/* SPRD DMA_SRC_BLK_STEP register definition */
#define SPRD_DMA_LLIST_HIGH_MASK GENMASK(31, 28)
#define SPRD_DMA_LLIST_HIGH_SHIFT 28
/* define DMA channel mode & trigger mode mask */
#define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0)
#define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0)
@@ -208,6 +212,7 @@ struct sprd_dma_dev {
struct sprd_dma_chn channels[0];
};
static void sprd_dma_free_desc(struct virt_dma_desc *vd);
static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
.filter_fn = sprd_dma_filter_fn,
@@ -609,12 +614,19 @@ static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
struct virt_dma_desc *cur_vd = NULL;
unsigned long flags;
spin_lock_irqsave(&schan->vc.lock, flags);
if (schan->cur_desc)
cur_vd = &schan->cur_desc->vd;
sprd_dma_stop(schan);
spin_unlock_irqrestore(&schan->vc.lock, flags);
if (cur_vd)
sprd_dma_free_desc(cur_vd);
vchan_free_chan_resources(&schan->vc);
pm_runtime_put(chan->device->dev);
}
@@ -717,6 +729,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
u32 int_mode = flags & SPRD_DMA_INT_MASK;
int src_datawidth, dst_datawidth, src_step, dst_step;
u32 temp, fix_mode = 0, fix_en = 0;
phys_addr_t llist_ptr;
if (dir == DMA_MEM_TO_DEV) {
src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
@@ -814,13 +827,16 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
* Set the link-list pointer point to next link-list
* configuration's physical address.
*/
hw->llist_ptr = schan->linklist.phy_addr + temp;
llist_ptr = schan->linklist.phy_addr + temp;
hw->llist_ptr = lower_32_bits(llist_ptr);
hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
SPRD_DMA_LLIST_HIGH_MASK;
} else {
hw->llist_ptr = 0;
hw->src_blk_step = 0;
}
hw->frg_step = 0;
hw->src_blk_step = 0;
hw->des_blk_step = 0;
return 0;
}
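A standalone sketch of the address split this hunk performs, with an illustrative register struct and macro names: the low 32 bits of the link-list physical address go into the link-list pointer register, and the next 4 bits are packed into the top nibble of the source block-step register.

```c
/* Standalone sketch (illustrative names, not the sprd driver's register
 * struct): the controller's link-list pointer register is only 32 bits
 * wide, so the low word goes into llist_ptr and bits 35:32 are packed into
 * the top nibble of the source block-step register, mirroring the masks in
 * the hunk above. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LLIST_HIGH_SHIFT	28
#define LLIST_HIGH_MASK		(0xfu << LLIST_HIGH_SHIFT)	/* GENMASK(31, 28) */

struct chn_hw {
	uint32_t llist_ptr;	/* low 32 bits of the next link-list config */
	uint32_t src_blk_step;	/* bits 35:32 live in the top nibble */
};

static void set_llist_ptr(struct chn_hw *hw, uint64_t llist_phys)
{
	hw->llist_ptr = (uint32_t)llist_phys;			/* lower_32_bits() */
	hw->src_blk_step = ((uint32_t)(llist_phys >> 32)	/* upper_32_bits() */
			    << LLIST_HIGH_SHIFT) & LLIST_HIGH_MASK;
}

int main(void)
{
	struct chn_hw hw = { 0, 0 };

	set_llist_ptr(&hw, 0xa12345678ULL);	/* a 36-bit physical address */
	printf("llist_ptr=0x%08" PRIx32 " src_blk_step=0x%08" PRIx32 "\n",
	       hw.llist_ptr, hw.src_blk_step);
	return 0;
}
```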
@@ -1023,15 +1039,22 @@ static int sprd_dma_resume(struct dma_chan *chan)
static int sprd_dma_terminate_all(struct dma_chan *chan)
{
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
struct virt_dma_desc *cur_vd = NULL;
unsigned long flags;
LIST_HEAD(head);
spin_lock_irqsave(&schan->vc.lock, flags);
if (schan->cur_desc)
cur_vd = &schan->cur_desc->vd;
sprd_dma_stop(schan);
vchan_get_all_descriptors(&schan->vc, &head);
spin_unlock_irqrestore(&schan->vc.lock, flags);
if (cur_vd)
sprd_dma_free_desc(cur_vd);
vchan_dma_desc_free_list(&schan->vc, &head);
return 0;
}
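A standalone sketch of the pattern added in both sprd hunks, using a pthread mutex as a stand-in for the vchan spinlock: snapshot the in-flight descriptor while the lock is held, stop the channel, then free the descriptor only after dropping the lock.

```c
/* Standalone sketch (compile with -pthread): snapshot the in-flight
 * descriptor under the lock, "stop" the channel, then free the descriptor
 * outside the lock. Names are illustrative, not the driver's. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct desc {
	int id;
};

struct chan {
	pthread_mutex_t lock;
	struct desc *cur_desc;	/* descriptor currently on the hardware */
};

static void chan_terminate(struct chan *c)
{
	struct desc *cur = NULL;

	pthread_mutex_lock(&c->lock);
	if (c->cur_desc) {		/* snapshot while still locked */
		cur = c->cur_desc;
		c->cur_desc = NULL;	/* "stop" the channel */
	}
	pthread_mutex_unlock(&c->lock);

	if (cur) {			/* free outside the lock */
		printf("freeing in-flight desc %d\n", cur->id);
		free(cur);
	}
}

int main(void)
{
	struct chan c = { PTHREAD_MUTEX_INITIALIZER, NULL };

	c.cur_desc = malloc(sizeof(*c.cur_desc));
	c.cur_desc->id = 7;
	chan_terminate(&c);
	return 0;
}
```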
...
@@ -40,6 +40,7 @@
#define ADMA_CH_CONFIG_MAX_BURST_SIZE 16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS 8
#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
#define ADMA_CH_FIFO_CTRL 0x2c
#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0xf) << 8)
@@ -77,6 +78,7 @@ struct tegra_adma;
* @ch_req_tx_shift: Register offset for AHUB transmit channel select.
* @ch_req_rx_shift: Register offset for AHUB receive channel select.
* @ch_base_offset: Register offset of DMA channel registers.
* @has_outstanding_reqs: If DMA channel can have outstanding requests.
* @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
* @ch_req_mask: Mask for Tx or Rx channel select.
* @ch_req_max: Maximum number of Tx or Rx channels available.
@@ -95,6 +97,7 @@ struct tegra_adma_chip_data {
unsigned int ch_req_max;
unsigned int ch_reg_size;
unsigned int nr_channels;
bool has_outstanding_reqs;
};
/*
@@ -594,6 +597,8 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
ADMA_CH_CTRL_FLOWCTRL_EN;
ch_regs->config |= cdata->adma_get_burst_config(burst_size);
ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
if (cdata->has_outstanding_reqs)
ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
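A standalone sketch of the per-chip gating above, with an illustrative register layout rather than the real ADMA map: the chip data carries has_outstanding_reqs, and the channel CONFIG word only receives the outstanding-requests field on parts that implement it.

```c
/* Standalone sketch with an illustrative register layout (not the real
 * Tegra ADMA map): chip data carries a has_outstanding_reqs flag, and the
 * CONFIG word only gets the outstanding-requests field when it is set. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CH_CONFIG_OUTSTANDING_REQS(reqs)	((uint32_t)(reqs) << 4)
#define CH_CONFIG_WEIGHT_FOR_WRR(val)		((uint32_t)(val) & 0xf)

struct chip_data {
	const char *name;
	bool has_outstanding_reqs;
};

static uint32_t build_ch_config(const struct chip_data *cdata)
{
	uint32_t config = CH_CONFIG_WEIGHT_FOR_WRR(1);

	/* Older parts must not have the field set at all. */
	if (cdata->has_outstanding_reqs)
		config |= CH_CONFIG_OUTSTANDING_REQS(8);

	return config;
}

int main(void)
{
	static const struct chip_data tegra210 = { "tegra210", false };
	static const struct chip_data tegra186 = { "tegra186", true };

	printf("%s config=0x%08" PRIx32 "\n", tegra210.name,
	       build_ch_config(&tegra210));
	printf("%s config=0x%08" PRIx32 "\n", tegra186.name,
	       build_ch_config(&tegra186));
	return 0;
}
```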
@@ -778,6 +783,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.ch_req_tx_shift = 28,
.ch_req_rx_shift = 24,
.ch_base_offset = 0,
.has_outstanding_reqs = false,
.ch_fifo_ctrl = TEGRA210_FIFO_CTRL_DEFAULT,
.ch_req_mask = 0xf,
.ch_req_max = 10,
@@ -792,6 +798,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.ch_req_tx_shift = 27,
.ch_req_rx_shift = 22,
.ch_base_offset = 0x10000,
.has_outstanding_reqs = true,
.ch_fifo_ctrl = TEGRA186_FIFO_CTRL_DEFAULT,
.ch_req_mask = 0x1f,
.ch_req_max = 20,
...
@@ -586,9 +586,22 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
struct dma_async_tx_descriptor *txd = NULL;
struct cppi41_dd *cdd = c->cdd;
struct cppi41_desc *d;
struct scatterlist *sg;
unsigned int i;
int error;
error = pm_runtime_get(cdd->ddev.dev);
if (error < 0) {
pm_runtime_put_noidle(cdd->ddev.dev);
return NULL;
}
if (cdd->is_suspended)
goto err_out_not_ready;
d = c->desc;
for_each_sg(sgl, sg, sg_len, i) {
@@ -611,7 +624,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
d++;
}
return &c->txd;
txd = &c->txd;
err_out_not_ready:
pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
return txd;
}
static void cppi41_compute_td_desc(struct cppi41_desc *d)
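A standalone sketch of the guard added to the prep callback, with stand-in pm helpers rather than the kernel's runtime-PM API: take a reference before touching the hardware, bail out early if the device is suspended, and drop the reference on every exit path.

```c
/* Standalone sketch: the pm_get()/pm_put() helpers are stand-ins for the
 * runtime-PM calls used in the hunk above, and the descriptor is just a
 * string. The point is the single-exit structure with a balanced put. */
#include <stdbool.h>
#include <stdio.h>

struct device {
	int usage_count;
	bool is_suspended;
};

static int pm_get(struct device *dev)
{
	dev->usage_count++;
	return 0;	/* the real get can fail; the caller must still put */
}

static void pm_put(struct device *dev)
{
	dev->usage_count--;
}

static const char *prep_transfer(struct device *dev)
{
	const char *txd = NULL;

	if (pm_get(dev) < 0) {
		pm_put(dev);	/* balance the failed get, as the hunk does */
		return NULL;
	}

	if (dev->is_suspended)
		goto out_not_ready;	/* don't queue work on a suspended engine */

	/* ... build the descriptor ... */
	txd = "descriptor";

out_not_ready:
	pm_put(dev);
	return txd;
}

int main(void)
{
	struct device dev = { 0, false };

	printf("ready:     %s\n", prep_transfer(&dev) ? "got txd" : "NULL");
	dev.is_suspended = true;
	printf("suspended: %s\n", prep_transfer(&dev) ? "got txd" : "NULL");
	return 0;
}
```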
...
@@ -74,6 +74,9 @@
#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
#define XILINX_DMA_REG_DMASR 0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
@@ -1536,7 +1539,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
node);
hw = &segment->hw;
xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
xilinx_prep_dma_addr_t(hw->buf_addr));
/* Start the transfer */
dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
@@ -2459,8 +2463,10 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
chan->config.gen_lock = cfg->gen_lock;
chan->config.master = cfg->master;
dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
if (cfg->gen_lock && chan->genlock) {
dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
}
@@ -2476,11 +2482,13 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
chan->config.delay = cfg->delay;
if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
chan->config.coalesc = cfg->coalesc;
}
if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
chan->config.delay = cfg->delay;
}
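A standalone sketch of the read-modify-write fix above, with masks mirroring the DMACR layout but an illustrative register variable: clear the multi-bit field before OR-ing in the new value, so bits from a previous configuration cannot survive the update.

```c
/* Standalone sketch: clearing the field with its mask before setting the
 * new value is what keeps stale bits from an earlier configuration out of
 * the register. Masks mirror the frame-count field in the hunk above. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DMACR_FRMCNT_SHIFT	16
#define DMACR_FRMCNT_MASK	(0xffu << DMACR_FRMCNT_SHIFT)	/* GENMASK(23, 16) */

static uint32_t set_frame_count(uint32_t dmacr, uint32_t coalesc)
{
	dmacr &= ~DMACR_FRMCNT_MASK;			/* drop the old value first */
	dmacr |= coalesc << DMACR_FRMCNT_SHIFT;
	return dmacr;
}

int main(void)
{
	uint32_t dmacr = set_frame_count(0, 0xff);	/* first configuration */

	/* Without the clear, 0xff | 0x01 would leave the field at 0xff. */
	dmacr = set_frame_count(dmacr, 0x01);
	printf("DMACR=0x%08" PRIx32 " (frame count=0x%02" PRIx32 ")\n",
	       dmacr, (dmacr & DMACR_FRMCNT_MASK) >> DMACR_FRMCNT_SHIFT);
	return 0;
}
```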
...
@@ -51,7 +51,10 @@ struct sdma_script_start_addrs {
/* End of v2 array */
s32 zcanfd_2_mcu_addr;
s32 zqspi_2_mcu_addr;
s32 mcu_2_ecspi_addr;
/* End of v3 array */
s32 mcu_2_zqspi_addr;
/* End of v4 array */
};
/**
...
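One way (an assumption, not necessarily how the driver defines its constants) to keep per-version script counts in sync with the "End of vN array" markers in the struct above is to derive them with offsetof(), as in this standalone sketch with abbreviated member names.

```c
/* Standalone sketch, an assumption rather than the driver's approach: the
 * per-version element counts follow from offsetof()/sizeof(), so adding a
 * member such as mcu_2_zqspi_addr grows the v4 count automatically. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct script_start_addrs {
	int32_t ap_2_ap_addr;
	int32_t ap_2_bp_addr;
	/* End of v1 array */
	int32_t zqspi_2_mcu_addr;
	int32_t mcu_2_ecspi_addr;
	/* End of v3 array */
	int32_t mcu_2_zqspi_addr;
	/* End of v4 array */
};

#define V3_SIZE	(offsetof(struct script_start_addrs, mcu_2_zqspi_addr) / sizeof(int32_t))
#define V4_SIZE	(sizeof(struct script_start_addrs) / sizeof(int32_t))

int main(void)
{
	printf("v3 scripts: %zu, v4 scripts: %zu\n",
	       (size_t)V3_SIZE, (size_t)V4_SIZE);
	return 0;
}
```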