Commit 952c53cd authored by Linus Torvalds

Merge tag 'dmaengine-fix-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
 "One core fix for DMA_INTERRUPT and rest driver fixes.

  Core:

   - Revert verification of DMA_INTERRUPT capability as that was
     incorrect

  Bunch of driver fixes for:

   - ti: refcount and put_device leaks

   - qcom_bam: runtime PM underflow

   - idxd: force wq context cleanup and call idxd_enable_system_pasid()
     on success

   - dw-axi-dmac: RMW on channel suspend register

   - imx-sdma: restart cyclic channel when enabled

   - at_xdma: error handling for at_xdmac_alloc_desc

   - pl330: lockdep warning

   - lgm: error handling path in probe

   - allwinner: Fix min/max typo in binding"

* tag 'dmaengine-fix-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dt-bindings: dma: allwinner,sun50i-a64-dma: Fix min/max typo
  dmaengine: lgm: Fix an error handling path in intel_ldma_probe()
  dmaengine: pl330: Fix lockdep warning about non-static key
  dmaengine: idxd: Only call idxd_enable_system_pasid() if succeeded in enabling SVA feature
  dmaengine: at_xdma: handle errors of at_xdmac_alloc_desc() correctly
  dmaengine: imx-sdma: only restart cyclic channel when enabled
  dmaengine: dw-axi-dmac: Fix RMW on channel suspend register
  dmaengine: idxd: force wq context cleanup on device disable path
  dmaengine: qcom: bam_dma: fix runtime PM underflow
  dmaengine: imx-sdma: Allow imx8m for imx7 FW revs
  dmaengine: Revert "dmaengine: add verification of DMA_INTERRUPT capability for dmatest"
  dmaengine: ti: Add missing put_device in ti_dra7_xbar_route_allocate
  dmaengine: ti: Fix refcount leak in ti_dra7_xbar_route_allocate
parents 5867f3b8 607a48c7
@@ -67,7 +67,7 @@ if:
   then:
     properties:
       clocks:
-        maxItems: 2
+        minItems: 2
 
     required:
       - clock-names
@@ -1900,6 +1900,11 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
 	for (i = 0; i < init_nr_desc_per_channel; i++) {
 		desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
 		if (!desc) {
+			if (i == 0) {
+				dev_warn(chan2dev(chan),
+					 "can't allocate any descriptors\n");
+				return -EIO;
+			}
 			dev_warn(chan2dev(chan),
 				"only %d descriptors have been allocated\n", i);
 			break;
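Note: the at_xdmac change distinguishes a partially filled descriptor pool (usable, warn and continue) from a completely failed allocation (unusable, return an error). A minimal sketch of that pattern, with hypothetical names rather than the driver's actual code:

#include <linux/list.h>
#include <linux/slab.h>

struct my_desc { struct list_head node; };
struct my_chan { struct list_head free_descs; };

/* Pre-allocate up to @want descriptors; a partial pool is acceptable,
 * but zero successful allocations is a hard error, as in the fix. */
static int prealloc_descs(struct my_chan *chan, int want)
{
	int i;

	for (i = 0; i < want; i++) {
		struct my_desc *d = kzalloc(sizeof(*d), GFP_KERNEL);

		if (!d)
			break;
		list_add_tail(&d->node, &chan->free_descs);
	}
	if (i == 0)
		return -EIO;	/* nothing allocated at all */
	return i;		/* descriptors actually available */
}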
@@ -675,16 +675,10 @@ static int dmatest_func(void *data)
 	/*
 	 * src and dst buffers are freed by ourselves below
 	 */
-	if (params->polled) {
+	if (params->polled)
 		flags = DMA_CTRL_ACK;
-	} else {
-		if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) {
-			flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
-		} else {
-			pr_err("Channel does not support interrupt!\n");
-			goto err_pq_array;
-		}
-	}
+	else
+		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
 	ktime = ktime_get();
 	while (!(kthread_should_stop() ||
@@ -912,7 +906,6 @@ static int dmatest_func(void *data)
 	runtime = ktime_to_us(ktime);
 
 	ret = 0;
-err_pq_array:
 	kfree(dma_pq);
 err_srcs_array:
 	kfree(srcs);
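Note: the reverted check conflated two different things in the dmaengine API. DMA_INTERRUPT is a transaction type (it gates the dedicated device_prep_dma_interrupt() operation), while DMA_PREP_INTERRUPT is a per-descriptor submission flag requesting a completion callback, available regardless of that capability. A hedged fragment to illustrate, with dev, chan, sgl and sg_len assumed set up as in dmatest_func():

#include <linux/dmaengine.h>

	/* DMA_INTERRUPT only advertises the standalone "interrupt"
	 * transaction type: */
	if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask))
		tx = dev->device_prep_dma_interrupt(chan, 0);

	/* Completion interrupts for ordinary transfers are requested per
	 * descriptor instead, with no capability check needed: */
	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);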
@@ -1164,8 +1164,9 @@ static int dma_chan_pause(struct dma_chan *dchan)
 		      BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
 		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
 	} else {
-		val = BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
-		      BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
+		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
+		val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
+			BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
 		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
 	}
@@ -1190,12 +1191,13 @@ static inline void axi_chan_resume(struct axi_dma_chan *chan)
 {
 	u32 val;
 
-	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
 	if (chan->chip->dw->hdata->reg_map_8_channels) {
+		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
 		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
 		val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
 		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
 	} else {
+		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
 		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
 		val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
 		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
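Note: both hunks apply the same read-modify-write discipline. DMAC_CHSUSPREG is shared by all channels, so a blind write that composes only this channel's bits wipes the suspend state already programmed for the others. The generic shape of the pattern, as a sketch rather than the driver's helper:

#include <linux/io.h>

/* Read-modify-write of a register shared by several channels: keep
 * the bits other channels already own, OR in only our own. */
static void chan_set_shared_bits(void __iomem *reg, u32 mine)
{
	u32 val = readl(reg);	/* current state, all channels */

	val |= mine;		/* touch only this channel's bits */
	writel(val, reg);
}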
@@ -716,10 +716,7 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
 		struct idxd_wq *wq = idxd->wqs[i];
 
 		mutex_lock(&wq->wq_lock);
-		if (wq->state == IDXD_WQ_ENABLED) {
-			idxd_wq_disable_cleanup(wq);
-			wq->state = IDXD_WQ_DISABLED;
-		}
+		idxd_wq_disable_cleanup(wq);
 		idxd_wq_device_reset_cleanup(wq);
 		mutex_unlock(&wq->wq_lock);
 	}
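Note: cleanup previously ran only for queues still marked IDXD_WQ_ENABLED, leaking wq context when the state had already changed on the disable path. Calling it unconditionally requires the cleanup to tolerate any state. A sketch of that idempotent shape, hypothetical names:

#include <linux/slab.h>

#define MY_WQ_DISABLED 0
struct my_wq { void *ctx; int state; };

/* Safe to call in any state: kfree(NULL) is a no-op and the pointer
 * is cleared so a repeated call frees nothing. */
static void my_wq_cleanup(struct my_wq *wq)
{
	kfree(wq->ctx);
	wq->ctx = NULL;
	wq->state = MY_WQ_DISABLED;
}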
@@ -512,15 +512,16 @@ static int idxd_probe(struct idxd_device *idxd)
 	dev_dbg(dev, "IDXD reset complete\n");
 
 	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
-		if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
+		if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
 			dev_warn(dev, "Unable to turn on user SVA feature.\n");
-		else
+		} else {
 			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
 
-		if (idxd_enable_system_pasid(idxd))
-			dev_warn(dev, "No in-kernel DMA with PASID.\n");
-		else
-			set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+			if (idxd_enable_system_pasid(idxd))
+				dev_warn(dev, "No in-kernel DMA with PASID.\n");
+			else
+				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+		}
 	} else if (!sva) {
 		dev_warn(dev, "User forced SVA off via module param.\n");
 	}
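Note: the restructuring makes idxd_enable_system_pasid() run only in the branch where the SVA feature was actually enabled; before, it was attempted even after iommu_dev_enable_feature() failed. The generic pattern, with hypothetical helper names:

	/* Run the dependent step only once its prerequisite succeeded. */
	if (enable_prereq(dev)) {
		dev_warn(dev, "prerequisite unavailable\n");
	} else {
		set_bit(PREREQ_ENABLED, &flags);

		if (enable_dependent(dev))	/* meaningless without prereq */
			dev_warn(dev, "dependent setup failed\n");
		else
			set_bit(DEPENDENT_ENABLED, &flags);
	}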
@@ -891,7 +891,7 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 	 * SDMA stops cyclic channel when DMA request triggers a channel and no SDMA
 	 * owned buffer is available (i.e. BD_DONE was set too late).
 	 */
-	if (!is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
+	if (sdmac->desc && !is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
 		dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel);
 		sdma_enable_channel(sdmac->sdma, sdmac->channel);
 	}
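Note: the extra sdmac->desc test guards the restart against a channel whose descriptor has already been torn down (for instance by a terminate) between the interrupt firing and this handler running. A sketch of the guard, hypothetical names:

	/* Deferred work may race with channel teardown: desc is cleared
	 * on terminate, so test it before poking the hardware. */
	if (chan->desc && !hw_channel_enabled(hw, chan->id))
		hw_channel_enable(hw, chan->id);	/* restart live channels only */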
@@ -2346,7 +2346,7 @@ MODULE_DESCRIPTION("i.MX SDMA driver");
 #if IS_ENABLED(CONFIG_SOC_IMX6Q)
 MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
 #endif
-#if IS_ENABLED(CONFIG_SOC_IMX7D)
+#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
 MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
 #endif
 MODULE_LICENSE("GPL");
@@ -1593,11 +1593,12 @@ static int intel_ldma_probe(struct platform_device *pdev)
 	d->core_clk = devm_clk_get_optional(dev, NULL);
 	if (IS_ERR(d->core_clk))
 		return PTR_ERR(d->core_clk);
-	clk_prepare_enable(d->core_clk);
 
 	d->rst = devm_reset_control_get_optional(dev, NULL);
 	if (IS_ERR(d->rst))
 		return PTR_ERR(d->rst);
+
+	clk_prepare_enable(d->core_clk);
 	reset_control_deassert(d->rst);
 
 	ret = devm_add_action_or_reset(dev, ldma_clk_disable, d);
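Note: the reordering keeps the probe's error paths clean. With clk_prepare_enable() moved after the last lookup that can fail (including with -EPROBE_DEFER), an early return no longer leaks an enabled clock. A sketch of the ordering rule using the same kernel APIs:

#include <linux/clk.h>
#include <linux/reset.h>

	struct clk *clk;
	struct reset_control *rst;

	/* Acquire everything that can fail first; only then cause side
	 * effects, so early returns need no unwinding. */
	clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	rst = devm_reset_control_get_optional(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);	/* nothing enabled yet, nothing to undo */

	clk_prepare_enable(clk);	/* side effects begin here */
	reset_control_deassert(rst);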
@@ -2589,7 +2589,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
 	/* If the DMAC pool is empty, alloc new */
 	if (!desc) {
-		DEFINE_SPINLOCK(lock);
+		static DEFINE_SPINLOCK(lock);
 		LIST_HEAD(pool);
 
 		if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))
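Note: DEFINE_SPINLOCK() expands to a static initializer whose lockdep key is tied to the lock's address, which is only valid for static or global storage. A lock living on the stack must instead be set up with spin_lock_init() so lockdep can register a proper key; the "trying to register non-static key" warning is what this one-word fix silences. For illustration:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(file_lock);	/* fine: static storage */

static void demo(void)
{
	spinlock_t stack_lock;		/* on-stack lock... */

	spin_lock_init(&stack_lock);	/* ...needs runtime init for a
					 * valid lockdep key */
	spin_lock(&stack_lock);
	spin_unlock(&stack_lock);
}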
@@ -558,14 +558,6 @@ static int bam_alloc_chan(struct dma_chan *chan)
 	return 0;
 }
 
-static int bam_pm_runtime_get_sync(struct device *dev)
-{
-	if (pm_runtime_enabled(dev))
-		return pm_runtime_get_sync(dev);
-
-	return 0;
-}
-
 /**
  * bam_free_chan - Frees dma resources associated with specific channel
  * @chan: specified channel
@@ -581,7 +573,7 @@ static void bam_free_chan(struct dma_chan *chan)
 	unsigned long flags;
 	int ret;
 
-	ret = bam_pm_runtime_get_sync(bdev->dev);
+	ret = pm_runtime_get_sync(bdev->dev);
 	if (ret < 0)
 		return;
@@ -784,7 +776,7 @@ static int bam_pause(struct dma_chan *chan)
 	unsigned long flag;
 	int ret;
 
-	ret = bam_pm_runtime_get_sync(bdev->dev);
+	ret = pm_runtime_get_sync(bdev->dev);
 	if (ret < 0)
 		return ret;
@@ -810,7 +802,7 @@ static int bam_resume(struct dma_chan *chan)
 	unsigned long flag;
 	int ret;
 
-	ret = bam_pm_runtime_get_sync(bdev->dev);
+	ret = pm_runtime_get_sync(bdev->dev);
 	if (ret < 0)
 		return ret;
@@ -919,7 +911,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
 	if (srcs & P_IRQ)
 		tasklet_schedule(&bdev->task);
 
-	ret = bam_pm_runtime_get_sync(bdev->dev);
+	ret = pm_runtime_get_sync(bdev->dev);
 	if (ret < 0)
 		return IRQ_NONE;
@@ -1037,7 +1029,7 @@ static void bam_start_dma(struct bam_chan *bchan)
 	if (!vd)
 		return;
 
-	ret = bam_pm_runtime_get_sync(bdev->dev);
+	ret = pm_runtime_get_sync(bdev->dev);
 	if (ret < 0)
 		return;
@@ -1374,11 +1366,6 @@ static int bam_dma_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_unregister_dma;
 
-	if (!bdev->bamclk) {
-		pm_runtime_disable(&pdev->dev);
-		return 0;
-	}
-
 	pm_runtime_irq_safe(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
 	pm_runtime_use_autosuspend(&pdev->dev);
@@ -1462,10 +1449,8 @@ static int __maybe_unused bam_dma_suspend(struct device *dev)
 {
 	struct bam_device *bdev = dev_get_drvdata(dev);
 
-	if (bdev->bamclk) {
-		pm_runtime_force_suspend(dev);
-		clk_unprepare(bdev->bamclk);
-	}
+	pm_runtime_force_suspend(dev);
+	clk_unprepare(bdev->bamclk);
 
 	return 0;
 }
@@ -1475,13 +1460,11 @@ static int __maybe_unused bam_dma_resume(struct device *dev)
 	struct bam_device *bdev = dev_get_drvdata(dev);
 	int ret;
 
-	if (bdev->bamclk) {
-		ret = clk_prepare(bdev->bamclk);
-		if (ret)
-			return ret;
+	ret = clk_prepare(bdev->bamclk);
+	if (ret)
+		return ret;
 
-		pm_runtime_force_resume(dev);
-	}
+	pm_runtime_force_resume(dev);
 
 	return 0;
 }
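Note: the removed wrapper skipped pm_runtime_get_sync() whenever runtime PM was disabled (the no-bamclk case), while the matching pm_runtime_put_autosuspend() calls still ran, driving the usage count negative. pm_runtime_get_sync() is itself safe on a runtime-PM-disabled device: it still adjusts the usage count and succeeds for a device that is already active, so the calls can simply stay paired. The canonical balanced pattern:

#include <linux/pm_runtime.h>

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* get_sync bumps the count even on error */
		return ret;
	}

	/* ... access the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* every get has exactly one put */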
@@ -245,6 +245,7 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
 	if (dma_spec->args[0] >= xbar->xbar_requests) {
 		dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
 			dma_spec->args[0]);
+		put_device(&pdev->dev);
 		return ERR_PTR(-EINVAL);
 	}
@@ -252,12 +253,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
 	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
 	if (!dma_spec->np) {
 		dev_err(&pdev->dev, "Can't get DMA master\n");
+		put_device(&pdev->dev);
 		return ERR_PTR(-EINVAL);
 	}
 
 	map = kzalloc(sizeof(*map), GFP_KERNEL);
 	if (!map) {
 		of_node_put(dma_spec->np);
+		put_device(&pdev->dev);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -268,6 +271,8 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
 		mutex_unlock(&xbar->mutex);
 		dev_err(&pdev->dev, "Run out of free DMA requests\n");
 		kfree(map);
+		of_node_put(dma_spec->np);
+		put_device(&pdev->dev);
 		return ERR_PTR(-ENOMEM);
 	}
 	set_bit(map->xbar_out, xbar->dma_inuse);
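Note: ti_dra7_xbar_route_allocate() obtains pdev via of_find_device_by_node(), which takes a reference on the embedded struct device; every error path after the lookup must drop it, which is what the added put_device() calls do. The general rule, sketched with a hypothetical follow-up step:

#include <linux/of_platform.h>

	struct platform_device *pdev = of_find_device_by_node(np);

	if (!pdev)
		return ERR_PTR(-ENODEV);

	if (configure(pdev)) {			/* hypothetical setup step */
		put_device(&pdev->dev);		/* balance of_find_device_by_node() */
		return ERR_PTR(-EINVAL);
	}
	/* on success, keep the reference until the route is freed */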