Commit 6fd145da authored by Linus Torvalds

Merge tag 'dmaengine-fix-5.6-rc5' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "A bunch of driver fixes:

   - Doc updates to clean warnings for dmaengine

   - Fixes for newly added Intel idxd driver

   - More fixes for newly added TI k3-udma driver

   - Fixes for IMX and Tegra drivers"

* tag 'dmaengine-fix-5.6-rc5' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: imx-sdma: Fix the event id check to include RX event for UART6
  dmaengine: tegra-apb: Prevent race conditions of tasklet vs free list
  dmaengine: tegra-apb: Fix use-after-free
  dmaengine: imx-sdma: fix context cache
  dmaengine: idxd: wq size configuration needs to check global max size
  dmaengine: idxd: sysfs input of wq incorrect wq type should return error
  dmaengine: coh901318: Fix a double lock bug in dma_tc_handle()
  dmaengine: idxd: correct reserved token calculation
  dmaengine: ti: k3-udma: Fix terminated transfer handling
  dmaengine: ti: k3-udma: Use the channel direction in pause/resume functions
  dmaengine: ti: k3-udma: Use the TR counter helper for slave_sg and cyclic
  dmaengine: ti: k3-udma: Move the TR counter calculation to helper function
  dmaengine: ti: k3-udma: Workaround for RX teardown with stale data in peer
  dmaengine: ti: k3-udma: Use ktime/usleep_range based TX completion check
  dmaengine: idxd: Fix error handling in idxd_wq_cdev_dev_setup()
  dmaengine: doc: fix warnings/issues of client.rst
  dmaengine: idxd: fix runaway module ref count on device driver bind
parents 776e49e8 25962e1a
@@ -151,8 +151,8 @@ The details of these operations are:
Note that callbacks will always be invoked from the DMA
engine's tasklet, never from interrupt context.
-Optional: per descriptor metadata
----------------------------------
+**Optional: per descriptor metadata**
DMAengine provides two ways for metadata support.
DESC_METADATA_CLIENT
@@ -199,12 +199,15 @@ Optional: per descriptor metadata
DESC_METADATA_CLIENT
- DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
1. prepare the descriptor (dmaengine_prep_*)
construct the metadata in the client's buffer
2. use dmaengine_desc_attach_metadata() to attach the buffer to the
descriptor
3. submit the transfer
- DMA_DEV_TO_MEM:
1. prepare the descriptor (dmaengine_prep_*)
2. use dmaengine_desc_attach_metadata() to attach the buffer to the
descriptor
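
Put together, the DESC_METADATA_CLIENT steps for DMA_MEM_TO_DEV above boil down to client code along the following lines. This is a minimal sketch, not code from this patch: the function name, the pre-mapped data buffer and the metadata buffer are assumptions, and real code would handle errors per the driver's needs.

    #include <linux/dmaengine.h>

    /* DESC_METADATA_CLIENT, DMA_MEM_TO_DEV: the client owns the metadata
     * buffer and attaches it to the descriptor before submitting.
     */
    static int xfer_with_client_metadata(struct dma_chan *chan,
                                         dma_addr_t buf, size_t buf_len,
                                         void *metadata, size_t metadata_len)
    {
            struct dma_async_tx_descriptor *desc;
            dma_cookie_t cookie;
            int ret;

            /* 1. prepare the descriptor */
            desc = dmaengine_prep_slave_single(chan, buf, buf_len,
                                               DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
            if (!desc)
                    return -ENOMEM;

            /* 2. attach the client's metadata buffer to the descriptor */
            ret = dmaengine_desc_attach_metadata(desc, metadata, metadata_len);
            if (ret)
                    return ret;

            /* 3. submit the transfer; any completion callback will run from
             * the DMA engine's tasklet, never from interrupt context
             */
            cookie = dmaengine_submit(desc);
            if (dma_submit_error(cookie))
                    return -EIO;
            dma_async_issue_pending(chan);
            return 0;
    }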
@@ -215,6 +218,7 @@ Optional: per descriptor metadata
DESC_METADATA_ENGINE
- DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
1. prepare the descriptor (dmaengine_prep_*)
2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the
engine's metadata area
@@ -222,7 +226,9 @@ Optional: per descriptor metadata
4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the
amount of data the client has placed into the metadata buffer
5. submit the transfer
- DMA_DEV_TO_MEM:
1. prepare the descriptor (dmaengine_prep_*)
2. submit the transfer
3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get
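
The DESC_METADATA_ENGINE flow for DMA_MEM_TO_DEV (steps 1-5 above) can be sketched the same way; here the metadata buffer belongs to the DMA engine and the client only writes through the pointer handed back to it. Again a hedged illustration with a hypothetical function name:

    #include <linux/dmaengine.h>
    #include <linux/err.h>
    #include <linux/string.h>

    static int xfer_with_engine_metadata(struct dma_chan *chan,
                                         dma_addr_t buf, size_t buf_len,
                                         const void *metadata, size_t metadata_len)
    {
            struct dma_async_tx_descriptor *desc;
            size_t payload_len, max_len;
            void *engine_buf;
            int ret;

            /* 1. prepare the descriptor */
            desc = dmaengine_prep_slave_single(chan, buf, buf_len,
                                               DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
            if (!desc)
                    return -ENOMEM;

            /* 2. get a pointer to the engine's metadata area */
            engine_buf = dmaengine_desc_get_metadata_ptr(desc, &payload_len,
                                                         &max_len);
            if (IS_ERR(engine_buf))
                    return PTR_ERR(engine_buf);
            if (metadata_len > max_len)
                    return -EINVAL;

            /* 3. construct the metadata in the engine's buffer */
            memcpy(engine_buf, metadata, metadata_len);

            /* 4. tell the engine how much data the client placed there */
            ret = dmaengine_desc_set_metadata_len(desc, metadata_len);
            if (ret)
                    return ret;

            /* 5. submit the transfer */
            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;
    }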
@@ -278,8 +284,8 @@ Optional: per descriptor metadata
void dma_async_issue_pending(struct dma_chan *chan);
-Further APIs:
--------------
+Further APIs
+------------
1. Terminate APIs
@@ -1947,8 +1947,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
return;
}
-spin_lock(&cohc->lock);
/*
* When we reach this point, at least one queue item
* should have been moved over from cohc->queue to
@@ -1969,8 +1967,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
if (coh901318_queue_start(cohc) == NULL)
cohc->busy = 0;
-spin_unlock(&cohc->lock);
/*
* This tasklet will remove items from cohc->active
* and thus terminates them.
@@ -204,6 +204,7 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
if (minor < 0) {
rc = minor;
+kfree(dev);
goto ida_err;
}
@@ -212,7 +213,6 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
rc = device_register(dev);
if (rc < 0) {
dev_err(&idxd->pdev->dev, "device register failed\n");
-put_device(dev);
goto dev_reg_err;
}
idxd_cdev->minor = minor;
@@ -221,8 +221,8 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
dev_reg_err:
ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
+put_device(dev);
ida_err:
-kfree(dev);
idxd_cdev->dev = NULL;
return rc;
}
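
The reworked error paths above follow the usual driver-model ownership rule: before the device core has taken the allocation, kfree() is correct, but once device_register() has run (even if it failed) the embedded kobject owns the memory and only put_device() may release it, through the release callback. A minimal sketch of that rule with hypothetical names (foo_cdev_setup() and friends are illustrations, not the idxd code):

    #include <linux/device.h>
    #include <linux/idr.h>
    #include <linux/kdev_t.h>
    #include <linux/slab.h>

    static DEFINE_IDA(foo_minor_ida);

    static void foo_dev_release(struct device *dev)
    {
            kfree(dev);                     /* the release callback frees the memory */
    }

    static int foo_cdev_setup(struct device **out)
    {
            struct device *dev;
            int minor, rc;

            dev = kzalloc(sizeof(*dev), GFP_KERNEL);
            if (!dev)
                    return -ENOMEM;

            minor = ida_simple_get(&foo_minor_ida, 0, MINORMASK, GFP_KERNEL);
            if (minor < 0) {
                    kfree(dev);             /* nothing owns dev yet: plain kfree() */
                    return minor;
            }

            dev->release = foo_dev_release;
            dev_set_name(dev, "foo%d", minor);

            rc = device_register(dev);
            if (rc < 0) {
                    /* device_register() initialized the kobject even though it
                     * failed: never kfree() here, drop the reference instead
                     */
                    ida_simple_remove(&foo_minor_ida, minor);
                    put_device(dev);
                    return rc;
            }

            *out = dev;
            return 0;
    }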
@@ -124,6 +124,7 @@ static int idxd_config_bus_probe(struct device *dev)
rc = idxd_device_config(idxd);
if (rc < 0) {
spin_unlock_irqrestore(&idxd->dev_lock, flags);
+module_put(THIS_MODULE);
dev_warn(dev, "Device config failed: %d\n", rc);
return rc;
}
@@ -132,6 +133,7 @@ static int idxd_config_bus_probe(struct device *dev)
rc = idxd_device_enable(idxd);
if (rc < 0) {
spin_unlock_irqrestore(&idxd->dev_lock, flags);
+module_put(THIS_MODULE);
dev_warn(dev, "Device enable failed: %d\n", rc);
return rc;
}
@@ -142,6 +144,7 @@ static int idxd_config_bus_probe(struct device *dev)
rc = idxd_register_dma_device(idxd);
if (rc < 0) {
spin_unlock_irqrestore(&idxd->dev_lock, flags);
+module_put(THIS_MODULE);
dev_dbg(dev, "Failed to register dmaengine device\n");
return rc;
}
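
All three hunks in this probe path add the same call: the bind path takes a reference on the module earlier on (outside these hunks), so every early error return has to drop that reference again or the count runs away and the module can never be unloaded. The balanced shape of such a probe, sketched with hypothetical helpers (foo_device_config() and foo_device_enable() are placeholders, not the idxd functions):

    #include <linux/device.h>
    #include <linux/module.h>

    static int foo_bus_probe(struct device *dev)
    {
            int rc;

            /* reference held for the lifetime of the bound device */
            if (!try_module_get(THIS_MODULE))
                    return -ENXIO;

            rc = foo_device_config(dev);            /* hypothetical */
            if (rc < 0)
                    goto err_put;

            rc = foo_device_enable(dev);            /* hypothetical */
            if (rc < 0)
                    goto err_put;

            return 0;

    err_put:
            /* every early return must drop the reference it took */
            module_put(THIS_MODULE);
            return rc;
    }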
@@ -516,7 +519,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
if (val > idxd->max_tokens)
return -EINVAL;
-if (val > idxd->nr_tokens)
+if (val > idxd->nr_tokens + group->tokens_reserved)
return -EINVAL;
group->tokens_reserved = val;
@@ -901,6 +904,20 @@ static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", wq->size);
}
+static int total_claimed_wq_size(struct idxd_device *idxd)
+{
+int i;
+int wq_size = 0;
+for (i = 0; i < idxd->max_wqs; i++) {
+struct idxd_wq *wq = &idxd->wqs[i];
+wq_size += wq->size;
+}
+return wq_size;
+}
static ssize_t wq_size_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -920,7 +937,7 @@ static ssize_t wq_size_store(struct device *dev,
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
-if (size > idxd->max_wq_size)
+if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
return -EINVAL;
wq->size = size;
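
In other words, a work queue may only grow into whatever part of the device-wide budget the other queues have not claimed. With a hypothetical max_wq_size of 128 and two other queues already sized 64 and 32, any value up to 32 is accepted for this queue regardless of its current size, because its own current size is subtracted back out of total_claimed_wq_size() before the comparison.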
@@ -999,12 +1016,14 @@ static ssize_t wq_type_store(struct device *dev,
return -EPERM;
old_type = wq->type;
-if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
+if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
+wq->type = IDXD_WQT_NONE;
+else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
wq->type = IDXD_WQT_KERNEL;
else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
wq->type = IDXD_WQT_USER;
else
-wq->type = IDXD_WQT_NONE;
+return -EINVAL;
/* If we are changing queue type, clear the name */
if (wq->type != old_type)
@@ -1331,13 +1331,14 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
sdma_channel_synchronize(chan);
-if (sdmac->event_id0)
+if (sdmac->event_id0 >= 0)
sdma_event_disable(sdmac, sdmac->event_id0);
if (sdmac->event_id1)
sdma_event_disable(sdmac, sdmac->event_id1);
sdmac->event_id0 = 0;
sdmac->event_id1 = 0;
+sdmac->context_loaded = false;
sdma_set_channel_priority(sdmac, 0);
@@ -1631,7 +1632,7 @@ static int sdma_config(struct dma_chan *chan,
memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
/* Set ENBLn earlier to make sure dma request triggered after that */
-if (sdmac->event_id0) {
+if (sdmac->event_id0 >= 0) {
if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
return -EINVAL;
sdma_event_enable(sdmac, sdmac->event_id0);
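
Both sdma hunks replace a truth test with a sign test because 0 is itself a valid SDMA event id; evidently the UART6 RX event named in the commit subject maps to id 0, which the old check silently skipped. A tiny illustration of the sentinel pattern involved (hypothetical names, not the imx-sdma structures):

    /* When 0 is a valid hardware event id, "unused" must be a negative
     * sentinel and callers have to test the sign, not the truth value.
     */
    struct foo_chan {
            int event_id;                   /* e.g. -EINVAL when no event is mapped */
    };

    static void foo_hw_event_enable(int id)
    {
            /* hypothetical hardware access */
    }

    static void foo_enable_events(struct foo_chan *c)
    {
            if (c->event_id >= 0)           /* "if (c->event_id)" would skip id 0 */
                    foo_hw_event_enable(c->event_id);
    }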
@@ -281,7 +281,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
/* Do not allocate if desc are waiting for ack */
list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
-if (async_tx_test_ack(&dma_desc->txd)) {
+if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
list_del(&dma_desc->node);
spin_unlock_irqrestore(&tdc->lock, flags);
dma_desc->txd.flags = 0;
@@ -756,10 +756,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
bool was_busy;
spin_lock_irqsave(&tdc->lock, flags);
-if (list_empty(&tdc->pending_sg_req)) {
-spin_unlock_irqrestore(&tdc->lock, flags);
-return 0;
-}
if (!tdc->busy)
goto skip_dma_stop;