Commit 6fd145da authored by Linus Torvalds

Merge tag 'dmaengine-fix-5.6-rc5' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "A bunch of driver fixes:

   - Doc updates to clean warnings for dmaengine

   - Fixes for newly added Intel idxd driver

   - More fixes for newly added TI k3-udma driver

   - Fixes for IMX and Tegra drivers"

* tag 'dmaengine-fix-5.6-rc5' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: imx-sdma: Fix the event id check to include RX event for UART6
  dmaengine: tegra-apb: Prevent race conditions of tasklet vs free list
  dmaengine: tegra-apb: Fix use-after-free
  dmaengine: imx-sdma: fix context cache
  dmaengine: idxd: wq size configuration needs to check global max size
  dmaengine: idxd: sysfs input of wq incorrect wq type should return error
  dmaengine: coh901318: Fix a double lock bug in dma_tc_handle()
  dmaengine: idxd: correct reserved token calculation
  dmaengine: ti: k3-udma: Fix terminated transfer handling
  dmaengine: ti: k3-udma: Use the channel direction in pause/resume functions
  dmaengine: ti: k3-udma: Use the TR counter helper for slave_sg and cyclic
  dmaengine: ti: k3-udma: Move the TR counter calculation to helper function
  dmaengine: ti: k3-udma: Workaround for RX teardown with stale data in peer
  dmaengine: ti: k3-udma: Use ktime/usleep_range based TX completion check
  dmaengine: idxd: Fix error handling in idxd_wq_cdev_dev_setup()
  dmaengine: doc: fix warnings/issues of client.rst
  dmaengine: idxd: fix runaway module ref count on device driver bind
parents 776e49e8 25962e1a
diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst
@@ -151,8 +151,8 @@ The details of these operations are:
      Note that callbacks will always be invoked from the DMA
      engines tasklet, never from interrupt context.
 
-Optional: per descriptor metadata
----------------------------------
+  **Optional: per descriptor metadata**
+
   DMAengine provides two ways for metadata support.
 
   DESC_METADATA_CLIENT
@@ -199,12 +199,15 @@ Optional: per descriptor metadata
   DESC_METADATA_CLIENT
 
   - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+
     1. prepare the descriptor (dmaengine_prep_*)
        construct the metadata in the client's buffer
     2. use dmaengine_desc_attach_metadata() to attach the buffer to the
        descriptor
     3. submit the transfer
+
   - DMA_DEV_TO_MEM:
+
     1. prepare the descriptor (dmaengine_prep_*)
     2. use dmaengine_desc_attach_metadata() to attach the buffer to the
        descriptor
@@ -215,6 +218,7 @@ Optional: per descriptor metadata
   DESC_METADATA_ENGINE
 
   - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+
     1. prepare the descriptor (dmaengine_prep_*)
     2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the
        engine's metadata area
@@ -222,7 +226,9 @@ Optional: per descriptor metadata
     4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the
       amount of data the client has placed into the metadata buffer
     5. submit the transfer
+
   - DMA_DEV_TO_MEM:
+
     1. prepare the descriptor (dmaengine_prep_*)
     2. submit the transfer
     3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get
@@ -278,8 +284,8 @@ Optional: per descriptor metadata
   void dma_async_issue_pending(struct dma_chan *chan);
 
-Further APIs:
--------------
+Further APIs
+------------
 
 1. Terminate APIs
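For illustration, a minimal client-side sketch of the DESC_METADATA_CLIENT flow documented above, for the DMA_MEM_TO_DEV case. Names here are hypothetical; the channel is assumed to be already requested via dma_request_chan() and configured via dmaengine_slave_config(), and error unwinding is trimmed:

```c
#include <linux/dmaengine.h>

/*
 * Hypothetical sketch of the DESC_METADATA_CLIENT steps for
 * DMA_MEM_TO_DEV: prepare, attach the client-owned metadata
 * buffer, then submit.
 */
static int xfer_with_client_metadata(struct dma_chan *chan,
				     dma_addr_t buf, size_t len,
				     void *md, size_t md_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	/* 1. prepare the descriptor */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/*
	 * 2. attach the metadata buffer to the descriptor; the metadata
	 * contents were constructed in "md" by the client beforehand.
	 */
	ret = dmaengine_desc_attach_metadata(desc, md, md_len);
	if (ret)
		return ret;

	/* 3. submit the transfer and kick the engine */
	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		return ret;

	dma_async_issue_pending(chan);
	return 0;
}
```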
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
@@ -1947,8 +1947,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
 		return;
 	}
 
-	spin_lock(&cohc->lock);
-
 	/*
 	 * When we reach this point, at least one queue item
 	 * should have been moved over from cohc->queue to
@@ -1969,8 +1967,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
 	if (coh901318_queue_start(cohc) == NULL)
 		cohc->busy = 0;
 
-	spin_unlock(&cohc->lock);
-
 	/*
 	 * This tasklet will remove items from cohc->active
 	 * and thus terminates them.
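The removed lock/unlock pair is the whole fix: per the commit subject, dma_tc_handle() already runs with cohc->lock held, and kernel spinlocks are not recursive, so the inner spin_lock() self-deadlocks. A minimal standalone illustration of that pattern (names hypothetical):

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void inner(void)
{
	spin_lock(&demo_lock);	/* BUG: caller already holds demo_lock */
	/* ... */
	spin_unlock(&demo_lock);
}

static void outer(void)
{
	spin_lock(&demo_lock);
	inner();		/* self-deadlock: same lock taken twice */
	spin_unlock(&demo_lock);
}
```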
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
@@ -204,6 +204,7 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
 	minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
 	if (minor < 0) {
 		rc = minor;
+		kfree(dev);
 		goto ida_err;
 	}
@@ -212,7 +213,6 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
 	rc = device_register(dev);
 	if (rc < 0) {
 		dev_err(&idxd->pdev->dev, "device register failed\n");
-		put_device(dev);
 		goto dev_reg_err;
 	}
 	idxd_cdev->minor = minor;
@@ -221,8 +221,8 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
 
  dev_reg_err:
 	ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
+	put_device(dev);
  ida_err:
-	kfree(dev);
 	idxd_cdev->dev = NULL;
 	return rc;
 }
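The reordering follows the driver-model rule: once device_register() has been called, the structure must only be released via put_device(), even when registration fails, because the core holds a reference and the actual freeing belongs in the release() callback; a bare kfree() is only legal before that point. A generic sketch of the pattern (names hypothetical):

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

static void demo_dev_release(struct device *dev)
{
	kfree(dev);	/* freeing happens here, once the refcount drops */
}

static struct device *setup_child(struct device *parent)
{
	struct device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->parent = parent;
	dev->release = demo_dev_release;
	dev_set_name(dev, "demo0");

	if (device_register(dev)) {
		/*
		 * NOT kfree(dev): device_register() initialized the
		 * reference even though it failed, so only put_device()
		 * is safe from here on.
		 */
		put_device(dev);
		return ERR_PTR(-ENODEV);
	}
	return dev;
}
```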
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
@@ -124,6 +124,7 @@ static int idxd_config_bus_probe(struct device *dev)
 		rc = idxd_device_config(idxd);
 		if (rc < 0) {
 			spin_unlock_irqrestore(&idxd->dev_lock, flags);
+			module_put(THIS_MODULE);
 			dev_warn(dev, "Device config failed: %d\n", rc);
 			return rc;
 		}
@@ -132,6 +133,7 @@ static int idxd_config_bus_probe(struct device *dev)
 		rc = idxd_device_enable(idxd);
 		if (rc < 0) {
 			spin_unlock_irqrestore(&idxd->dev_lock, flags);
+			module_put(THIS_MODULE);
 			dev_warn(dev, "Device enable failed: %d\n", rc);
 			return rc;
 		}
@@ -142,6 +144,7 @@ static int idxd_config_bus_probe(struct device *dev)
 		rc = idxd_register_dma_device(idxd);
 		if (rc < 0) {
 			spin_unlock_irqrestore(&idxd->dev_lock, flags);
+			module_put(THIS_MODULE);
 			dev_dbg(dev, "Failed to register dmaengine device\n");
 			return rc;
 		}
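These three additions all release a module reference on early-error returns; presumably the probe path takes the reference up front and previously leaked it on failure, which is the "runaway module ref count on device driver bind" from the commit log. The balanced pattern, sketched generically (demo_configure() is a hypothetical helper):

```c
#include <linux/device.h>
#include <linux/module.h>

static int demo_configure(struct device *dev)
{
	return 0;	/* hypothetical device setup, always succeeds here */
}

static int demo_probe(struct device *dev)
{
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENXIO;

	rc = demo_configure(dev);
	if (rc < 0) {
		module_put(THIS_MODULE);	/* balance the get on failure */
		return rc;
	}

	return 0;	/* reference dropped later, on unbind */
}
```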
@@ -516,7 +519,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
 	if (val > idxd->max_tokens)
 		return -EINVAL;
 
-	if (val > idxd->nr_tokens)
+	if (val > idxd->nr_tokens + group->tokens_reserved)
 		return -EINVAL;
 
 	group->tokens_reserved = val;
@@ -901,6 +904,20 @@ static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
 	return sprintf(buf, "%u\n", wq->size);
 }
 
+static int total_claimed_wq_size(struct idxd_device *idxd)
+{
+	int i;
+	int wq_size = 0;
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		struct idxd_wq *wq = &idxd->wqs[i];
+
+		wq_size += wq->size;
+	}
+
+	return wq_size;
+}
+
 static ssize_t wq_size_store(struct device *dev,
 			     struct device_attribute *attr, const char *buf,
 			     size_t count)
@@ -920,7 +937,7 @@ static ssize_t wq_size_store(struct device *dev,
 	if (wq->state != IDXD_WQ_DISABLED)
 		return -EPERM;
 
-	if (size > idxd->max_wq_size)
+	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
 		return -EINVAL;
 
 	wq->size = size;
@@ -999,12 +1016,14 @@ static ssize_t wq_type_store(struct device *dev,
 		return -EPERM;
 
 	old_type = wq->type;
-	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
+	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
+		wq->type = IDXD_WQT_NONE;
+	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
 		wq->type = IDXD_WQT_KERNEL;
 	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
 		wq->type = IDXD_WQT_USER;
 	else
-		wq->type = IDXD_WQT_NONE;
+		return -EINVAL;
 
 	/* If we are changing queue type, clear the name */
 	if (wq->type != old_type)
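Behavioral note: an unrecognized type string now fails the store with -EINVAL instead of being silently coerced to IDXD_WQT_NONE, and writing the name for IDXD_WQT_NONE becomes an explicitly accepted way to clear the queue type.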
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
@@ -1331,13 +1331,14 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 	sdma_channel_synchronize(chan);
 
-	if (sdmac->event_id0)
+	if (sdmac->event_id0 >= 0)
 		sdma_event_disable(sdmac, sdmac->event_id0);
 	if (sdmac->event_id1)
 		sdma_event_disable(sdmac, sdmac->event_id1);
 
 	sdmac->event_id0 = 0;
 	sdmac->event_id1 = 0;
+	sdmac->context_loaded = false;
 
 	sdma_set_channel_priority(sdmac, 0);
@@ -1631,7 +1632,7 @@ static int sdma_config(struct dma_chan *chan,
 	memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
 
 	/* Set ENBLn earlier to make sure dma request triggered after that */
-	if (sdmac->event_id0) {
+	if (sdmac->event_id0 >= 0) {
 		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
 			return -EINVAL;
 		sdma_event_enable(sdmac, sdmac->event_id0);
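Per the commit subject, event id 0 is a real DMA request (the UART6 RX event), so the old truthiness test misread it as "no event configured"; the `>= 0` form works when "unused" is encoded as a negative value. A standalone illustration (names and sentinel hypothetical):

```c
#include <linux/types.h>

/*
 * Why a truthiness test breaks when 0 is a valid event id: with a
 * negative sentinel for "unused", only ">= 0" classifies event 0
 * (e.g. UART6 RX) as configured.
 */
static bool event_configured_buggy(int event_id)
{
	return event_id != 0;	/* wrong: valid event 0 looks unconfigured */
}

static bool event_configured_fixed(int event_id)
{
	return event_id >= 0;	/* only the negative sentinel means "unused" */
}
```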
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
@@ -281,7 +281,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
 	/* Do not allocate if desc are waiting for ack */
 	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
-		if (async_tx_test_ack(&dma_desc->txd)) {
+		if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
 			list_del(&dma_desc->node);
 			spin_unlock_irqrestore(&tdc->lock, flags);
 			dma_desc->txd.flags = 0;
@@ -756,10 +756,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
 	bool was_busy;
 
 	spin_lock_irqsave(&tdc->lock, flags);
-	if (list_empty(&tdc->pending_sg_req)) {
-		spin_unlock_irqrestore(&tdc->lock, flags);
-		return 0;
-	}
 
 	if (!tdc->busy)
 		goto skip_dma_stop;
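Two distinct fixes here. The first hunk guards descriptor recycling: a descriptor on the free list can already be acked by the client while completion callbacks are still queued against it (cb_count non-zero), and handing it out again at that point races with the tasklet. The second hunk drops the early return on an empty pending list so that terminate_all presumably still runs its callback/tasklet cleanup, closing the reported use-after-free. A self-contained sketch of the recycling guard, with a simplified stand-in for the driver's private descriptor:

```c
#include <linux/dmaengine.h>

/*
 * Reuse a descriptor only when the client has acked it AND no
 * completion callbacks remain outstanding against it; cb_count is
 * the driver's counter of callbacks not yet delivered.
 */
struct demo_desc {
	struct dma_async_tx_descriptor txd;
	int cb_count;
};

static bool demo_desc_reusable(struct demo_desc *d)
{
	return async_tx_test_ack(&d->txd) && !d->cb_count;
}
```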