Commit 95475e57 authored by Dan Williams

async_tx: remove walk of tx->parent chain in dma_wait_for_async_tx

We currently walk the parent chain when waiting for a given tx to
complete; however, this walk may race with the driver's cleanup routine.
The routines in async_raid6_recov.c may fall back to the synchronous
path at any point, so we need to be prepared to call async_tx_quiesce()
(which calls dma_wait_for_async_tx).  To remove the ->parent walk we
guarantee that ->issue_pending() is invoked every time a dependency is
attached; we can then simply poll the initial descriptor until
completion.
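
For context, the synchronous fallback mentioned above funnels through
async_tx_quiesce().  A minimal sketch of that pattern (not the literal
async_raid6_recov.c code; the helper name is hypothetical) looks like:

    /* Sketch of the raid6-recovery fallback pattern: before the CPU
     * touches the buffers, any in-flight descriptor must be quiesced,
     * which ends up spinning in dma_wait_for_async_tx().
     */
    static void recover_on_cpu(struct dma_async_tx_descriptor *tx)
    {
            /* wait for tx to complete, ack it and clear the pointer */
            async_tx_quiesce(&tx);

            /* ... now it is safe to compute the recovery synchronously ... */
    }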

This also allows for a lighter-weight 'issue pending' implementation:
there is no longer a requirement to iterate through all the channels'
->issue_pending() routines as long as operations have been submitted in
an ordered chain.  async_tx_issue_pending() is added for this case.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent af1f951e
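
As an illustration of the lighter-weight 'issue pending' path, here is a
minimal caller sketch (a hypothetical helper, assuming tx is the tail
descriptor of a chain submitted in order through the async_* helpers):

    static void flush_and_wait(struct dma_async_tx_descriptor *tx)
    {
            /* kick only the channel carrying the chain; any dependency
             * that crossed channels has already been issued by
             * async_tx_channel_switch()
             */
            async_tx_issue_pending(tx);

            /* polling just this descriptor is safe now that
             * ->issue_pending() is guaranteed for every attached dependency
             */
            if (dma_wait_for_async_tx(tx) == DMA_ERROR)
                    pr_err("error waiting for tx\n");
    }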
@@ -77,8 +77,8 @@ static void
 async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 			struct dma_async_tx_descriptor *tx)
 {
-	struct dma_chan *chan;
-	struct dma_device *device;
+	struct dma_chan *chan = depend_tx->chan;
+	struct dma_device *device = chan->device;
 	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
 
 	/* first check to see if we can still append to depend_tx */
@@ -90,11 +90,11 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 	}
 	spin_unlock_bh(&depend_tx->lock);
 
-	if (!intr_tx)
+	/* attached dependency, flush the parent channel */
+	if (!intr_tx) {
+		device->device_issue_pending(chan);
 		return;
-
-	chan = depend_tx->chan;
-	device = chan->device;
+	}
 
 	/* see if we can schedule an interrupt
 	 * otherwise poll for completion
@@ -128,6 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 			intr_tx->tx_submit(intr_tx);
 			async_tx_ack(intr_tx);
 		}
+		device->device_issue_pending(chan);
 	} else {
 		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
 			panic("%s: DMA_ERROR waiting for depend_tx\n",
...
@@ -934,49 +934,24 @@ EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
 /* dma_wait_for_async_tx - spin wait for a transaction to complete
  * @tx: in-flight transaction to wait on
- *
- * This routine assumes that tx was obtained from a call to async_memcpy,
- * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
- * and submitted).  Walking the parent chain is only meant to cover for DMA
- * drivers that do not implement the DMA_INTERRUPT capability and may race with
- * the driver's descriptor cleanup routine.
  */
 enum dma_status
 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
-	enum dma_status status;
-	struct dma_async_tx_descriptor *iter;
-	struct dma_async_tx_descriptor *parent;
+	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 
 	if (!tx)
 		return DMA_SUCCESS;
 
-	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
-		  " %s\n", __func__, dma_chan_name(tx->chan));
-
-	/* poll through the dependency chain, return when tx is complete */
-	do {
-		iter = tx;
-
-		/* find the root of the unsubmitted dependency chain */
-		do {
-			parent = iter->parent;
-			if (!parent)
-				break;
-			else
-				iter = parent;
-		} while (parent);
-
-		/* there is a small window for ->parent == NULL and
-		 * ->cookie == -EBUSY
-		 */
-		while (iter->cookie == -EBUSY)
-			cpu_relax();
-
-		status = dma_sync_wait(iter->chan, iter->cookie);
-	} while (status == DMA_IN_PROGRESS || (iter != tx));
-
-	return status;
+	while (tx->cookie == -EBUSY) {
+		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
+			pr_err("%s timeout waiting for descriptor submission\n",
+				__func__);
+			return DMA_ERROR;
+		}
+		cpu_relax();
+	}
+	return dma_sync_wait(tx->chan, tx->cookie);
 }
 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
...
@@ -83,6 +83,24 @@ struct async_submit_ctl {
 
 #ifdef CONFIG_DMA_ENGINE
 #define async_tx_issue_pending_all dma_issue_pending_all
+
+/**
+ * async_tx_issue_pending - send pending descriptor to the hardware channel
+ * @tx: descriptor handle to retrieve hardware context
+ *
+ * Note: any dependent operations will have already been issued by
+ * async_tx_channel_switch, or (in the case of no channel switch) will
+ * be already pending on this channel.
+ */
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+	if (likely(tx)) {
+		struct dma_chan *chan = tx->chan;
+		struct dma_device *dma = chan->device;
+
+		dma->device_issue_pending(chan);
+	}
+}
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
@@ -98,6 +116,11 @@ static inline void async_tx_issue_pending_all(void)
 	do { } while (0);
 }
 
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+	do { } while (0);
+}
+
 static inline struct dma_chan *
 async_tx_find_channel(struct async_submit_ctl *submit,
 		      enum dma_transaction_type tx_type, struct page **dst,
...