Commit 2ba05622 authored by Dan Williams

dmaengine: provide a common 'issue_pending_all' implementation

async_tx and net_dma each have open-coded versions of issue_pending_all,
so provide a common routine in dmaengine.

The implementation needs to walk the global device list, so that list is now
protected by RCU, allowing dma_issue_pending_all to run locklessly.  Clients
protect themselves from channel-removal events by holding a dmaengine reference.
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

parent bec08513
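
For reference, the locking pattern the patch introduces in dmaengine.c is the classic RCU-protected list: writers mutate the list under dma_list_mutex using the _rcu list helpers, while the lockless reader only brackets its walk with rcu_read_lock()/rcu_read_unlock(). Below is a minimal self-contained sketch of that pattern; the my_dev names are illustrative, not part of the patch.

#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

/* Hypothetical element type, used only for this sketch. */
struct my_dev {
	struct list_head node;
};

static DEFINE_MUTEX(my_list_mutex);	/* serializes writers */
static LIST_HEAD(my_dev_list);		/* walked locklessly by readers */

/* Writer side: add under the mutex with the _rcu helper, mirroring
 * dma_async_device_register() below; removal would use list_del_rcu(). */
static void my_dev_add(struct my_dev *dev)
{
	mutex_lock(&my_list_mutex);
	list_add_tail_rcu(&dev->node, &my_dev_list);
	mutex_unlock(&my_list_mutex);
}

/* Reader side: no lock taken, just an RCU read-side critical section,
 * mirroring dma_issue_pending_all() below. */
static void my_dev_for_each(void (*fn)(struct my_dev *))
{
	struct my_dev *dev;

	rcu_read_lock();
	list_for_each_entry_rcu(dev, &my_dev_list, node)
		fn(dev);
	rcu_read_unlock();
}

Note that a writer removing an entry would normally have to defer freeing it (via synchronize_rcu() or call_rcu()) until readers drain; this patch instead relies on clients holding a dmaengine reference, so a channel cannot be torn down while dma_issue_pending_all() is mid-walk.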
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -45,18 +45,6 @@ static DEFINE_SPINLOCK(async_tx_lock);
 static LIST_HEAD(async_tx_master_list);
 
-/* async_tx_issue_pending_all - start all transactions on all channels */
-void async_tx_issue_pending_all(void)
-{
-	struct dma_chan_ref *ref;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-		ref->chan->device->device_issue_pending(ref->chan);
-	rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
-
 static void
 free_dma_chan_ref(struct rcu_head *rcu)
 {
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -70,6 +70,7 @@
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
 #include <linux/jiffies.h>
+#include <linux/rculist.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
@@ -365,6 +366,26 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/**
+ * dma_issue_pending_all - flush all pending operations across all channels
+ */
+void dma_issue_pending_all(void)
+{
+	struct dma_device *device;
+	struct dma_chan *chan;
+
+	WARN_ONCE(dmaengine_ref_count == 0,
+		  "client called %s without a reference", __func__);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(device, &dma_device_list, global_node)
+		list_for_each_entry(chan, &device->channels, device_node)
+			if (chan->client_count)
+				device->device_issue_pending(chan);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(dma_issue_pending_all);
+
 /**
  * nth_chan - returns the nth channel of the given capability
  * @cap: capability to match
@@ -490,7 +511,7 @@ void dma_async_client_register(struct dma_client *client)
 			err = dma_chan_get(chan);
 			if (err == -ENODEV) {
 				/* module removed before we could use it */
-				list_del_init(&device->global_node);
+				list_del_rcu(&device->global_node);
 				break;
 			} else if (err)
 				pr_err("dmaengine: failed to get %s: (%d)\n",
@@ -635,7 +656,7 @@ int dma_async_device_register(struct dma_device *device)
 			goto err_out;
 		}
 	}
-	list_add_tail(&device->global_node, &dma_device_list);
+	list_add_tail_rcu(&device->global_node, &dma_device_list);
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
@@ -677,7 +698,7 @@ void dma_async_device_unregister(struct dma_device *device)
 	struct dma_chan *chan;
 
 	mutex_lock(&dma_list_mutex);
-	list_del(&device->global_node);
+	list_del_rcu(&device->global_node);
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -59,7 +59,7 @@ enum async_tx_flags {
 };
 
 #ifdef CONFIG_DMA_ENGINE
-void async_tx_issue_pending_all(void);
+#define async_tx_issue_pending_all dma_issue_pending_all
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -471,6 +471,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+void dma_issue_pending_all(void);
 
 /* --- Helper iov-locking functions --- */
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2635,14 +2635,7 @@ static void net_rx_action(struct softirq_action *h)
 	 * There may not be any more sk_buffs coming right now, so push
 	 * any pending DMA copies to hardware
 	 */
-	if (!cpus_empty(net_dma.channel_mask)) {
-		int chan_idx;
-		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-			struct dma_chan *chan = net_dma.channels[chan_idx];
-			if (chan)
-				dma_async_memcpy_issue_pending(chan);
-		}
-	}
+	dma_issue_pending_all();
 #endif
 
 	return;
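
For completeness, here is a hedged sketch of how a client might drive the new entry point, assuming it already holds a dmaengine reference (otherwise the WARN_ONCE in dma_issue_pending_all() fires). example_async_copy is a made-up helper; dma_find_channel() is the lookup shown in the diff context above, and dma_async_memcpy_buf_to_buf() is the existing dmaengine memcpy wrapper of this era.

#include <linux/dmaengine.h>

/* Illustrative client code, not part of this patch. */
static void example_async_copy(void *dest, void *src, size_t len)
{
	/* Channel the engine assigned for memcpy on this CPU, if any. */
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);

	if (chan) {
		/* Queue the copy; it does not hit hardware yet. */
		if (dma_async_memcpy_buf_to_buf(chan, dest, src, len) < 0)
			pr_err("dma memcpy submission failed\n");
	}

	/* One call flushes pending work on every channel, replacing the
	 * per-channel dma_async_memcpy_issue_pending() loop net_dma used. */
	dma_issue_pending_all();
}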