Commit 133e2a31 authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  dma: Add SoF and EoF debugging to ipu_idmac.c, minor cleanup
  dw_dmac: add cyclic API to DW DMA driver
  dmaengine: Add privatecnt to revert DMA_PRIVATE property
  dmatest: add dma interrupts and callbacks
  dmatest: add xor test
  dmaengine: allow dma support for async_tx to be toggled
  async_tx: provide __async_inline for HAS_DMA=n archs
  dmaengine: kill some unused headers
  dmaengine: initialize tx_list in dma_async_tx_descriptor_init
  dma: i.MX31 IPU DMA robustness improvements
  dma: improve section assignment in i.MX31 IPU DMA driver
  dma: ipu_idmac driver cosmetic clean-up
  dmaengine: fail device registration if channel registration fails
parents 20bec8ab 8c6db1bb
crypto/async_tx/async_tx.c
@@ -30,7 +30,7 @@
 #ifdef CONFIG_DMA_ENGINE
 static int __init async_tx_init(void)
 {
-	dmaengine_get();
+	async_dmaengine_get();
 
 	printk(KERN_INFO "async_tx: api initialized (async)\n");
@@ -39,7 +39,7 @@ static int __init async_tx_init(void)
 
 static void __exit async_tx_exit(void)
 {
-	dmaengine_put();
+	async_dmaengine_put();
 }
 
 /**
@@ -56,7 +56,7 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 	if (depend_tx &&
 	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
 		return depend_tx->chan;
-	return dma_find_channel(tx_type);
+	return async_dma_find_channel(tx_type);
 }
 EXPORT_SYMBOL_GPL(__async_tx_find_channel);
 #else
crypto/async_tx/async_xor.c
@@ -30,11 +30,8 @@
 #include <linux/raid/xor.h>
 #include <linux/async_tx.h>
 
-/* do_async_xor - dma map the pages and perform the xor with an engine.
- * This routine is marked __always_inline so it can be compiled away
- * when CONFIG_DMA_ENGINE=n
- */
-static __always_inline struct dma_async_tx_descriptor *
+/* do_async_xor - dma map the pages and perform the xor with an engine */
+static __async_inline struct dma_async_tx_descriptor *
 do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	     unsigned int offset, int src_cnt, size_t len,
 	     enum async_tx_flags flags,
drivers/dma/Kconfig
@@ -98,6 +98,17 @@ config NET_DMA
 	  Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
 	  say N.
 
+config ASYNC_TX_DMA
+	bool "Async_tx: Offload support for the async_tx api"
+	depends on DMA_ENGINE
+	help
+	  This allows the async_tx api to take advantage of offload engines for
+	  memcpy, memset, xor, and raid6 p+q operations. If your platform has
+	  a dma engine that can perform raid operations and you have enabled
+	  MD_RAID456 say Y.
+
+	  If unsure, say N.
+
 config DMATEST
 	tristate "DMA Test client"
 	depends on DMA_ENGINE
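For orientation, a sketch of a .config fragment that exercises the new option. The three symbols are the ones named in the hunk above; the combination shown is illustrative, not taken from this patch:

    CONFIG_DMA_ENGINE=y
    # offload async_tx memcpy/memset/xor/pq work to a dma engine
    CONFIG_ASYNC_TX_DMA=y
    CONFIG_MD_RAID456=y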
drivers/dma/dmaengine.c
@@ -507,6 +507,7 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 		 * published in the general-purpose allocator
 		 */
 		dma_cap_set(DMA_PRIVATE, device->cap_mask);
+		device->privatecnt++;
 		err = dma_chan_get(chan);
 
 		if (err == -ENODEV) {
@@ -518,6 +519,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 				dma_chan_name(chan), err);
 			else
 				break;
+			if (--device->privatecnt == 0)
+				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 			chan->private = NULL;
 			chan = NULL;
 		}
@@ -537,6 +540,9 @@ void dma_release_channel(struct dma_chan *chan)
 	WARN_ONCE(chan->client_count != 1,
 		  "chan reference count %d != 1\n", chan->client_count);
 	dma_chan_put(chan);
+	/* drop PRIVATE cap enabled by __dma_request_channel() */
+	if (--chan->device->privatecnt == 0)
+		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 	chan->private = NULL;
 	mutex_unlock(&dma_list_mutex);
 }
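The counter makes DMA_PRIVATE reversible: __dma_request_channel() increments it alongside setting the capability, and dma_release_channel() decrements it, clearing the bit once the last privately held channel is returned. A minimal, hypothetical client showing the pairing these hunks keep balanced; dma_request_channel()/dma_release_channel() and the dma_cap_* helpers are the real APIs, the wrapper functions are invented for illustration:

    #include <linux/dmaengine.h>

    static struct dma_chan *grab_memcpy_chan(void)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_MEMCPY, mask);

            /* __dma_request_channel(): sets DMA_PRIVATE, privatecnt++ */
            return dma_request_channel(mask, NULL, NULL);
    }

    static void drop_memcpy_chan(struct dma_chan *chan)
    {
            /* --privatecnt; DMA_PRIVATE is cleared when it reaches zero,
             * so a device whose channels were only borrowed returns to
             * the general-purpose pool */
            dma_release_channel(chan);
    }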
@@ -602,6 +608,24 @@ void dmaengine_put(void)
 }
 EXPORT_SYMBOL(dmaengine_put);
 
+static int get_dma_id(struct dma_device *device)
+{
+	int rc;
+
+ idr_retry:
+	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
+		return -ENOMEM;
+	mutex_lock(&dma_list_mutex);
+	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
+	mutex_unlock(&dma_list_mutex);
+	if (rc == -EAGAIN)
+		goto idr_retry;
+	else if (rc != 0)
+		return rc;
+
+	return 0;
+}
+
 /**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
@@ -640,27 +664,25 @@ int dma_async_device_register(struct dma_device *device)
 	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 	if (!idr_ref)
 		return -ENOMEM;
-	atomic_set(idr_ref, 0);
- idr_retry:
-	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
-		return -ENOMEM;
-	mutex_lock(&dma_list_mutex);
-	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
-	mutex_unlock(&dma_list_mutex);
-	if (rc == -EAGAIN)
-		goto idr_retry;
-	else if (rc != 0)
+	rc = get_dma_id(device);
+	if (rc != 0) {
+		kfree(idr_ref);
 		return rc;
+	}
+
+	atomic_set(idr_ref, 0);
 
 	/* represent channels in sysfs. Probably want devs too */
 	list_for_each_entry(chan, &device->channels, device_node) {
+		rc = -ENOMEM;
 		chan->local = alloc_percpu(typeof(*chan->local));
 		if (chan->local == NULL)
-			continue;
+			goto err_out;
 		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
 		if (chan->dev == NULL) {
 			free_percpu(chan->local);
-			continue;
+			chan->local = NULL;
+			goto err_out;
 		}
 
 		chan->chan_id = chancnt++;
@@ -677,6 +699,8 @@ int dma_async_device_register(struct dma_device *device)
 		if (rc) {
 			free_percpu(chan->local);
 			chan->local = NULL;
+			kfree(chan->dev);
+			atomic_dec(idr_ref);
 			goto err_out;
 		}
 		chan->client_count = 0;
@@ -701,12 +725,23 @@ int dma_async_device_register(struct dma_device *device)
 		}
 	}
 	list_add_tail_rcu(&device->global_node, &dma_device_list);
+	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+		device->privatecnt++;	/* Always private */
 	dma_channel_rebalance();
 	mutex_unlock(&dma_list_mutex);
 
 	return 0;
 
 err_out:
+	/* if we never registered a channel just release the idr */
+	if (atomic_read(idr_ref) == 0) {
+		mutex_lock(&dma_list_mutex);
+		idr_remove(&dma_idr, device->dev_id);
+		mutex_unlock(&dma_list_mutex);
+		kfree(idr_ref);
+		return rc;
+	}
+
 	list_for_each_entry(chan, &device->channels, device_node) {
 		if (chan->local == NULL)
 			continue;
@@ -893,6 +928,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 {
 	tx->chan = chan;
 	spin_lock_init(&tx->lock);
+	INIT_LIST_HEAD(&tx->tx_list);
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
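Note: with tx_list now initialized centrally in dma_async_tx_descriptor_init(), the per-driver INIT_LIST_HEAD(&...->async_tx.tx_list) calls are redundant; the fsldma, ioat_dma, iop-adma, and mv_xor hunks below remove them.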
drivers/dma/dw_dmac_regs.h
@@ -126,6 +126,10 @@ struct dw_dma_regs {
 
 #define DW_REGLEN		0x400
 
+enum dw_dmac_flags {
+	DW_DMA_IS_CYCLIC = 0,
+};
+
 struct dw_dma_chan {
 	struct dma_chan		chan;
 	void __iomem		*ch_regs;
@@ -134,10 +138,12 @@ struct dw_dma_chan {
 
 	spinlock_t		lock;
 
 	/* these other elements are all protected by lock */
+	unsigned long		flags;
 	dma_cookie_t		completed;
 	struct list_head	active_list;
 	struct list_head	queue;
 	struct list_head	free_list;
+	struct dw_cyclic_desc	*cdesc;
 
 	unsigned int		descs_allocated;
 };
@@ -158,7 +164,6 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
 	return container_of(chan, struct dw_dma_chan, chan);
 }
 
-
 struct dw_dma {
 	struct dma_device	dma;
 	void __iomem		*regs;
drivers/dma/fsldma.c
@@ -354,7 +354,6 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
 		dma_async_tx_descriptor_init(&desc_sw->async_tx,
 						&fsl_chan->common);
 		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
-		INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
 		desc_sw->async_tx.phys = pdesc;
 	}
drivers/dma/ioat_dma.c
@@ -693,7 +693,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
 		desc_sw->async_tx.tx_submit = ioat2_tx_submit;
 		break;
 	}
-	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
 
 	desc_sw->hw = desc;
 	desc_sw->async_tx.phys = phys;
drivers/dma/iop-adma.c
@@ -498,7 +498,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 		slot->async_tx.tx_submit = iop_adma_tx_submit;
 		INIT_LIST_HEAD(&slot->chain_node);
 		INIT_LIST_HEAD(&slot->slot_node);
-		INIT_LIST_HEAD(&slot->async_tx.tx_list);
 		hw_desc = (char *) iop_chan->device->dma_desc_pool;
 		slot->async_tx.phys =
 			(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
drivers/dma/ipu/ipu_irq.c
@@ -352,7 +352,7 @@ static struct irq_chip ipu_irq_chip = {
 };
 
 /* Install the IRQ handler */
-int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
+int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
 {
 	struct ipu_platform_data *pdata = dev->dev.platform_data;
 	unsigned int irq, irq_base, i;
drivers/dma/mv_xor.c
@@ -632,7 +632,6 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 		slot->async_tx.tx_submit = mv_xor_tx_submit;
 		INIT_LIST_HEAD(&slot->chain_node);
 		INIT_LIST_HEAD(&slot->slot_node);
-		INIT_LIST_HEAD(&slot->async_tx.tx_list);
 		hw_desc = (char *) mv_chan->device->dma_desc_pool;
 		slot->async_tx.phys =
 			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
include/linux/async_tx.h
@@ -21,6 +21,15 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 
+/* on architectures without dma-mapping capabilities we need to ensure
+ * that the asynchronous path compiles away
+ */
+#ifdef CONFIG_HAS_DMA
+#define __async_inline
+#else
+#define __async_inline __always_inline
+#endif
+
 /**
  * dma_chan_ref - object used to manage dma channels received from the
  * dmaengine core.
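Why the macro matters: when CONFIG_HAS_DMA=n the dma-mapping helpers used on the offload path do not exist, so that path has to disappear at compile time rather than fail at link time. A schematic sketch of the pattern with hypothetical names; the real instance is do_async_xor() in async_xor.c:

    #include <linux/async_tx.h>

    /* fast_path() would touch engine-only helpers; __async_inline forces
     * it to be inlined on HAS_DMA=n builds so dead-code elimination can
     * drop it together with its unresolvable calls */
    static __async_inline struct dma_async_tx_descriptor *
    fast_path(struct dma_chan *chan, struct page *dest, size_t len)
    {
            /* would dma_map_page() and call ->device_prep_dma_xor() here */
            return NULL;
    }

    static struct dma_async_tx_descriptor *
    sketch_op(struct page *dest, struct page **srcs, size_t len)
    {
            /* constant NULL when CONFIG_DMA_ENGINE=n, so the branch folds */
            struct dma_chan *chan = async_tx_find_channel(NULL, DMA_XOR,
                                            &dest, 1, srcs, 1, len);

            if (chan)
                    return fast_path(chan, dest, len);

            return NULL;    /* fall back to the synchronous software path */
    }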
include/linux/dmaengine.h
@@ -23,9 +23,6 @@
 #include <linux/device.h>
 #include <linux/uio.h>
-#include <linux/kref.h>
-#include <linux/completion.h>
-#include <linux/rcupdate.h>
 #include <linux/dma-mapping.h>
 
 /**
@@ -205,6 +202,7 @@ struct dma_async_tx_descriptor {
 /**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
+ * @privatecnt: how many DMA channels are requested by dma_request_channel
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
  * @cap_mask: one or more dma_capability flags
@@ -227,6 +225,7 @@ struct dma_async_tx_descriptor {
 struct dma_device {
 
 	unsigned int chancnt;
+	unsigned int privatecnt;
 	struct list_head channels;
 	struct list_head global_node;
 	dma_cap_mask_t cap_mask;
@@ -291,6 +290,24 @@ static inline void net_dmaengine_put(void)
 }
 #endif
 
+#ifdef CONFIG_ASYNC_TX_DMA
+#define async_dmaengine_get()	dmaengine_get()
+#define async_dmaengine_put()	dmaengine_put()
+#define async_dma_find_channel(type) dma_find_channel(type)
+#else
+static inline void async_dmaengine_get(void)
+{
+}
+static inline void async_dmaengine_put(void)
+{
+}
+static inline struct dma_chan *
+async_dma_find_channel(enum dma_transaction_type type)
+{
+	return NULL;
+}
+#endif
+
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len);
 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -337,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
 	set_bit(tx_type, dstp->bits);
 }
 
+#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
+static inline void
+__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+{
+	clear_bit(tx_type, dstp->bits);
+}
+
 #define dma_cap_zero(mask) __dma_cap_zero(&(mask))
 static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
 {
include/linux/dw_dmac.h
@@ -74,4 +74,23 @@ struct dw_dma_slave {
 #define DWC_CFGL_HS_DST_POL	(1 << 18)	/* dst handshake active low */
 #define DWC_CFGL_HS_SRC_POL	(1 << 19)	/* src handshake active low */
 
+/* DMA API extensions */
+
+struct dw_cyclic_desc {
+	struct dw_desc	**desc;
+	unsigned long	periods;
+	void		(*period_callback)(void *param);
+	void		*period_callback_param;
+};
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+		enum dma_data_direction direction);
+void dw_dma_cyclic_free(struct dma_chan *chan);
+int dw_dma_cyclic_start(struct dma_chan *chan);
+void dw_dma_cyclic_stop(struct dma_chan *chan);
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+
 #endif /* DW_DMAC_H */
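To show how these extensions are meant to be driven, a hedged usage sketch; only the dw_dma_cyclic_* calls and the struct fields come from the declarations above, the caller and its helper are hypothetical:

    #include <linux/err.h>
    #include <linux/dmaengine.h>
    #include <linux/dw_dmac.h>

    static void period_done(void *param)
    {
            /* runs once per completed period, e.g. to consume half a ring */
    }

    /* Hypothetical helper: arm cyclic RX over a pre-mapped ring buffer. */
    static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
                               size_t buf_len, size_t period_len)
    {
            struct dw_cyclic_desc *cdesc;

            cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
                                       DMA_FROM_DEVICE);
            if (IS_ERR(cdesc))
                    return PTR_ERR(cdesc);

            cdesc->period_callback = period_done;
            cdesc->period_callback_param = chan;

            return dw_dma_cyclic_start(chan);
            /* tear down with dw_dma_cyclic_stop(), then dw_dma_cyclic_free() */
    }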