Commit 6f68fbaa authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  DMAENGINE: extend the control command to include an arg
  async_tx: trim dma_async_tx_descriptor in 'no channel switch' case
  DMAENGINE: DMA40 fix for allocation of logical channel 0
  DMAENGINE: DMA40 support paused channel status
  dmaengine: mpc512x: Use resource_size
  DMA ENGINE: Do not reset 'private' of channel
  ioat: Remove duplicated devm_kzalloc() calls for ioatdma_device
  ioat3: disable cacheline-unaligned transfers for raid operations
  ioat2,3: convert to producer/consumer locking
  ioat: convert to circ_buf
  DMAENGINE: Support for ST-Ericssons DMA40 block v3
  async_tx: use of kzalloc/kfree requires the include of slab.h
  dmaengine: provide helper for setting txstate
  DMAENGINE: generic channel status v2
  DMAENGINE: generic slave control v2
  dma: timb-dma: Update comment and fix compiler warning
  dma: Add timb-dma
  DMAENGINE: COH 901 318 fix bytesleft
  DMAENGINE: COH 901 318 rename confusing vars
parents 6e451397 0b28330e
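
The headline API change in this merge replaces the per-driver `device_terminate_all` and `device_is_tx_complete` hooks with a generic `device_control` command (now taking an `unsigned long` argument) and a `device_tx_status` call. A minimal client-side sketch, assuming only the post-merge entry points visible in the diffs below and the 2.6.35-era `struct dma_tx_state` fields (last/used/residue):

```c
#include <linux/kernel.h>
#include <linux/dmaengine.h>

/* Sketch of the reworked entry points; error handling elided. */
static void stop_and_report(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	/* generic control hook, v2: a command plus an unsigned long arg */
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);

	/* generic status hook, v2: one txstate replaces the done/used pair */
	status = chan->device->device_tx_status(chan, cookie, &state);
	if (status != DMA_SUCCESS)
		pr_debug("last completed %d, last used %d, residue %u\n",
			 state.last, state.used, state.residue);
}
```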
@@ -102,27 +102,6 @@ struct coh901318_platform {
 	const int max_channels;
 };
 
-/**
- * coh901318_get_bytes_left() - Get number of bytes left on a current transfer
- * @chan: dma channel handle
- * return number of bytes left, or negative on error
- */
-u32 coh901318_get_bytes_left(struct dma_chan *chan);
-
-/**
- * coh901318_stop() - Stops dma transfer
- * @chan: dma channel handle
- * return 0 on success otherwise negative value
- */
-void coh901318_stop(struct dma_chan *chan);
-
-/**
- * coh901318_continue() - Resumes a stopped dma transfer
- * @chan: dma channel handle
- * return 0 on success otherwise negative value
- */
-void coh901318_continue(struct dma_chan *chan);
-
 /**
  * coh901318_filter_id() - DMA channel filter function
  * @chan: dma channel handle
...
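
These driver-private exports go away because the framework now carries the same operations: bytes-left maps to the residue field of `struct dma_tx_state`, and stop/continue map to the pause/resume commands added by the "generic slave control v2" patch in this series. A hedged sketch of the generic replacement calls:

```c
#include <linux/dmaengine.h>

/* Sketch: what a coh901318 client calls instead of the removed exports. */
static void pause_then_resume(struct dma_chan *chan)
{
	/* was coh901318_stop() */
	chan->device->device_control(chan, DMA_PAUSE, 0);

	/* was coh901318_continue() */
	chan->device->device_control(chan, DMA_RESUME, 0);
}
```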
/*
* arch/arm/plat-nomadik/include/plat/ste_dma40.h
*
* Copyright (C) ST-Ericsson 2007-2010
* License terms: GNU General Public License (GPL) version 2
* Author: Per Friden <per.friden@stericsson.com>
* Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/
#ifndef STE_DMA40_H
#define STE_DMA40_H
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
/* dev types for memcpy */
#define STEDMA40_DEV_DST_MEMORY (-1)
#define STEDMA40_DEV_SRC_MEMORY (-1)
/*
* Description of bitfields of channel_type variable is available in
* the info structure.
*/
/* Priority */
#define STEDMA40_INFO_PRIO_TYPE_POS 2
#define STEDMA40_HIGH_PRIORITY_CHANNEL (0x1 << STEDMA40_INFO_PRIO_TYPE_POS)
#define STEDMA40_LOW_PRIORITY_CHANNEL (0x2 << STEDMA40_INFO_PRIO_TYPE_POS)
/* Mode */
#define STEDMA40_INFO_CH_MODE_TYPE_POS 6
#define STEDMA40_CHANNEL_IN_PHY_MODE (0x1 << STEDMA40_INFO_CH_MODE_TYPE_POS)
#define STEDMA40_CHANNEL_IN_LOG_MODE (0x2 << STEDMA40_INFO_CH_MODE_TYPE_POS)
#define STEDMA40_CHANNEL_IN_OPER_MODE (0x3 << STEDMA40_INFO_CH_MODE_TYPE_POS)
/* Mode options */
#define STEDMA40_INFO_CH_MODE_OPT_POS 8
#define STEDMA40_PCHAN_BASIC_MODE (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
#define STEDMA40_PCHAN_MODULO_MODE (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
#define STEDMA40_PCHAN_DOUBLE_DST_MODE (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
#define STEDMA40_LCHAN_SRC_PHY_DST_LOG (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
#define STEDMA40_LCHAN_SRC_LOG_DST_PHS (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
#define STEDMA40_LCHAN_SRC_LOG_DST_LOG (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
/* Interrupt */
#define STEDMA40_INFO_TIM_POS 10
#define STEDMA40_NO_TIM_FOR_LINK (0x0 << STEDMA40_INFO_TIM_POS)
#define STEDMA40_TIM_FOR_LINK (0x1 << STEDMA40_INFO_TIM_POS)
/* End of channel_type configuration */
#define STEDMA40_ESIZE_8_BIT 0x0
#define STEDMA40_ESIZE_16_BIT 0x1
#define STEDMA40_ESIZE_32_BIT 0x2
#define STEDMA40_ESIZE_64_BIT 0x3
/* The value 4 indicates that PEN-reg shall be set to 0 */
#define STEDMA40_PSIZE_PHY_1 0x4
#define STEDMA40_PSIZE_PHY_2 0x0
#define STEDMA40_PSIZE_PHY_4 0x1
#define STEDMA40_PSIZE_PHY_8 0x2
#define STEDMA40_PSIZE_PHY_16 0x3
/*
 * The number of elements differs in logical and
 * physical mode
 */
#define STEDMA40_PSIZE_LOG_1 STEDMA40_PSIZE_PHY_2
#define STEDMA40_PSIZE_LOG_4 STEDMA40_PSIZE_PHY_4
#define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8
#define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
enum stedma40_flow_ctrl {
STEDMA40_NO_FLOW_CTRL,
STEDMA40_FLOW_CTRL,
};
enum stedma40_endianess {
STEDMA40_LITTLE_ENDIAN,
STEDMA40_BIG_ENDIAN
};
enum stedma40_periph_data_width {
STEDMA40_BYTE_WIDTH = STEDMA40_ESIZE_8_BIT,
STEDMA40_HALFWORD_WIDTH = STEDMA40_ESIZE_16_BIT,
STEDMA40_WORD_WIDTH = STEDMA40_ESIZE_32_BIT,
STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT
};
struct stedma40_half_channel_info {
enum stedma40_endianess endianess;
enum stedma40_periph_data_width data_width;
int psize;
enum stedma40_flow_ctrl flow_ctrl;
};
enum stedma40_xfer_dir {
STEDMA40_MEM_TO_MEM,
STEDMA40_MEM_TO_PERIPH,
STEDMA40_PERIPH_TO_MEM,
STEDMA40_PERIPH_TO_PERIPH
};
/**
 * struct stedma40_chan_cfg - Structure to be filled by client drivers.
 *
 * @dir: MEM 2 MEM, PERIPH 2 MEM, MEM 2 PERIPH, PERIPH 2 PERIPH
 * @channel_type: priority, mode, mode options and interrupt configuration.
 * @src_dev_type: Src device type
 * @dst_dev_type: Dst device type
 * @src_info: Parameters for the src half channel
 * @dst_info: Parameters for the dst half channel
 * @pre_transfer_data: Data to be passed on to the pre_transfer() function.
 * @pre_transfer: Callback used if needed before preparation of transfer.
 * Only called if device is set. The size argument is the number of bytes
 * to transfer (for a multi-element transfer, the size of the first element).
 *
 * This structure has to be filled in by the client drivers.
 * It is recommended to do all dma configurations for clients in the
 * machine (board) code.
 */
struct stedma40_chan_cfg {
enum stedma40_xfer_dir dir;
unsigned int channel_type;
int src_dev_type;
int dst_dev_type;
struct stedma40_half_channel_info src_info;
struct stedma40_half_channel_info dst_info;
void *pre_transfer_data;
int (*pre_transfer) (struct dma_chan *chan,
void *data,
int size);
};
/**
* struct stedma40_platform_data - Configuration struct for the dma device.
*
* @dev_len: length of dev_tx and dev_rx
* @dev_tx: mapping between destination event line and io address
* @dev_rx: mapping between source event line and io address
* @memcpy: list of memcpy event lines
* @memcpy_len: length of memcpy
* @memcpy_conf_phy: default configuration of physical channel memcpy
* @memcpy_conf_log: default configuration of logical channel memcpy
* @llis_per_log: number of max linked list items per logical channel
*
*/
struct stedma40_platform_data {
u32 dev_len;
const dma_addr_t *dev_tx;
const dma_addr_t *dev_rx;
int *memcpy;
u32 memcpy_len;
struct stedma40_chan_cfg *memcpy_conf_phy;
struct stedma40_chan_cfg *memcpy_conf_log;
unsigned int llis_per_log;
};
/**
 * stedma40_set_psize() - Used for changing the package size of an
 * already configured dma channel.
 *
 * @chan: dmaengine handle
 * @src_psize: new package size for src. (STEDMA40_PSIZE*)
 * @dst_psize: new package size for dst. (STEDMA40_PSIZE*)
 *
 * Returns 0 on success, otherwise a negative error number.
 */
int stedma40_set_psize(struct dma_chan *chan,
int src_psize,
int dst_psize);
/**
 * stedma40_filter() - Provides stedma40_chan_cfg to the
 * ste_dma40 dma driver via the dmaengine framework;
 * does some sanity checking of what is provided.
 *
 * Never called directly by a client. It is used by the dmaengine framework.
 * @chan: dmaengine handle.
 * @data: Must be of type struct stedma40_chan_cfg and is
 * the configuration for the framework.
 */
bool stedma40_filter(struct dma_chan *chan, void *data);
/**
 * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from
 * scatter-gather lists.
 *
 * @chan: dmaengine handle
 * @sgl_dst: Destination scatter list
 * @sgl_src: Source scatter list
 * @sgl_len: The length of each scatterlist. Both lists must be of equal
 * length and each element must match the corresponding element in the other
 * scatter list.
 * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
 */
struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
struct scatterlist *sgl_dst,
struct scatterlist *sgl_src,
unsigned int sgl_len,
unsigned long flags);
/**
 * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
 * (=device)
 *
 * @chan: dmaengine handle
 * @addr: source or destination physical address.
 * @size: bytes to transfer
 * @direction: direction of transfer
 * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
 */
static inline struct
dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
dma_addr_t addr,
unsigned int size,
enum dma_data_direction direction,
unsigned long flags)
{
struct scatterlist sg;
sg_init_table(&sg, 1);
sg.dma_address = addr;
sg.length = size;
return chan->device->device_prep_slave_sg(chan, &sg, 1,
direction, flags);
}
#endif
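
A hedged client-side sketch of how the header above is meant to be used: fill in a stedma40_chan_cfg, then hand it to dma_request_channel() with stedma40_filter(). The MY_DEV_RX event-line number is a hypothetical placeholder.

```c
#include <linux/dmaengine.h>
#include <plat/ste_dma40.h>

#define MY_DEV_RX 10	/* hypothetical source event line */

static struct dma_chan *get_rx_chan(void)
{
	static struct stedma40_chan_cfg cfg = {
		.dir = STEDMA40_PERIPH_TO_MEM,
		.channel_type = STEDMA40_CHANNEL_IN_LOG_MODE |
				STEDMA40_LOW_PRIORITY_CHANNEL,
		.src_dev_type = MY_DEV_RX,
		.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
		.src_info.data_width = STEDMA40_BYTE_WIDTH,
		.dst_info.data_width = STEDMA40_BYTE_WIDTH,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* stedma40_filter() checks and adopts the config for the channel */
	return dma_request_channel(mask, stedma40_filter, &cfg);
}
```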
@@ -81,18 +81,13 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 	struct dma_device *device = chan->device;
 	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;
 
-#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
-	BUG();
-#endif
-
 	/* first check to see if we can still append to depend_tx */
-	spin_lock_bh(&depend_tx->lock);
-	if (depend_tx->parent && depend_tx->chan == tx->chan) {
-		tx->parent = depend_tx;
-		depend_tx->next = tx;
+	txd_lock(depend_tx);
+	if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
+		txd_chain(depend_tx, tx);
 		intr_tx = NULL;
 	}
-	spin_unlock_bh(&depend_tx->lock);
+	txd_unlock(depend_tx);
 
 	/* attached dependency, flush the parent channel */
 	if (!intr_tx) {
@@ -111,24 +106,22 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
 	if (intr_tx) {
 		intr_tx->callback = NULL;
 		intr_tx->callback_param = NULL;
-		tx->parent = intr_tx;
-		/* safe to set ->next outside the lock since we know we are
+		/* safe to chain outside the lock since we know we are
 		 * not submitted yet
 		 */
-		intr_tx->next = tx;
+		txd_chain(intr_tx, tx);
 
 		/* check if we need to append */
-		spin_lock_bh(&depend_tx->lock);
-		if (depend_tx->parent) {
-			intr_tx->parent = depend_tx;
-			depend_tx->next = intr_tx;
+		txd_lock(depend_tx);
+		if (txd_parent(depend_tx)) {
+			txd_chain(depend_tx, intr_tx);
 			async_tx_ack(intr_tx);
 			intr_tx = NULL;
 		}
-		spin_unlock_bh(&depend_tx->lock);
+		txd_unlock(depend_tx);
 
 		if (intr_tx) {
-			intr_tx->parent = NULL;
+			txd_clear_parent(intr_tx);
 			intr_tx->tx_submit(intr_tx);
 			async_tx_ack(intr_tx);
 		}
@@ -176,21 +169,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 		 * 2/ dependencies are 1:1 i.e. two transactions can
 		 * not depend on the same parent
 		 */
-		BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
-		       tx->parent);
+		BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
+		       txd_parent(tx));
 
 		/* the lock prevents async_tx_run_dependencies from missing
 		 * the setting of ->next when ->parent != NULL
 		 */
-		spin_lock_bh(&depend_tx->lock);
-		if (depend_tx->parent) {
+		txd_lock(depend_tx);
+		if (txd_parent(depend_tx)) {
 			/* we have a parent so we can not submit directly
 			 * if we are staying on the same channel: append
 			 * else: channel switch
 			 */
 			if (depend_tx->chan == chan) {
-				tx->parent = depend_tx;
-				depend_tx->next = tx;
+				txd_chain(depend_tx, tx);
 				s = ASYNC_TX_SUBMITTED;
 			} else
 				s = ASYNC_TX_CHANNEL_SWITCH;
@@ -203,7 +195,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 			else
 				s = ASYNC_TX_CHANNEL_SWITCH;
 		}
-		spin_unlock_bh(&depend_tx->lock);
+		txd_unlock(depend_tx);
 
 		switch (s) {
 		case ASYNC_TX_SUBMITTED:
@@ -212,12 +204,12 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 			async_tx_channel_switch(depend_tx, tx);
 			break;
 		case ASYNC_TX_DIRECT_SUBMIT:
-			tx->parent = NULL;
+			txd_clear_parent(tx);
 			tx->tx_submit(tx);
 			break;
 		}
 	} else {
-		tx->parent = NULL;
+		txd_clear_parent(tx);
 		tx->tx_submit(tx);
 	}
...
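
For reference, the txd_* accessors these hunks switch to are thin wrappers added to dmaengine.h by the "trim dma_async_tx_descriptor" patch; in the channel-switch-enabled case they reduce to roughly the following sketch (when CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH is set, the lock and link fields are compiled out and these become no-ops):

```c
/* Sketch of the txd helpers (channel-switch case). */
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
	spin_lock_bh(&txd->lock);
}

static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
{
	spin_unlock_bh(&txd->lock);
}

static inline void txd_chain(struct dma_async_tx_descriptor *txd,
			     struct dma_async_tx_descriptor *next)
{
	txd->next = next;	/* link my successor... */
	next->parent = txd;	/* ...and set its parent in one step */
}

static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
	txd->parent = NULL;
}

static inline struct dma_async_tx_descriptor *
txd_next(struct dma_async_tx_descriptor *txd)
{
	return txd->next;
}
```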
@@ -141,6 +141,13 @@ config COH901318
 	help
 	  Enable support for ST-Ericsson COH 901 318 DMA.
 
+config STE_DMA40
+	bool "ST-Ericsson DMA40 support"
+	depends on ARCH_U8500
+	select DMA_ENGINE
+	help
+	  Support for ST-Ericsson DMA40 controller
+
 config AMCC_PPC440SPE_ADMA
 	tristate "AMCC PPC440SPe ADMA support"
 	depends on 440SPe || 440SP
@@ -149,6 +156,13 @@ config AMCC_PPC440SPE_ADMA
 	help
 	  Enable support for the AMCC PPC440SPe RAID engines.
 
+config TIMB_DMA
+	tristate "Timberdale FPGA DMA support"
+	depends on MFD_TIMBERDALE || HAS_IOMEM
+	select DMA_ENGINE
+	help
+	  Enable support for the Timberdale FPGA DMA engine.
+
 config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	bool
...
@@ -20,3 +20,5 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_SH_DMAE) += shdma.o
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
@@ -760,13 +760,18 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return NULL;
 }
 
-static void atc_terminate_all(struct dma_chan *chan)
+static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+		       unsigned long arg)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma		*atdma = to_at_dma(chan->device);
 	struct at_desc		*desc, *_desc;
 	LIST_HEAD(list);
 
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
+
 	/*
 	 * This is only called when something went wrong elsewhere, so
 	 * we don't really care about the data. Just disable the
@@ -790,32 +795,30 @@ static void atc_terminate_all(struct dma_chan *chan)
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		atc_chain_complete(atchan, desc);
+
+	return 0;
 }
 
 /**
- * atc_is_tx_complete - poll for transaction completion
+ * atc_tx_status - poll for transaction completion
  * @chan: DMA channel
  * @cookie: transaction identifier to check status of
- * @done: if not %NULL, updated with last completed transaction
- * @used: if not %NULL, updated with last used transaction
+ * @txstate: if not %NULL, updated with transaction state
  *
- * If @done and @used are passed in, upon return they reflect the driver
+ * If @txstate is passed in, upon return it reflects the driver
  * internal state and can be used with dma_async_is_complete() to check
  * the status of multiple cookies without re-checking hardware state.
  */
 static enum dma_status
-atc_is_tx_complete(struct dma_chan *chan,
+atc_tx_status(struct dma_chan *chan,
 		dma_cookie_t cookie,
-		dma_cookie_t *done, dma_cookie_t *used)
+		struct dma_tx_state *txstate)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	dma_cookie_t		last_used;
 	dma_cookie_t		last_complete;
 	enum dma_status		ret;
 
-	dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n",
-		 cookie, done ? *done : 0, used ? *used : 0);
-
 	spin_lock_bh(&atchan->lock);
 
 	last_complete = atchan->completed_cookie;
@@ -833,10 +836,10 @@ atc_is_tx_complete(struct dma_chan *chan,
 
 	spin_unlock_bh(&atchan->lock);
 
-	if (done)
-		*done = last_complete;
-	if (used)
-		*used = last_used;
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
+	dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
+		 cookie, last_complete ? last_complete : 0,
+		 last_used ? last_used : 0);
 
 	return ret;
 }
@@ -1082,7 +1085,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	/* set base routines */
 	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
 	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
-	atdma->dma_common.device_is_tx_complete = atc_is_tx_complete;
+	atdma->dma_common.device_tx_status = atc_tx_status;
 	atdma->dma_common.device_issue_pending = atc_issue_pending;
 	atdma->dma_common.dev = &pdev->dev;
 
@@ -1092,7 +1095,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
 	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
-		atdma->dma_common.device_terminate_all = atc_terminate_all;
+		atdma->dma_common.device_control = atc_control;
 	}
 
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
...
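
All of the status-hook conversions in this merge lean on the new dma_set_tx_state() helper ("dmaengine: provide helper for setting txstate"); in essence it is just a NULL-tolerant setter, roughly:

```c
/* Sketch of the helper the conversions above rely on. */
static inline void dma_set_tx_state(struct dma_tx_state *st,
				    dma_cookie_t last, dma_cookie_t used,
				    u32 residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}
```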
[diff collapsed, contents not shown]
@@ -515,7 +515,6 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
 				break;
 			if (--device->privatecnt == 0)
 				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
-			chan->private = NULL;
 			chan = NULL;
 		}
 	}
@@ -537,7 +536,6 @@ void dma_release_channel(struct dma_chan *chan)
 	/* drop PRIVATE cap enabled by __dma_request_channel() */
 	if (--chan->device->privatecnt == 0)
 		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
-	chan->private = NULL;
 	mutex_unlock(&dma_list_mutex);
 }
 EXPORT_SYMBOL_GPL(dma_release_channel);
@@ -695,11 +693,11 @@ int dma_async_device_register(struct dma_device *device)
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 		!device->device_prep_slave_sg);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-		!device->device_terminate_all);
+		!device->device_control);
 
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
-	BUG_ON(!device->device_is_tx_complete);
+	BUG_ON(!device->device_tx_status);
 	BUG_ON(!device->device_issue_pending);
 	BUG_ON(!device->dev);
 
@@ -978,7 +976,9 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 	struct dma_chan *chan)
 {
 	tx->chan = chan;
+#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	spin_lock_init(&tx->lock);
+#endif
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
@@ -1011,7 +1011,7 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
  */
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
 {
-	struct dma_async_tx_descriptor *dep = tx->next;
+	struct dma_async_tx_descriptor *dep = txd_next(tx);
 	struct dma_async_tx_descriptor *dep_next;
 	struct dma_chan *chan;
 
@@ -1019,7 +1019,7 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
 		return;
 
 	/* we'll submit tx->next now, so clear the link */
-	tx->next = NULL;
+	txd_clear_next(tx);
 	chan = dep->chan;
 
 	/* keep submitting up until a channel switch is detected
@@ -1027,14 +1027,14 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
 	 * processing the interrupt from async_tx_channel_switch
 	 */
 	for (; dep; dep = dep_next) {
-		spin_lock_bh(&dep->lock);
-		dep->parent = NULL;
-		dep_next = dep->next;
+		txd_lock(dep);
+		txd_clear_parent(dep);
+		dep_next = txd_next(dep);
 		if (dep_next && dep_next->chan == chan)
-			dep->next = NULL; /* ->next will be submitted */
+			txd_clear_next(dep); /* ->next will be submitted */
 		else
 			dep_next = NULL; /* submit current dep and terminate */
-		spin_unlock_bh(&dep->lock);
+		txd_unlock(dep);
 
 		dep->tx_submit(dep);
 	}
...
@@ -781,13 +781,18 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return NULL;
 }
 
-static void dwc_terminate_all(struct dma_chan *chan)
+static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+		       unsigned long arg)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
 	struct dw_dma		*dw = to_dw_dma(chan->device);
 	struct dw_desc		*desc, *_desc;
 	LIST_HEAD(list);
 
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
+
 	/*
 	 * This is only called when something went wrong elsewhere, so
 	 * we don't really care about the data. Just disable the
@@ -810,12 +815,14 @@ static void dwc_terminate_all(struct dma_chan *chan)
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		dwc_descriptor_complete(dwc, desc);
+
+	return 0;
 }
 
 static enum dma_status
-dwc_is_tx_complete(struct dma_chan *chan,
+dwc_tx_status(struct dma_chan *chan,
 		dma_cookie_t cookie,
-		dma_cookie_t *done, dma_cookie_t *used)
+		struct dma_tx_state *txstate)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
 	dma_cookie_t		last_used;
@@ -835,10 +842,7 @@ dwc_is_tx_complete(struct dma_chan *chan,
 		ret = dma_async_is_complete(cookie, last_complete, last_used);
 	}
 
-	if (done)
-		*done = last_complete;
-	if (used)
-		*used = last_used;
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 
 	return ret;
 }
@@ -1338,9 +1342,9 @@ static int __init dw_probe(struct platform_device *pdev)
 	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
 	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
-	dw->dma.device_terminate_all = dwc_terminate_all;
+	dw->dma.device_control = dwc_control;
 
-	dw->dma.device_is_tx_complete = dwc_is_tx_complete;
+	dw->dma.device_tx_status = dwc_tx_status;
 	dw->dma.device_issue_pending = dwc_issue_pending;
 
 	dma_writel(dw, CFG, DW_CFG_DMA_EN);
...
@@ -775,13 +775,18 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 	return NULL;
 }
 
-static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
+static int fsl_dma_device_control(struct dma_chan *dchan,
+				  enum dma_ctrl_cmd cmd, unsigned long arg)
 {
 	struct fsldma_chan *chan;
 	unsigned long flags;
 
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
+
 	if (!dchan)
-		return;
+		return -EINVAL;
 
 	chan = to_fsl_chan(dchan);
 
@@ -795,6 +800,8 @@ static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
 	fsldma_free_desc_list(chan, &chan->ld_running);
 
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	return 0;
 }
 
 /**
@@ -965,13 +972,12 @@ static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
 }
 
 /**
- * fsl_dma_is_complete - Determine the DMA status
+ * fsl_tx_status - Determine the DMA status
  * @chan : Freescale DMA channel
  */
-static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
+static enum dma_status fsl_tx_status(struct dma_chan *dchan,
 					dma_cookie_t cookie,
-					dma_cookie_t *done,
-					dma_cookie_t *used)
+					struct dma_tx_state *txstate)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
 	dma_cookie_t last_used;
@@ -982,11 +988,7 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
 	last_used = dchan->cookie;
 	last_complete = chan->completed_cookie;
 
-	if (done)
-		*done = last_complete;
-
-	if (used)
-		*used = last_used;
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 
 	return dma_async_is_complete(cookie, last_complete, last_used);
 }
@@ -1330,10 +1332,10 @@ static int __devinit fsldma_of_probe(struct of_device *op,
 	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
 	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
-	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
+	fdev->common.device_tx_status = fsl_tx_status;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
 	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
-	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
+	fdev->common.device_control = fsl_dma_device_control;
 	fdev->common.dev = &op->dev;
 
 	dev_set_drvdata(&op->dev, fdev);
...
@@ -727,18 +727,18 @@ static void ioat1_timer_event(unsigned long data)
 }
 
 enum dma_status
-ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
-		     dma_cookie_t *done, dma_cookie_t *used)
+ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+		   struct dma_tx_state *txstate)
 {
 	struct ioat_chan_common *chan = to_chan_common(c);
 	struct ioatdma_device *device = chan->device;
 
-	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
+	if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
 		return DMA_SUCCESS;
 
 	device->cleanup_fn((unsigned long) c);
 
-	return ioat_is_complete(c, cookie, done, used);
+	return ioat_tx_status(c, cookie, txstate);
 }
 
 static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
@@ -858,7 +858,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device)
 	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
 	if (tmo == 0 ||
-	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+	    dma->device_tx_status(dma_chan, cookie, NULL)
 					!= DMA_SUCCESS) {
 		dev_err(dev, "Self-test copy timed out, disabling\n");
 		err = -ENODEV;
@@ -1199,7 +1199,7 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
 	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
-	dma->device_is_tx_complete = ioat_is_dma_complete;
+	dma->device_tx_status = ioat_dma_tx_status;
 
 	err = ioat_probe(device);
 	if (err)
...
@@ -96,6 +96,7 @@ struct ioat_chan_common {
 	#define IOAT_COMPLETION_ACK 1
 	#define IOAT_RESET_PENDING 2
 	#define IOAT_KOBJ_INIT_FAIL 3
+	#define IOAT_RESHAPE_PENDING 4
 	struct timer_list timer;
 	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
 	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
@@ -142,15 +143,14 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
 }
 
 /**
- * ioat_is_complete - poll the status of an ioat transaction
+ * ioat_tx_status - poll the status of an ioat transaction
  * @c: channel handle
  * @cookie: transaction identifier
- * @done: if set, updated with last completed transaction
- * @used: if set, updated with last used transaction
+ * @txstate: if set, updated with the transaction state
  */
 static inline enum dma_status
-ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
-		 dma_cookie_t *done, dma_cookie_t *used)
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+	       struct dma_tx_state *txstate)
 {
 	struct ioat_chan_common *chan = to_chan_common(c);
 	dma_cookie_t last_used;
@@ -159,10 +159,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
 	last_used = c->cookie;
 	last_complete = chan->completed_cookie;
 
-	if (done)
-		*done = last_complete;
-	if (used)
-		*used = last_used;
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 
 	return dma_async_is_complete(cookie, last_complete, last_used);
 }
@@ -338,8 +335,8 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
 unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
 		       struct ioat_chan_common *chan, int idx);
-enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
-				     dma_cookie_t *done, dma_cookie_t *used);
+enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+				   struct dma_tx_state *txstate);
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 		    size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
...
[diff collapsed, contents not shown]
@@ -22,6 +22,7 @@
 #define IOATDMA_V2_H
 
 #include <linux/dmaengine.h>
+#include <linux/circ_buf.h>
 #include "dma.h"
 #include "hw.h"
 
@@ -49,8 +50,9 @@ extern int ioat_ring_alloc_order;
  * @tail: cleanup index
  * @dmacount: identical to 'head' except for occasionally resetting to zero
  * @alloc_order: log2 of the number of allocated descriptors
+ * @produce: number of descriptors to produce at submit time
  * @ring: software ring buffer implementation of hardware ring
- * @ring_lock: protects ring attributes
+ * @prep_lock: serializes descriptor preparation (producers)
  */
 struct ioat2_dma_chan {
 	struct ioat_chan_common base;
@@ -60,8 +62,9 @@ struct ioat2_dma_chan {
 	u16 tail;
 	u16 dmacount;
 	u16 alloc_order;
+	u16 produce;
 	struct ioat_ring_ent **ring;
-	spinlock_t ring_lock;
+	spinlock_t prep_lock;
 };
 
 static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
@@ -71,38 +74,26 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
 	return container_of(chan, struct ioat2_dma_chan, base);
 }
 
-static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat)
+static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
 {
-	return (1 << ioat->alloc_order) - 1;
+	return 1 << ioat->alloc_order;
 }
 
 /* count of descriptors in flight with the engine */
 static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
 {
-	return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat);
+	return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat));
 }
 
 /* count of descriptors pending submission to hardware */
 static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
 {
-	return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat);
+	return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
 }
 
 static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
 {
-	u16 num_descs = ioat2_ring_mask(ioat) + 1;
-	u16 active = ioat2_ring_active(ioat);
-
-	BUG_ON(active > num_descs);
-
-	return num_descs - active;
-}
-
-/* assumes caller already checked space */
-static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len)
-{
-	ioat->head += len;
-	return ioat->head - len;
+	return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
 }
 
 static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
@@ -151,7 +142,7 @@ struct ioat_ring_ent {
 static inline struct ioat_ring_ent *
 ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
 {
-	return ioat->ring[idx & ioat2_ring_mask(ioat)];
+	return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)];
 }
 
 static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
@@ -168,7 +159,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca);
 int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca);
 struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
 struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs);
+int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
 int ioat2_enumerate_channels(struct ioatdma_device *device);
 struct dma_async_tx_descriptor *
 ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
...
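
The ring-accounting hunks above are behavior-preserving for the power-of-two rings ioat2 uses: CIRC_CNT() from &lt;linux/circ_buf.h&gt; is just the masked difference the driver used to open-code. Roughly:

```c
#include <linux/types.h>
#include <linux/circ_buf.h>

/* CIRC_CNT(head, tail, size) is (((head) - (tail)) & ((size) - 1)), so with
 * size = 1 << alloc_order it matches the old open-coded
 * "(ioat->head - ioat->tail) & ioat2_ring_mask(ioat)". */
static inline u16 ring_active_sketch(u16 head, u16 tail, u16 size)
{
	return CIRC_CNT(head, tail, size);
}
```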
[diff collapsed, contents not shown]
@@ -138,15 +138,10 @@ static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_devic
 	if (err)
 		return err;
 
-	device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
-	if (!device)
-		return -ENOMEM;
-
-	pci_set_master(pdev);
-
 	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
 	if (!device)
 		return -ENOMEM;
+	pci_set_master(pdev);
 	pci_set_drvdata(pdev, device);
 
 	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
...
@@ -894,14 +894,14 @@ static void iop_adma_free_chan_resources(struct dma_chan *chan)
 }
 
 /**
- * iop_adma_is_complete - poll the status of an ADMA transaction
+ * iop_adma_status - poll the status of an ADMA transaction
  * @chan: ADMA channel handle
  * @cookie: ADMA transaction identifier
+ * @txstate: a holder for the current state of the channel or NULL
  */
-static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
+static enum dma_status iop_adma_status(struct dma_chan *chan,
 					dma_cookie_t cookie,
-					dma_cookie_t *done,
-					dma_cookie_t *used)
+					struct dma_tx_state *txstate)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	dma_cookie_t last_used;
@@ -910,12 +910,7 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
 	last_used = chan->cookie;
 	last_complete = iop_chan->completed_cookie;
-
-	if (done)
-		*done = last_complete;
-	if (used)
-		*used = last_used;
-
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
 	if (ret == DMA_SUCCESS)
 		return ret;
@@ -924,11 +919,7 @@ static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
 	last_used = chan->cookie;
 	last_complete = iop_chan->completed_cookie;
-
-	if (done)
-		*done = last_complete;
-	if (used)
-		*used = last_used;
-
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 	return dma_async_is_complete(cookie, last_complete, last_used);
 }
@@ -1043,7 +1034,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
 	iop_adma_issue_pending(dma_chan);
 	msleep(1);
 
-	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+	if (iop_adma_status(dma_chan, cookie, NULL) !=
 			DMA_SUCCESS) {
 		dev_printk(KERN_ERR, dma_chan->device->dev,
 			"Self-test copy timed out, disabling\n");
@@ -1143,7 +1134,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 	iop_adma_issue_pending(dma_chan);
 	msleep(8);
 
-	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+	if (iop_adma_status(dma_chan, cookie, NULL) !=
 		DMA_SUCCESS) {
 		dev_printk(KERN_ERR, dma_chan->device->dev,
 			"Self-test xor timed out, disabling\n");
@@ -1190,7 +1181,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 	iop_adma_issue_pending(dma_chan);
 	msleep(8);
 
-	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
 		dev_printk(KERN_ERR, dma_chan->device->dev,
 			"Self-test zero sum timed out, disabling\n");
 		err = -ENODEV;
@@ -1214,7 +1205,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 	iop_adma_issue_pending(dma_chan);
 	msleep(8);
 
-	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
 		dev_printk(KERN_ERR, dma_chan->device->dev,
 			"Self-test memset timed out, disabling\n");
 		err = -ENODEV;
@@ -1246,7 +1237,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
 	iop_adma_issue_pending(dma_chan);
 	msleep(8);
 
-	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+	if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
 		dev_printk(KERN_ERR, dma_chan->device->dev,
 			"Self-test non-zero sum timed out, disabling\n");
 		err = -ENODEV;
@@ -1341,7 +1332,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
 	iop_adma_issue_pending(dma_chan);
 	msleep(8);
 
-	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+	if (iop_adma_status(dma_chan, cookie, NULL) !=
 		DMA_SUCCESS) {
 		dev_err(dev, "Self-test pq timed out, disabling\n");
 		err = -ENODEV;
@@ -1378,7 +1369,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
 	iop_adma_issue_pending(dma_chan);
 	msleep(8);
 
-	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+	if (iop_adma_status(dma_chan, cookie, NULL) !=
 		DMA_SUCCESS) {
 		dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
 		err = -ENODEV;
@@ -1410,7 +1401,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
 	iop_adma_issue_pending(dma_chan);
 	msleep(8);
 
-	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
+	if (iop_adma_status(dma_chan, cookie, NULL) !=
 		DMA_SUCCESS) {
 		dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
 		err = -ENODEV;
@@ -1508,7 +1499,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
 	/* set base routines */
 	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
 	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
-	dma_dev->device_is_tx_complete = iop_adma_is_complete;
+	dma_dev->device_tx_status = iop_adma_status;
 	dma_dev->device_issue_pending = iop_adma_issue_pending;
 	dma_dev->dev = &pdev->dev;
...
@@ -1472,13 +1472,18 @@ static void idmac_issue_pending(struct dma_chan *chan)
 	 */
 }
 
-static void __idmac_terminate_all(struct dma_chan *chan)
+static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			   unsigned long arg)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
 	struct idmac *idmac = to_idmac(chan->device);
 	unsigned long flags;
 	int i;
 
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
+
 	ipu_disable_channel(idmac, ichan,
 			    ichan->status >= IPU_CHANNEL_ENABLED);
 
@@ -1505,17 +1510,23 @@ static void __idmac_terminate_all(struct dma_chan *chan)
 	tasklet_enable(&to_ipu(idmac)->tasklet);
 
 	ichan->status = IPU_CHANNEL_INITIALIZED;
+
+	return 0;
 }
 
-static void idmac_terminate_all(struct dma_chan *chan)
+static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			 unsigned long arg)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
+	int ret;
 
 	mutex_lock(&ichan->chan_mutex);
 
-	__idmac_terminate_all(chan);
+	ret = __idmac_control(chan, cmd, arg);
 
 	mutex_unlock(&ichan->chan_mutex);
+
+	return ret;
 }
 
 #ifdef DEBUG
@@ -1607,7 +1618,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
 
 	mutex_lock(&ichan->chan_mutex);
 
-	__idmac_terminate_all(chan);
+	__idmac_control(chan, DMA_TERMINATE_ALL, 0);
 
 	if (ichan->status > IPU_CHANNEL_FREE) {
 #ifdef DEBUG
@@ -1637,15 +1648,12 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
 	tasklet_schedule(&to_ipu(idmac)->tasklet);
 }
 
-static enum dma_status idmac_is_tx_complete(struct dma_chan *chan,
-		dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+static enum dma_status idmac_tx_status(struct dma_chan *chan,
+		       dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
 
-	if (done)
-		*done = ichan->completed;
-	if (used)
-		*used = chan->cookie;
+	dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0);
 	if (cookie != chan->cookie)
 		return DMA_ERROR;
 	return DMA_SUCCESS;
@@ -1664,12 +1672,12 @@ static int __init ipu_idmac_init(struct ipu *ipu)
 	dma->dev = ipu->dev;
 	dma->device_alloc_chan_resources = idmac_alloc_chan_resources;
 	dma->device_free_chan_resources = idmac_free_chan_resources;
-	dma->device_is_tx_complete = idmac_is_tx_complete;
+	dma->device_tx_status = idmac_tx_status;
 	dma->device_issue_pending = idmac_issue_pending;
 
 	/* Compulsory for DMA_SLAVE fields */
 	dma->device_prep_slave_sg = idmac_prep_slave_sg;
-	dma->device_terminate_all = idmac_terminate_all;
+	dma->device_control = idmac_control;
 
 	INIT_LIST_HEAD(&dma->channels);
 	for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1703,7 +1711,7 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
 	for (i = 0; i < IPU_CHANNELS_NUM; i++) {
 		struct idmac_channel *ichan = ipu->channel + i;
 
-		idmac_terminate_all(&ichan->dma_chan);
+		idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0);
 		idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
 	}
...
@@ -541,8 +541,8 @@ static void mpc_dma_issue_pending(struct dma_chan *chan)
 
 /* Check request completion status */
 static enum dma_status
-mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
-	       dma_cookie_t *done, dma_cookie_t *used)
+mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+	       struct dma_tx_state *txstate)
 {
 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
 	unsigned long flags;
@@ -554,12 +554,7 @@ mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
 	last_complete = mchan->completed_cookie;
 	spin_unlock_irqrestore(&mchan->lock, flags);
 
-	if (done)
-		*done = last_complete;
-
-	if (used)
-		*used = last_used;
-
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 	return dma_async_is_complete(cookie, last_complete, last_used);
 }
 
@@ -663,7 +658,7 @@ static int __devinit mpc_dma_probe(struct of_device *op,
 	}
 
 	regs_start = res.start;
-	regs_size = res.end - res.start + 1;
+	regs_size = resource_size(&res);
 
 	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
 		dev_err(dev, "Error requesting memory region!\n");
@@ -694,7 +689,7 @@ static int __devinit mpc_dma_probe(struct of_device *op,
 	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
 	dma->device_issue_pending = mpc_dma_issue_pending;
-	dma->device_is_tx_complete = mpc_dma_is_tx_complete;
+	dma->device_tx_status = mpc_dma_tx_status;
 	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
 
 	INIT_LIST_HEAD(&dma->channels);
...
@@ -810,14 +810,14 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
 }
 /**
- * mv_xor_is_complete - poll the status of an XOR transaction
+ * mv_xor_status - poll the status of an XOR transaction
  * @chan: XOR channel handle
  * @cookie: XOR transaction identifier
+ * @txstate: XOR transactions state holder (or NULL)
  */
-static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
+static enum dma_status mv_xor_status(struct dma_chan *chan,
 					  dma_cookie_t cookie,
-					  dma_cookie_t *done,
-					  dma_cookie_t *used)
+					  struct dma_tx_state *txstate)
 {
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
 	dma_cookie_t last_used;
@@ -827,10 +827,7 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
 	last_used = chan->cookie;
 	last_complete = mv_chan->completed_cookie;
 	mv_chan->is_complete_cookie = cookie;
-	if (done)
-		*done = last_complete;
-	if (used)
-		*used = last_used;
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
 	if (ret == DMA_SUCCESS) {
@@ -842,11 +839,7 @@ static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
 	last_used = chan->cookie;
 	last_complete = mv_chan->completed_cookie;
-	if (done)
-		*done = last_complete;
-	if (used)
-		*used = last_used;
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 	return dma_async_is_complete(cookie, last_complete, last_used);
 }
@@ -975,7 +968,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
 	async_tx_ack(tx);
 	msleep(1);
-	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+	if (mv_xor_status(dma_chan, cookie, NULL) !=
 	    DMA_SUCCESS) {
 		dev_printk(KERN_ERR, dma_chan->device->dev,
 			   "Self-test copy timed out, disabling\n");
@@ -1073,7 +1066,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
 	async_tx_ack(tx);
 	msleep(8);
-	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
+	if (mv_xor_status(dma_chan, cookie, NULL) !=
 	    DMA_SUCCESS) {
 		dev_printk(KERN_ERR, dma_chan->device->dev,
 			   "Self-test xor timed out, disabling\n");
@@ -1168,7 +1161,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
 	/* set base routines */
 	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
 	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
-	dma_dev->device_is_tx_complete = mv_xor_is_complete;
+	dma_dev->device_tx_status = mv_xor_status;
 	dma_dev->device_issue_pending = mv_xor_issue_pending;
 	dma_dev->dev = &pdev->dev;
...
@@ -3935,12 +3935,13 @@ static void ppc440spe_adma_free_chan_resources(struct dma_chan *chan)
 }
 /**
- * ppc440spe_adma_is_complete - poll the status of an ADMA transaction
+ * ppc440spe_adma_tx_status - poll the status of an ADMA transaction
  * @chan: ADMA channel handle
  * @cookie: ADMA transaction identifier
+ * @txstate: a holder for the current state of the channel
  */
-static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
-	dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
+	dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct ppc440spe_adma_chan *ppc440spe_chan;
 	dma_cookie_t last_used;
@@ -3951,10 +3952,7 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
 	last_used = chan->cookie;
 	last_complete = ppc440spe_chan->completed_cookie;
-	if (done)
-		*done = last_complete;
-	if (used)
-		*used = last_used;
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
 	if (ret == DMA_SUCCESS)
@@ -3965,10 +3963,7 @@ static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan,
 	last_used = chan->cookie;
 	last_complete = ppc440spe_chan->completed_cookie;
-	if (done)
-		*done = last_complete;
-	if (used)
-		*used = last_used;
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 	return dma_async_is_complete(cookie, last_complete, last_used);
 }
@@ -4180,7 +4175,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
 		ppc440spe_adma_alloc_chan_resources;
 	adev->common.device_free_chan_resources =
 		ppc440spe_adma_free_chan_resources;
-	adev->common.device_is_tx_complete = ppc440spe_adma_is_complete;
+	adev->common.device_tx_status = ppc440spe_adma_tx_status;
 	adev->common.device_issue_pending = ppc440spe_adma_issue_pending;
 	/* Set prep routines based on capability */
...
@@ -597,12 +597,17 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
 			       direction, flags);
 }
-static void sh_dmae_terminate_all(struct dma_chan *chan)
+static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			   unsigned long arg)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
 	if (!chan)
-		return;
+		return -EINVAL;
 	dmae_halt(sh_chan);
@@ -618,6 +623,8 @@ static void sh_dmae_terminate_all(struct dma_chan *chan)
 	spin_unlock_bh(&sh_chan->desc_lock);
 	sh_dmae_chan_ld_cleanup(sh_chan, true);
+
+	return 0;
 }
 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -749,10 +756,9 @@ static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
 	sh_chan_xfer_ld_queue(sh_chan);
 }
-static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
+static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 					dma_cookie_t cookie,
-					dma_cookie_t *done,
-					dma_cookie_t *used)
+					struct dma_tx_state *txstate)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
 	dma_cookie_t last_used;
@@ -764,12 +770,7 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
 	last_used = chan->cookie;
 	last_complete = sh_chan->completed_cookie;
 	BUG_ON(last_complete < 0);
-	if (done)
-		*done = last_complete;
-	if (used)
-		*used = last_used;
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 	spin_lock_bh(&sh_chan->desc_lock);
@@ -1041,12 +1042,12 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 		= sh_dmae_alloc_chan_resources;
 	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
 	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
-	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
+	shdev->common.device_tx_status = sh_dmae_tx_status;
 	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
 	/* Compulsory for DMA_SLAVE fields */
 	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
-	shdev->common.device_terminate_all = sh_dmae_terminate_all;
+	shdev->common.device_control = sh_dmae_control;
 	shdev->common.dev = &pdev->dev;
 	/* Default transfer size of 32 bytes requires 32-byte alignment */
...
@@ -938,12 +938,17 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return &first->txd;
 }
-static void txx9dmac_terminate_all(struct dma_chan *chan)
+static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			    unsigned long arg)
 {
 	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
 	struct txx9dmac_desc *desc, *_desc;
 	LIST_HEAD(list);
+
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -EINVAL;
 	dev_vdbg(chan2dev(chan), "terminate_all\n");
 	spin_lock_bh(&dc->lock);
@@ -958,12 +963,13 @@ static void txx9dmac_terminate_all(struct dma_chan *chan)
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		txx9dmac_descriptor_complete(dc, desc);
+	return 0;
 }
 static enum dma_status
-txx9dmac_is_tx_complete(struct dma_chan *chan,
-			dma_cookie_t cookie,
-			dma_cookie_t *done, dma_cookie_t *used)
+txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+		   struct dma_tx_state *txstate)
 {
 	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
 	dma_cookie_t last_used;
@@ -985,10 +991,7 @@ txx9dmac_is_tx_complete(struct dma_chan *chan,
 		ret = dma_async_is_complete(cookie, last_complete, last_used);
 	}
-	if (done)
-		*done = last_complete;
-	if (used)
-		*used = last_used;
+	dma_set_tx_state(txstate, last_complete, last_used, 0);
 	return ret;
 }
@@ -1153,8 +1156,8 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
 	dc->dma.dev = &pdev->dev;
 	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
 	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
-	dc->dma.device_terminate_all = txx9dmac_terminate_all;
-	dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
+	dc->dma.device_control = txx9dmac_control;
+	dc->dma.device_tx_status = txx9dmac_tx_status;
 	dc->dma.device_issue_pending = txx9dmac_issue_pending;
 	if (pdata && pdata->memcpy_chan == ch) {
 		dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
...
@@ -580,7 +580,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
 	struct dma_chan *chan = host->data_chan;
 	if (chan) {
-		chan->device->device_terminate_all(chan);
+		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 		atmci_dma_cleanup(host);
 	} else {
 		/* Data transfer was stopped by the interrupt handler */
...
@@ -1091,7 +1091,7 @@ static void work_fn_rx(struct work_struct *work)
 	unsigned long flags;
 	int count;
-	chan->device->device_terminate_all(chan);
+	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 	dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
 		sh_desc->partial, sh_desc->cookie);
...
@@ -387,7 +387,8 @@ static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
 	spin_unlock_irqrestore(&mx3fb->lock, flags);
-	mx3_fbi->txd->chan->device->device_terminate_all(mx3_fbi->txd->chan);
+	mx3_fbi->txd->chan->device->device_control(mx3_fbi->txd->chan,
+						   DMA_TERMINATE_ALL, 0);
 	mx3_fbi->txd = NULL;
 	mx3_fbi->cookie = -EINVAL;
 }
...
@@ -40,11 +40,13 @@ typedef s32 dma_cookie_t;
  * enum dma_status - DMA transaction status
  * @DMA_SUCCESS: transaction completed successfully
  * @DMA_IN_PROGRESS: transaction not yet processed
+ * @DMA_PAUSED: transaction is paused
  * @DMA_ERROR: transaction failed
  */
 enum dma_status {
 	DMA_SUCCESS,
 	DMA_IN_PROGRESS,
+	DMA_PAUSED,
 	DMA_ERROR,
 };
@@ -106,6 +108,19 @@ enum dma_ctrl_flags {
 	DMA_PREP_FENCE = (1 << 9),
 };
+
+/**
+ * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
+ * on a running channel.
+ * @DMA_TERMINATE_ALL: terminate all ongoing transfers
+ * @DMA_PAUSE: pause ongoing transfers
+ * @DMA_RESUME: resume paused transfer
+ */
+enum dma_ctrl_cmd {
+	DMA_TERMINATE_ALL,
+	DMA_PAUSE,
+	DMA_RESUME,
+};
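With the command an explicit argument, clients can probe for optional operations at run time; a driver that does not implement a command simply returns an error (the conversions above use -ENXIO or -EINVAL). A minimal, hypothetical sketch of pausing and resuming a slave channel through the new hook:

/* Illustrative only: chan is assumed to be a previously requested
 * channel whose driver may or may not implement DMA_PAUSE/DMA_RESUME. */
static int foo_pause_resume(struct dma_chan *chan)
{
	int ret;

	ret = chan->device->device_control(chan, DMA_PAUSE, 0);
	if (ret)
		return ret;	/* pause not supported by this driver */

	/* ... the channel now reports DMA_PAUSED via device_tx_status ... */

	return chan->device->device_control(chan, DMA_RESUME, 0);
}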
 /**
  * enum sum_check_bits - bit position of pq_check_flags
  */
@@ -230,9 +245,84 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
+#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
 	spinlock_t lock;
+#endif
+};
+
+#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+	BUG();
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+	return NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+	return NULL;
+}
+#else
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+	spin_lock_bh(&txd->lock);
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+	spin_unlock_bh(&txd->lock);
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+	txd->next = next;
+	next->parent = txd;
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+	txd->parent = NULL;
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+	txd->next = NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+	return txd->parent;
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+	return txd->next;
+}
+#endif
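The point of these accessors is that core code can keep one copy of the dependency bookkeeping and have it compile down to nothing when channel switching is configured out. A hedged illustration (not code from this merge) of how a caller would use them:

/* Illustrative only: unlink a descriptor from its parent via the
 * accessors.  With CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH set, this
 * whole function reduces to a no-op, since txd_parent() returns NULL
 * and the lock/clear helpers are empty. */
static void foo_unlink_txd(struct dma_async_tx_descriptor *txd)
{
	struct dma_async_tx_descriptor *parent;

	txd_lock(txd);
	parent = txd_parent(txd);
	if (parent)
		txd_clear_next(parent);	/* drop the forward link */
	txd_clear_parent(txd);		/* drop the back link */
	txd_unlock(txd);
}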
+/**
+ * struct dma_tx_state - filled in to report the status of
+ * a transfer.
+ * @last: last completed DMA cookie
+ * @used: last issued DMA cookie (i.e. the one in progress)
+ * @residue: number of bytes remaining to transmit on the selected
+ *	transfer for the DMA_IN_PROGRESS and DMA_PAUSED states, if
+ *	implemented in the driver, else 0
+ */
+struct dma_tx_state {
+	dma_cookie_t last;
+	dma_cookie_t used;
+	u32 residue;
 };
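For clients, this struct replaces the old pair of cookie out-parameters and adds the residue field. A hypothetical polling sketch:

/* Illustrative only: poll a cookie and report progress.  Drivers that
 * do not implement residue reporting leave state.residue at 0. */
static void foo_report_progress(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
		pr_info("cookie %d: %u bytes left\n", cookie, state.residue);
}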
 /**
@@ -261,8 +351,12 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
- * @device_terminate_all: terminate all pending operations
- * @device_is_tx_complete: poll for transaction completion
+ * @device_control: manipulate all pending operations on a channel, returns
+ *	zero or error code
+ * @device_tx_status: poll for transaction completion, the optional
+ *	txstate parameter can be supplied with a pointer to get a
+ *	struct with auxiliary transfer status information, otherwise the call
+ *	will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
@@ -313,11 +407,12 @@ struct dma_device {
 			struct dma_chan *chan, struct scatterlist *sgl,
 			unsigned int sg_len, enum dma_data_direction direction,
 			unsigned long flags);
-	void (*device_terminate_all)(struct dma_chan *chan);
-	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
-			dma_cookie_t cookie, dma_cookie_t *last,
-			dma_cookie_t *used);
+	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+			unsigned long arg);
+	enum dma_status (*device_tx_status)(struct dma_chan *chan,
+			dma_cookie_t cookie,
+			struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
@@ -558,7 +653,15 @@ static inline void dma_async_issue_pending(struct dma_chan *chan)
 static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
 	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
 {
-	return chan->device->device_is_tx_complete(chan, cookie, last, used);
+	struct dma_tx_state state;
+	enum dma_status status;
+
+	status = chan->device->device_tx_status(chan, cookie, &state);
+	if (last)
+		*last = state.last;
+	if (used)
+		*used = state.used;
+	return status;
 }
 #define dma_async_memcpy_complete(chan, cookie, last, used)\
@@ -586,6 +689,16 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 	return DMA_IN_PROGRESS;
 }
+static inline void
+dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
+{
+	if (st) {
+		st->last = last;
+		st->used = used;
+		st->residue = residue;
+	}
+}
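All the conversions above pass 0 for residue; the helper exists so a driver that can read back a transfer count reports it in the same place. A hypothetical sketch of such a driver-side status routine:

/* Illustrative only: foo_chan, to_foo_chan and foo_bytes_left() are
 * hypothetical; the pattern otherwise matches the converted drivers. */
static enum dma_status foo_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct foo_chan *fc = to_foo_chan(chan);
	dma_cookie_t last_complete = fc->completed_cookie;
	dma_cookie_t last_used = chan->cookie;

	dma_set_tx_state(txstate, last_complete, last_used,
			 foo_bytes_left(fc));
	return dma_async_is_complete(cookie, last_complete, last_used);
}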
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
 #ifdef CONFIG_DMA_ENGINE
 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
...
/*
* timb_dma.h timberdale FPGA DMA driver defines
* Copyright (c) 2010 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* Supports:
* Timberdale FPGA DMA engine
*/
#ifndef _LINUX_TIMB_DMA_H
#define _LINUX_TIMB_DMA_H
/**
* struct timb_dma_platform_data_channel - Description of each individual
* DMA channel for the timberdale DMA driver
 * @rx: true if this channel handles data in the direction towards
 *	the CPU.
 * @bytes_per_line: number of bytes per line; this is specific to channels
 *	handling video data. For other channels it should be left at 0.
* @descriptors: Number of descriptors to allocate for this channel.
* @descriptor_elements: Number of elements in each descriptor.
*
*/
struct timb_dma_platform_data_channel {
bool rx;
unsigned int bytes_per_line;
unsigned int descriptors;
unsigned int descriptor_elements;
};
/**
* struct timb_dma_platform_data - Platform data of the timberdale DMA driver
* @nr_channels: Number of defined channels in the channels array.
 * @channels: Definition of each channel.
*
*/
struct timb_dma_platform_data {
unsigned nr_channels;
struct timb_dma_platform_data_channel channels[32];
};
#endif
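For reference, platform data for this driver would be declared in board code roughly as follows (the channel parameters below are made up for illustration):

/* Hypothetical board support sketch: one video RX channel and one TX
 * channel for the timberdale DMA engine. */
static struct timb_dma_platform_data foo_timb_dma_pdata = {
	.nr_channels = 2,
	.channels = {
		{
			.rx = true,
			.bytes_per_line = 1440,	/* video channel */
			.descriptors = 2,
			.descriptor_elements = 16,
		},
		{
			.rx = false,
			.bytes_per_line = 0,	/* non-video channel */
			.descriptors = 8,
			.descriptor_elements = 16,
		},
	},
};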
@@ -160,7 +160,7 @@ static void txx9aclc_dma_tasklet(unsigned long data)
 		void __iomem *base = drvdata->base;
 		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
-		chan->device->device_terminate_all(chan);
+		chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 		/* first time */
 		for (i = 0; i < NR_DMA_CHAIN; i++) {
 			desc = txx9aclc_dma_submit(dmadata,
@@ -268,7 +268,7 @@ static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
 	struct dma_chan *chan = dmadata->dma_chan;
 	dmadata->frag_count = -1;
-	chan->device->device_terminate_all(chan);
+	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 	return 0;
 }
@@ -397,7 +397,8 @@ static int txx9aclc_pcm_remove(struct platform_device *pdev)
 		struct dma_chan *chan = dmadata->dma_chan;
 		if (chan) {
 			dmadata->frag_count = -1;
-			chan->device->device_terminate_all(chan);
+			chan->device->device_control(chan,
+						     DMA_TERMINATE_ALL, 0);
 			dma_release_channel(chan);
 		}
 		dev->dmadata[i].dma_chan = NULL;
...