Commit 4aff1ce7 authored by Alexandre Bounine's avatar Alexandre Bounine Committed by Linus Torvalds

rapidio: add new RapidIO DMA interface routines

Add RapidIO DMA interface routines that directly use reference to the mport
device object and/or target device destination ID as parameters.
This allows modules that do not have access to the RapidIO device list
to perform RapidIO DMA transfer requests.
Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Andre van Herk <andre.van.herk@prodrive-technologies.com>
Cc: Stef van Os <stef.van.os@prodrive-technologies.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 93b7aca3
...@@ -1509,30 +1509,39 @@ EXPORT_SYMBOL_GPL(rio_route_clr_table); ...@@ -1509,30 +1509,39 @@ EXPORT_SYMBOL_GPL(rio_route_clr_table);
static bool rio_chan_filter(struct dma_chan *chan, void *arg) static bool rio_chan_filter(struct dma_chan *chan, void *arg)
{ {
struct rio_dev *rdev = arg; struct rio_mport *mport = arg;
/* Check that DMA device belongs to the right MPORT */ /* Check that DMA device belongs to the right MPORT */
return (rdev->net->hport == return mport == container_of(chan->device, struct rio_mport, dma);
container_of(chan->device, struct rio_mport, dma));
} }
/** /**
* rio_request_dma - request RapidIO capable DMA channel that supports * rio_request_mport_dma - request RapidIO capable DMA channel associated
* specified target RapidIO device. * with specified local RapidIO mport device.
* @rdev: RIO device control structure * @mport: RIO mport to perform DMA data transfers
* *
* Returns pointer to allocated DMA channel or NULL if failed. * Returns pointer to allocated DMA channel or NULL if failed.
*/ */
struct dma_chan *rio_request_dma(struct rio_dev *rdev) struct dma_chan *rio_request_mport_dma(struct rio_mport *mport)
{ {
dma_cap_mask_t mask; dma_cap_mask_t mask;
struct dma_chan *dchan;
dma_cap_zero(mask); dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask); dma_cap_set(DMA_SLAVE, mask);
dchan = dma_request_channel(mask, rio_chan_filter, rdev); return dma_request_channel(mask, rio_chan_filter, mport);
}
EXPORT_SYMBOL_GPL(rio_request_mport_dma);
/**
 * rio_request_dma - request RapidIO capable DMA channel that supports
 * specified target RapidIO device.
 * @rdev: RIO device associated with DMA transfer
 *
 * Returns pointer to allocated DMA channel or NULL if failed.
 */
struct dma_chan *rio_request_dma(struct rio_dev *rdev)
{
	/* Delegate to the mport-based routine using the device's host port */
	return rio_request_mport_dma(rdev->net->hport);
}
EXPORT_SYMBOL_GPL(rio_request_dma);
...@@ -1547,10 +1556,10 @@ void rio_release_dma(struct dma_chan *dchan) ...@@ -1547,10 +1556,10 @@ void rio_release_dma(struct dma_chan *dchan)
EXPORT_SYMBOL_GPL(rio_release_dma); EXPORT_SYMBOL_GPL(rio_release_dma);
/** /**
* rio_dma_prep_slave_sg - RapidIO specific wrapper * rio_dma_prep_xfer - RapidIO specific wrapper
* for device_prep_slave_sg callback defined by DMAENGINE. * for device_prep_slave_sg callback defined by DMAENGINE.
* @rdev: RIO device control structure
* @dchan: DMA channel to configure * @dchan: DMA channel to configure
* @destid: target RapidIO device destination ID
* @data: RIO specific data descriptor * @data: RIO specific data descriptor
* @direction: DMA data transfer direction (TO or FROM the device) * @direction: DMA data transfer direction (TO or FROM the device)
* @flags: dmaengine defined flags * @flags: dmaengine defined flags
...@@ -1560,11 +1569,10 @@ EXPORT_SYMBOL_GPL(rio_release_dma); ...@@ -1560,11 +1569,10 @@ EXPORT_SYMBOL_GPL(rio_release_dma);
* target RIO device. * target RIO device.
* Returns pointer to DMA transaction descriptor or NULL if failed. * Returns pointer to DMA transaction descriptor or NULL if failed.
*/ */
struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan,
struct dma_chan *dchan, struct rio_dma_data *data, u16 destid, struct rio_dma_data *data,
enum dma_transfer_direction direction, unsigned long flags) enum dma_transfer_direction direction, unsigned long flags)
{ {
struct dma_async_tx_descriptor *txd = NULL;
struct rio_dma_ext rio_ext; struct rio_dma_ext rio_ext;
if (dchan->device->device_prep_slave_sg == NULL) { if (dchan->device->device_prep_slave_sg == NULL) {
...@@ -1572,15 +1580,35 @@ struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, ...@@ -1572,15 +1580,35 @@ struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
return NULL; return NULL;
} }
rio_ext.destid = rdev->destid; rio_ext.destid = destid;
rio_ext.rio_addr_u = data->rio_addr_u; rio_ext.rio_addr_u = data->rio_addr_u;
rio_ext.rio_addr = data->rio_addr; rio_ext.rio_addr = data->rio_addr;
rio_ext.wr_type = data->wr_type; rio_ext.wr_type = data->wr_type;
txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len, return dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
direction, flags, &rio_ext); direction, flags, &rio_ext);
}
EXPORT_SYMBOL_GPL(rio_dma_prep_xfer);
/**
 * rio_dma_prep_slave_sg - RapidIO specific wrapper
 * for device_prep_slave_sg callback defined by DMAENGINE.
 * @rdev: RIO device control structure
 * @dchan: DMA channel to configure
 * @data: RIO specific data descriptor
 * @direction: DMA data transfer direction (TO or FROM the device)
 * @flags: dmaengine defined flags
 *
 * Initializes RapidIO capable DMA channel for the specified data transfer.
 * Uses DMA channel private extension to pass information related to remote
 * target RIO device.
 * Returns pointer to DMA transaction descriptor or NULL if failed.
 */
struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
	struct dma_chan *dchan, struct rio_dma_data *data,
	enum dma_transfer_direction direction, unsigned long flags)
{
	/* Destination ID comes from the target RIO device itself */
	return rio_dma_prep_xfer(dchan, rdev->destid, data, direction, flags);
}
EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
......
...@@ -384,11 +384,16 @@ void rio_dev_put(struct rio_dev *); ...@@ -384,11 +384,16 @@ void rio_dev_put(struct rio_dev *);
#ifdef CONFIG_RAPIDIO_DMA_ENGINE #ifdef CONFIG_RAPIDIO_DMA_ENGINE
extern struct dma_chan *rio_request_dma(struct rio_dev *rdev); extern struct dma_chan *rio_request_dma(struct rio_dev *rdev);
extern struct dma_chan *rio_request_mport_dma(struct rio_mport *mport);
extern void rio_release_dma(struct dma_chan *dchan); extern void rio_release_dma(struct dma_chan *dchan);
extern struct dma_async_tx_descriptor *rio_dma_prep_slave_sg( extern struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(
struct rio_dev *rdev, struct dma_chan *dchan, struct rio_dev *rdev, struct dma_chan *dchan,
struct rio_dma_data *data, struct rio_dma_data *data,
enum dma_transfer_direction direction, unsigned long flags); enum dma_transfer_direction direction, unsigned long flags);
extern struct dma_async_tx_descriptor *rio_dma_prep_xfer(
struct dma_chan *dchan, u16 destid,
struct rio_dma_data *data,
enum dma_transfer_direction direction, unsigned long flags);
#endif #endif
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment