Commit c1b91566 authored by Paul Cercueil, committed by Jonathan Cameron

iio: buffer-dmaengine: Support specifying buffer direction

Update the devm_iio_dmaengine_buffer_setup() function to support
specifying the buffer direction.

Update the iio_dmaengine_buffer_submit() function to handle output
buffers as well as input buffers.
Signed-off-by: Paul Cercueil <paul@crapouillou.net>
Reviewed-by: Alexandru Ardelean <ardeleanalex@gmail.com>
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
Link: https://lore.kernel.org/r/20240419-iio-backend-axi-dac-v4-4-5ca45b4de294@analog.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
parent fb09feba
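
As a quick illustration of the new API (not part of the patch itself), a driver for an output device could now request a DMA buffer in the memory-to-device direction roughly as follows. The probe helper, device name and "tx" channel are hypothetical; only devm_iio_dmaengine_buffer_setup_ext() and IIO_BUFFER_DIRECTION_OUT come from this change:

/*
 * Hypothetical probe-path sketch showing the new direction-aware helper.
 * Only devm_iio_dmaengine_buffer_setup_ext() and IIO_BUFFER_DIRECTION_OUT
 * are introduced by this patch; the function name and "tx" channel are
 * illustrative.
 */
#include <linux/iio/iio.h>
#include <linux/iio/buffer-dmaengine.h>

static int my_dac_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
{
	int ret;

	/* Output device: DMA moves data from memory to the device (MEM_TO_DEV). */
	ret = devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, "tx",
						  IIO_BUFFER_DIRECTION_OUT);
	if (ret)
		return ret;

	return devm_iio_device_register(dev, indio_dev);
}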
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -64,14 +64,25 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
 	struct dmaengine_buffer *dmaengine_buffer =
 		iio_buffer_to_dmaengine_buffer(&queue->buffer);
 	struct dma_async_tx_descriptor *desc;
+	enum dma_transfer_direction dma_dir;
+	size_t max_size;
 	dma_cookie_t cookie;
 
-	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
-	block->bytes_used = round_down(block->bytes_used,
-			dmaengine_buffer->align);
+	max_size = min(block->size, dmaengine_buffer->max_size);
+	max_size = round_down(max_size, dmaengine_buffer->align);
+
+	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
+		block->bytes_used = max_size;
+		dma_dir = DMA_DEV_TO_MEM;
+	} else {
+		dma_dir = DMA_MEM_TO_DEV;
+	}
+
+	if (!block->bytes_used || block->bytes_used > max_size)
+		return -EINVAL;
 
 	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
-		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
+		block->phys_addr, block->bytes_used, dma_dir,
 		DMA_PREP_INTERRUPT);
 	if (!desc)
 		return -ENOMEM;
@@ -229,9 +240,10 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
 }
 EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
 
-struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
-					      struct iio_dev *indio_dev,
-					      const char *channel)
+struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
+						  struct iio_dev *indio_dev,
+						  const char *channel,
+						  enum iio_buffer_direction dir)
 {
 	struct iio_buffer *buffer;
 	int ret;
@@ -242,6 +254,8 @@ struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
 
 	indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 
+	buffer->direction = dir;
+
 	ret = iio_device_attach_buffer(indio_dev, buffer);
 	if (ret) {
 		iio_dmaengine_buffer_free(buffer);
@@ -250,7 +264,7 @@ struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
 
 	return buffer;
 }
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
 
 static void __devm_iio_dmaengine_buffer_free(void *buffer)
 {
@@ -258,30 +272,32 @@ static void __devm_iio_dmaengine_buffer_free(void *buffer)
 }
 
 /**
- * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
+ * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
  * @dev: Parent device for the buffer
  * @indio_dev: IIO device to which to attach this buffer.
  * @channel: DMA channel name, typically "rx".
+ * @dir: Direction of buffer (in or out)
  *
  * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 */
-int devm_iio_dmaengine_buffer_setup(struct device *dev,
-				    struct iio_dev *indio_dev,
-				    const char *channel)
+int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
+					struct iio_dev *indio_dev,
+					const char *channel,
+					enum iio_buffer_direction dir)
 {
 	struct iio_buffer *buffer;
 
-	buffer = iio_dmaengine_buffer_setup(dev, indio_dev, channel);
+	buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);
 	if (IS_ERR(buffer))
 		return PTR_ERR(buffer);
 
 	return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
 					buffer);
 }
-EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
 
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("DMA buffer for the IIO framework");
--- a/include/linux/iio/buffer-dmaengine.h
+++ b/include/linux/iio/buffer-dmaengine.h
@@ -7,15 +7,28 @@
 #ifndef __IIO_DMAENGINE_H__
 #define __IIO_DMAENGINE_H__
 
+#include <linux/iio/buffer.h>
+
 struct iio_dev;
 struct device;
 
 void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
-struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
-					      struct iio_dev *indio_dev,
-					      const char *channel);
+struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
+						  struct iio_dev *indio_dev,
+						  const char *channel,
+						  enum iio_buffer_direction dir);
 
-int devm_iio_dmaengine_buffer_setup(struct device *dev,
-				    struct iio_dev *indio_dev,
-				    const char *channel);
+#define iio_dmaengine_buffer_setup(dev, indio_dev, channel)	\
+	iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel,	\
+				       IIO_BUFFER_DIRECTION_IN)
+
+int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
+					struct iio_dev *indio_dev,
+					const char *channel,
+					enum iio_buffer_direction dir);
+
+#define devm_iio_dmaengine_buffer_setup(dev, indio_dev, channel)	\
+	devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel,	\
+					    IIO_BUFFER_DIRECTION_IN)
 
 #endif
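
Existing capture drivers should be unaffected by the rename: the original devm_iio_dmaengine_buffer_setup() spelling is kept as a wrapper macro that defaults to IIO_BUFFER_DIRECTION_IN. A minimal sketch of the equivalence follows; the surrounding function and "rx" channel name are hypothetical:

/*
 * Hypothetical sketch: existing input-buffer users keep compiling unchanged,
 * because the old name is now a macro that expands to the _ext variant with
 * IIO_BUFFER_DIRECTION_IN.
 */
#include <linux/iio/iio.h>
#include <linux/iio/buffer-dmaengine.h>

static int my_adc_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
{
	/* Old spelling, unchanged caller ... */
	return devm_iio_dmaengine_buffer_setup(dev, indio_dev, "rx");
	/*
	 * ... equivalent after this patch to:
	 * devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, "rx",
	 *				       IIO_BUFFER_DIRECTION_IN);
	 */
}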