Commit a094de22 authored by Nuno Sa, committed by Jonathan Cameron

iio: buffer-dma: add iio_dmaengine_buffer_setup()

This brings the DMA buffer API more in line with what we have in the
triggered buffer. There's no need to have both
devm_iio_dmaengine_buffer_setup() and devm_iio_dmaengine_buffer_alloc().
Hence we introduce the new iio_dmaengine_buffer_setup() that together
with devm_iio_dmaengine_buffer_setup() should be all we need.

Note that as part of this change iio_dmaengine_buffer_alloc() is again
static and the axi-adc was updated accordingly.
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
Link: https://lore.kernel.org/r/20240419-iio-backend-axi-dac-v4-1-5ca45b4de294@analog.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
parent 5826711e
......@@ -124,26 +124,12 @@ static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
struct iio_dev *indio_dev)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
struct iio_buffer *buffer;
const char *dma_name;
int ret;
if (device_property_read_string(st->dev, "dma-names", &dma_name))
dma_name = "rx";
buffer = iio_dmaengine_buffer_alloc(st->dev, dma_name);
if (IS_ERR(buffer)) {
dev_err(st->dev, "Could not get DMA buffer, %ld\n",
PTR_ERR(buffer));
return ERR_CAST(buffer);
}
indio_dev->modes |= INDIO_BUFFER_HARDWARE;
ret = iio_device_attach_buffer(indio_dev, buffer);
if (ret)
return ERR_PTR(ret);
return buffer;
return iio_dmaengine_buffer_setup(st->dev, indio_dev, dma_name);
}
static void axi_adc_free_buffer(struct iio_backend *back,
......
......@@ -159,7 +159,7 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
* Once done using the buffer iio_dmaengine_buffer_free() should be used to
* release it.
*/
struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
const char *channel)
{
struct dmaengine_buffer *dmaengine_buffer;
......@@ -210,7 +210,6 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
kfree(dmaengine_buffer);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_alloc, IIO_DMAENGINE_BUFFER);
/**
* iio_dmaengine_buffer_free() - Free dmaengine buffer
......@@ -230,39 +229,33 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
iio_dmaengine_buffer_free(buffer);
}
/**
* devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
* @dev: Parent device for the buffer
* @channel: DMA channel name, typically "rx".
*
* This allocates a new IIO buffer which internally uses the DMAengine framework
* to perform its transfers. The parent device will be used to request the DMA
* channel.
*
* The buffer will be automatically de-allocated once the device gets destroyed.
*/
static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
const char *channel)
struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
struct iio_dev *indio_dev,
const char *channel)
{
struct iio_buffer *buffer;
int ret;
buffer = iio_dmaengine_buffer_alloc(dev, channel);
if (IS_ERR(buffer))
return buffer;
return ERR_CAST(buffer);
indio_dev->modes |= INDIO_BUFFER_HARDWARE;
ret = devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
buffer);
if (ret)
ret = iio_device_attach_buffer(indio_dev, buffer);
if (ret) {
iio_dmaengine_buffer_free(buffer);
return ERR_PTR(ret);
}
return buffer;
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
iio_dmaengine_buffer_free(buffer);
}
/**
* devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
......@@ -281,13 +274,12 @@ int devm_iio_dmaengine_buffer_setup(struct device *dev,
{
struct iio_buffer *buffer;
buffer = devm_iio_dmaengine_buffer_alloc(dev, channel);
buffer = iio_dmaengine_buffer_setup(dev, indio_dev, channel);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
indio_dev->modes |= INDIO_BUFFER_HARDWARE;
return iio_device_attach_buffer(indio_dev, buffer);
return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
......
......@@ -10,9 +10,10 @@
struct iio_dev;
struct device;
struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
const char *channel);
void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
struct iio_dev *indio_dev,
const char *channel);
int devm_iio_dmaengine_buffer_setup(struct device *dev,
struct iio_dev *indio_dev,
const char *channel);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment