iio: buffer-dma: add iio_dmaengine_buffer_setup()
authorNuno Sa <nuno.sa@analog.com>
Fri, 19 Apr 2024 08:25:34 +0000 (10:25 +0200)
committerJonathan Cameron <Jonathan.Cameron@huawei.com>
Sat, 20 Apr 2024 14:36:41 +0000 (15:36 +0100)
This brings the DMA buffer API more in line with what we have in the
triggered buffer. There's no need to have both
devm_iio_dmaengine_buffer_setup() and devm_iio_dmaengine_buffer_alloc().
Hence we introduce the new iio_dmaengine_buffer_setup() that together
with devm_iio_dmaengine_buffer_setup() should be all we need.

Note that, as part of this change, iio_dmaengine_buffer_alloc() is made
static again and the axi-adc driver was updated accordingly.

Signed-off-by: Nuno Sa <nuno.sa@analog.com>
Link: https://lore.kernel.org/r/20240419-iio-backend-axi-dac-v4-1-5ca45b4de294@analog.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
drivers/iio/adc/adi-axi-adc.c
drivers/iio/buffer/industrialio-buffer-dmaengine.c
include/linux/iio/buffer-dmaengine.h

index 4156639b3c8bdb5a4101dacc47dc665aa5db2166..184b36dca6d035e2de1bf6d8ae347bdca6945bc5 100644 (file)
@@ -124,26 +124,12 @@ static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
                                                 struct iio_dev *indio_dev)
 {
        struct adi_axi_adc_state *st = iio_backend_get_priv(back);
-       struct iio_buffer *buffer;
        const char *dma_name;
-       int ret;
 
        if (device_property_read_string(st->dev, "dma-names", &dma_name))
                dma_name = "rx";
 
-       buffer = iio_dmaengine_buffer_alloc(st->dev, dma_name);
-       if (IS_ERR(buffer)) {
-               dev_err(st->dev, "Could not get DMA buffer, %ld\n",
-                       PTR_ERR(buffer));
-               return ERR_CAST(buffer);
-       }
-
-       indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-       ret = iio_device_attach_buffer(indio_dev, buffer);
-       if (ret)
-               return ERR_PTR(ret);
-
-       return buffer;
+       return iio_dmaengine_buffer_setup(st->dev, indio_dev, dma_name);
 }
 
 static void axi_adc_free_buffer(struct iio_backend *back,
index a18c1da292af22822adab6504c9d58929d598c73..97f3116566f580c2c8f1ac6be59c9c7240fcff23 100644 (file)
@@ -159,7 +159,7 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
  * Once done using the buffer iio_dmaengine_buffer_free() should be used to
  * release it.
  */
-struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
        const char *channel)
 {
        struct dmaengine_buffer *dmaengine_buffer;
@@ -210,7 +210,6 @@ err_free:
        kfree(dmaengine_buffer);
        return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_alloc, IIO_DMAENGINE_BUFFER);
 
 /**
  * iio_dmaengine_buffer_free() - Free dmaengine buffer
@@ -230,39 +229,33 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
 }
 EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
 
-static void __devm_iio_dmaengine_buffer_free(void *buffer)
-{
-       iio_dmaengine_buffer_free(buffer);
-}
-
-/**
- * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
- * @dev: Parent device for the buffer
- * @channel: DMA channel name, typically "rx".
- *
- * This allocates a new IIO buffer which internally uses the DMAengine framework
- * to perform its transfers. The parent device will be used to request the DMA
- * channel.
- *
- * The buffer will be automatically de-allocated once the device gets destroyed.
- */
-static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
-       const char *channel)
+struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
+                                             struct iio_dev *indio_dev,
+                                             const char *channel)
 {
        struct iio_buffer *buffer;
        int ret;
 
        buffer = iio_dmaengine_buffer_alloc(dev, channel);
        if (IS_ERR(buffer))
-               return buffer;
+               return ERR_CAST(buffer);
+
+       indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 
-       ret = devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
-                                      buffer);
-       if (ret)
+       ret = iio_device_attach_buffer(indio_dev, buffer);
+       if (ret) {
+               iio_dmaengine_buffer_free(buffer);
                return ERR_PTR(ret);
+       }
 
        return buffer;
 }
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
+
+static void __devm_iio_dmaengine_buffer_free(void *buffer)
+{
+       iio_dmaengine_buffer_free(buffer);
+}
 
 /**
  * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
@@ -281,13 +274,12 @@ int devm_iio_dmaengine_buffer_setup(struct device *dev,
 {
        struct iio_buffer *buffer;
 
-       buffer = devm_iio_dmaengine_buffer_alloc(dev, channel);
+       buffer = iio_dmaengine_buffer_setup(dev, indio_dev, channel);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);
 
-       indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-
-       return iio_device_attach_buffer(indio_dev, buffer);
+       return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+                                       buffer);
 }
 EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
 
index cbb8ba957fade348855f1deebf8527eaabf447af..acb60f9a3fffa2f96b38db3d232a966108bcacd3 100644 (file)
 struct iio_dev;
 struct device;
 
-struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
-                                             const char *channel);
 void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
+struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
+                                             struct iio_dev *indio_dev,
+                                             const char *channel);
 int devm_iio_dmaengine_buffer_setup(struct device *dev,
                                    struct iio_dev *indio_dev,
                                    const char *channel);