 static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
                                                  struct iio_dev *indio_dev)
 {
        struct adi_axi_adc_state *st = iio_backend_get_priv(back);
-       struct iio_buffer *buffer;
        const char *dma_name;
-       int ret;
 
        if (device_property_read_string(st->dev, "dma-names", &dma_name))
                dma_name = "rx";
 
-       buffer = iio_dmaengine_buffer_alloc(st->dev, dma_name);
-       if (IS_ERR(buffer)) {
-               dev_err(st->dev, "Could not get DMA buffer, %ld\n",
-                       PTR_ERR(buffer));
-               return ERR_CAST(buffer);
-       }
-
-       indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-       ret = iio_device_attach_buffer(indio_dev, buffer);
-       if (ret)
-               return ERR_PTR(ret);
-
-       return buffer;
+       return iio_dmaengine_buffer_setup(st->dev, indio_dev, dma_name);
 }
 
 static void axi_adc_free_buffer(struct iio_backend *back,
                                 struct iio_buffer *buffer)
 {
        iio_dmaengine_buffer_free(buffer);
 }
 
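For context, these two callbacks are what the IIO backend core invokes to create and tear down the capture buffer. A minimal sketch of how a backend driver registers them, following the adi-axi-adc pattern (the ops field names are assumptions taken from that driver, not part of this patch):

	/* Illustrative only, not part of this patch. */
	static const struct iio_backend_ops adi_axi_adc_ops = {
		/* allocates the DMA buffer and attaches it to the IIO device */
		.request_buffer = axi_adc_request_buffer,
		/* releases the buffer on teardown */
		.free_buffer = axi_adc_free_buffer,
	};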
  * Once done using the buffer iio_dmaengine_buffer_free() should be used to
  * release it.
  */
-struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
        const char *channel)
 {
        struct dmaengine_buffer *dmaengine_buffer;
        kfree(dmaengine_buffer);
        return ERR_PTR(ret);
 }
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_alloc, IIO_DMAENGINE_BUFFER);
 
 /**
  * iio_dmaengine_buffer_free() - Free dmaengine buffer
 }
 EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
 
-static void __devm_iio_dmaengine_buffer_free(void *buffer)
-{
-       iio_dmaengine_buffer_free(buffer);
-}
-
-/**
- * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
- * @dev: Parent device for the buffer
- * @channel: DMA channel name, typically "rx".
- *
- * This allocates a new IIO buffer which internally uses the DMAengine framework
- * to perform its transfers. The parent device will be used to request the DMA
- * channel.
- *
- * The buffer will be automatically de-allocated once the device gets destroyed.
- */
-static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
-       const char *channel)
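+/**
+ * iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
+ * @dev: Parent device for the buffer
+ * @indio_dev: IIO device to which to attach this buffer.
+ * @channel: DMA channel name, typically "rx".
+ *
+ * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc() and
+ * attaches it to an IIO device with iio_device_attach_buffer(). It also
+ * appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
+ * IIO device.
+ *
+ * Once done using the buffer iio_dmaengine_buffer_free() should be used to
+ * release it.
+ */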
+struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
+                                             struct iio_dev *indio_dev,
+                                             const char *channel)
 {
        struct iio_buffer *buffer;
        int ret;
 
        buffer = iio_dmaengine_buffer_alloc(dev, channel);
        if (IS_ERR(buffer))
-               return buffer;
+               return ERR_CAST(buffer);
+
+       indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 
-       ret = devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
-                                      buffer);
-       if (ret)
+       ret = iio_device_attach_buffer(indio_dev, buffer);
+       if (ret) {
+               iio_dmaengine_buffer_free(buffer);
                return ERR_PTR(ret);
+       }
 
        return buffer;
 }
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
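With the setup helper exported, a non-devm caller owns the buffer's lifetime and must pair it with iio_dmaengine_buffer_free(). Note the IIO_DMAENGINE_BUFFER symbol namespace, which external modules must import via MODULE_IMPORT_NS(). A minimal sketch under those assumptions (the my_adc_* names are hypothetical, not from this patch):

	#include <linux/err.h>
	#include <linux/iio/buffer-dmaengine.h>
	#include <linux/iio/iio.h>

	/* Hypothetical driver state; sketch only. */
	struct my_adc_state {
		struct iio_buffer *buffer;
	};

	static int my_adc_request_buffer(struct my_adc_state *st,
					 struct device *dev,
					 struct iio_dev *indio_dev)
	{
		/* Allocates the buffer, ORs in INDIO_BUFFER_HARDWARE and attaches it. */
		st->buffer = iio_dmaengine_buffer_setup(dev, indio_dev, "rx");
		if (IS_ERR(st->buffer))
			return PTR_ERR(st->buffer);

		return 0;
	}

	static void my_adc_free_buffer(struct my_adc_state *st)
	{
		/* The non-devm variant leaves the release to the caller. */
		iio_dmaengine_buffer_free(st->buffer);
	}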
+
+static void __devm_iio_dmaengine_buffer_free(void *buffer)
+{
+       iio_dmaengine_buffer_free(buffer);
+}
 
 /**
  * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
  * @dev: Parent device for the buffer
  * @indio_dev: IIO device to which to attach this buffer.
  * @channel: DMA channel name, typically "rx".
  *
  * This calls iio_dmaengine_buffer_setup() and registers a device-managed
  * action so the buffer is automatically released when @dev is destroyed.
  */
 int devm_iio_dmaengine_buffer_setup(struct device *dev,
                                     struct iio_dev *indio_dev,
                                     const char *channel)
 {
        struct iio_buffer *buffer;
 
-       buffer = devm_iio_dmaengine_buffer_alloc(dev, channel);
+       buffer = iio_dmaengine_buffer_setup(dev, indio_dev, channel);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);
 
-       indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-
-       return iio_device_attach_buffer(indio_dev, buffer);
+       return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+                                       buffer);
 }
 EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
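Most drivers will want the devm variant, since it ties the buffer's release to @dev. A minimal probe-time sketch (my_adc_probe() and its sizeof_priv of 0 are illustrative assumptions, not from this patch):

	#include <linux/iio/buffer-dmaengine.h>
	#include <linux/iio/iio.h>
	#include <linux/platform_device.h>

	/* Sketch only: my_adc_probe() is hypothetical. */
	static int my_adc_probe(struct platform_device *pdev)
	{
		struct iio_dev *indio_dev;
		int ret;

		indio_dev = devm_iio_device_alloc(&pdev->dev, 0);
		if (!indio_dev)
			return -ENOMEM;

		/* Attaches the DMA buffer; it is freed automatically with the device. */
		ret = devm_iio_dmaengine_buffer_setup(&pdev->dev, indio_dev, "rx");
		if (ret)
			return ret;

		return devm_iio_device_register(&pdev->dev, indio_dev);
	}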