struct dmaengine_buffer *dmaengine_buffer =
                iio_buffer_to_dmaengine_buffer(&queue->buffer);
        struct dma_async_tx_descriptor *desc;
+       enum dma_transfer_direction dma_dir;
+       size_t max_size;
        dma_cookie_t cookie;
 
-       block->bytes_used = min(block->size, dmaengine_buffer->max_size);
-       block->bytes_used = round_down(block->bytes_used,
-                       dmaengine_buffer->align);
+       max_size = min(block->size, dmaengine_buffer->max_size);
+       max_size = round_down(max_size, dmaengine_buffer->align);
+
+       if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
+               block->bytes_used = max_size;
+               dma_dir = DMA_DEV_TO_MEM;
+       } else {
+               dma_dir = DMA_MEM_TO_DEV;
+       }
+
+       if (!block->bytes_used || block->bytes_used > max_size)
+               return -EINVAL;
 
        desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
-               block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
+               block->phys_addr, block->bytes_used, dma_dir,
                DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;
 }
 EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
 
-struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
-                                             struct iio_dev *indio_dev,
-                                             const char *channel)
+struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
+                                                 struct iio_dev *indio_dev,
+                                                 const char *channel,
+                                                 enum iio_buffer_direction dir)
 {
        struct iio_buffer *buffer;
        int ret;
 
        indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 
+       buffer->direction = dir;
+
        ret = iio_device_attach_buffer(indio_dev, buffer);
        if (ret) {
                iio_dmaengine_buffer_free(buffer);
 
        return buffer;
 }
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
 
/*
 * Device-managed action callback: releases the dmaengine buffer registered
 * by devm_iio_dmaengine_buffer_setup_ext() when the owning device is
 * unbound.
 */
static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
	/*
	 * Fix: the callback body was empty, so the buffer handed to
	 * devm_add_action_or_reset() was never freed — a resource leak on
	 * every device unbind (and on setup failure via the or_reset path).
	 */
	iio_dmaengine_buffer_free(buffer);
}
 
 /**
- * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
+ * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
  * @dev: Parent device for the buffer
  * @indio_dev: IIO device to which to attach this buffer.
  * @channel: DMA channel name, typically "rx".
+ * @dir: Direction of buffer (in or out)
  *
- * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
- * and attaches it to an IIO device with iio_device_attach_buffer().
+ * This allocates a new IIO buffer with iio_dmaengine_buffer_setup_ext()
+ * and attaches it to the IIO device, registering a device-managed action
+ * so the buffer is released automatically when @dev is unbound.
  * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
  * IIO device.
+ *
+ * Returns: 0 on success, or a negative error code on failure.
  */
-int devm_iio_dmaengine_buffer_setup(struct device *dev,
-                                   struct iio_dev *indio_dev,
-                                   const char *channel)
+int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
+                                       struct iio_dev *indio_dev,
+                                       const char *channel,
+                                       enum iio_buffer_direction dir)
 {
        struct iio_buffer *buffer;
 
        /* Delegate to the non-devm setup helper, which allocates the buffer,
         * records its direction and attaches it to indio_dev; it returns the
         * buffer or an ERR_PTR() on failure. */
-       buffer = iio_dmaengine_buffer_setup(dev, indio_dev, channel);
+       buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);
 
        /* Tie the buffer's lifetime to dev: the action callback frees it on
         * unbind. With the _or_reset variant, if registration itself fails
         * the callback runs immediately and the error is returned, so the
         * buffer cannot leak on this path. */
        return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
                                        buffer);
 }
-EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
 
 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
 MODULE_DESCRIPTION("DMA buffer for the IIO framework");
 
#ifndef __IIO_DMAENGINE_H__
#define __IIO_DMAENGINE_H__
 
+/* Needed for enum iio_buffer_direction used in the _ext prototypes below. */
+#include <linux/iio/buffer.h>
+
 struct iio_dev;
 struct device;
 
 /* Release a buffer previously created by iio_dmaengine_buffer_setup_ext(). */
 void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
-struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev,
-                                             struct iio_dev *indio_dev,
-                                             const char *channel);
-int devm_iio_dmaengine_buffer_setup(struct device *dev,
-                                   struct iio_dev *indio_dev,
-                                   const char *channel);
+/* Allocate and attach a dmaengine buffer with an explicit direction.
+ * Returns the buffer or an ERR_PTR() on failure. */
+struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
+                                                 struct iio_dev *indio_dev,
+                                                 const char *channel,
+                                                 enum iio_buffer_direction dir);
+
+/* Backward-compatible wrapper: existing callers get the historical
+ * device-to-memory (capture) direction. */
+#define iio_dmaengine_buffer_setup(dev, indio_dev, channel)    \
+       iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, \
+                                      IIO_BUFFER_DIRECTION_IN)
+
+/* Device-managed variant: the buffer is freed automatically when dev is
+ * unbound. Returns 0 or a negative error code. */
+int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
+                                       struct iio_dev *indio_dev,
+                                       const char *channel,
+                                       enum iio_buffer_direction dir);
+
+/* Backward-compatible wrapper, defaulting to the IN (capture) direction. */
+#define devm_iio_dmaengine_buffer_setup(dev, indio_dev, channel)       \
+       devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel,    \
+                                           IIO_BUFFER_DIRECTION_IN)
 
 #endif