struct xilly_endpoint {
        struct device *dev;
-       struct xilly_endpoint_hardware *ephw;
+       /*
+        * Module to pin for xillybus_init_chrdev(); replaces the
+        * removed ephw->owner. Set by the probe function.
+        */
+       struct module *owner;
 
        int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. */
        __iomem void *registers;
        unsigned int msg_buf_size;
 };
 
-struct xilly_endpoint_hardware {
-       struct module *owner;
-       void (*hw_sync_sgl_for_cpu)(struct xilly_endpoint *,
-                                   dma_addr_t,
-                                   size_t,
-                                   int);
-       void (*hw_sync_sgl_for_device)(struct xilly_endpoint *,
-                                      dma_addr_t,
-                                      size_t,
-                                      int);
-       int (*map_single)(struct xilly_endpoint *,
-                         void *,
-                         size_t,
-                         int,
-                         dma_addr_t *);
-};
-
 struct xilly_mapping {
        struct device *device;
        dma_addr_t dma_addr;
 
 irqreturn_t xillybus_isr(int irq, void *data);
 
-struct xilly_endpoint *xillybus_init_endpoint(struct device *dev,
-                                             struct xilly_endpoint_hardware
-                                             *ephw);
+struct xilly_endpoint *xillybus_init_endpoint(struct device *dev);
 
 int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint);
 
 
        buf = ep->msgbuf_addr;
        buf_size = ep->msg_buf_size/sizeof(u32);
 
-       ep->ephw->hw_sync_sgl_for_cpu(ep,
-                                     ep->msgbuf_dma_addr,
-                                     ep->msg_buf_size,
-                                     DMA_FROM_DEVICE);
+       dma_sync_single_for_cpu(ep->dev, ep->msgbuf_dma_addr,
+                               ep->msg_buf_size, DMA_FROM_DEVICE);
 
        for (i = 0; i < buf_size; i += 2) {
                if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
                                dev_err(ep->dev,
                                        "Lost sync with interrupt messages. Stopping.\n");
                        } else {
-                               ep->ephw->hw_sync_sgl_for_device(
-                                       ep,
-                                       ep->msgbuf_dma_addr,
-                                       ep->msg_buf_size,
-                                       DMA_FROM_DEVICE);
+                               dma_sync_single_for_device(ep->dev,
+                                                          ep->msgbuf_dma_addr,
+                                                          ep->msg_buf_size,
+                                                          DMA_FROM_DEVICE);
 
                                iowrite32(0x01,  /* Message NACK */
                                          ep->registers + fpga_msg_ctrl_reg);
                }
        }
 
-       ep->ephw->hw_sync_sgl_for_device(ep,
-                                        ep->msgbuf_dma_addr,
-                                        ep->msg_buf_size,
-                                        DMA_FROM_DEVICE);
+       dma_sync_single_for_device(ep->dev, ep->msgbuf_dma_addr,
+                                  ep->msg_buf_size, DMA_FROM_DEVICE);
 
        ep->msg_counter = (ep->msg_counter + 1) & 0xf;
        ep->failed_messages = 0;
        u32 regdirection;
 };
 
+/*
+ * devm release action paired with xilly_map_single(): undo the DMA
+ * mapping recorded in the struct xilly_mapping that @ptr points at,
+ * then free the record itself.
+ */
+static void xilly_unmap(void *ptr)
+{
+       struct xilly_mapping *data = ptr;
+
+       dma_unmap_single(data->device, data->dma_addr,
+                        data->size, data->direction);
+
+       kfree(ptr);
+}
+
+/*
+ * Map @ptr (a kernel virtual address, @size bytes, DMA @direction)
+ * for streaming DMA on @ep's device, returning the bus address in
+ * *@ret_dma_handle. The mapping is released automatically through a
+ * devm action (xilly_unmap) when the device is unbound.
+ *
+ * Returns 0 on success, -ENOMEM if bookkeeping allocation fails, or
+ * -ENODEV if the DMA mapping itself fails.
+ */
+static int xilly_map_single(struct xilly_endpoint *ep,
+                           void *ptr,
+                           size_t size,
+                           int direction,
+                           dma_addr_t *ret_dma_handle
+       )
+{
+       dma_addr_t addr;
+       struct xilly_mapping *this;
+
+       this = kzalloc(sizeof(*this), GFP_KERNEL);
+       if (!this)
+               return -ENOMEM;
+
+       addr = dma_map_single(ep->dev, ptr, size, direction);
+
+       if (dma_mapping_error(ep->dev, addr)) {
+               kfree(this);
+               return -ENODEV;
+       }
+
+       /* Record everything xilly_unmap() needs to undo the mapping */
+       this->device = ep->dev;
+       this->dma_addr = addr;
+       this->size = size;
+       this->direction = direction;
+
+       *ret_dma_handle = addr;
+
+       /* On failure this runs xilly_unmap(this), unmapping and freeing */
+       return devm_add_action_or_reset(ep->dev, xilly_unmap, this);
+}
+
 static int xilly_get_dma_buffers(struct xilly_endpoint *ep,
                                 struct xilly_alloc_state *s,
                                 struct xilly_buffer **buffers,
                        s->left_of_salami = allocsize;
                }
 
-               rc = ep->ephw->map_single(ep, s->salami,
-                                         bytebufsize, s->direction,
-                                         &dma_addr);
+               rc = xilly_map_single(ep, s->salami,
+                                     bytebufsize, s->direction,
+                                     &dma_addr);
                if (rc)
                        return rc;
 
                return -ENODEV;
        }
 
-       endpoint->ephw->hw_sync_sgl_for_cpu(
-               channel->endpoint,
-               channel->wr_buffers[0]->dma_addr,
-               channel->wr_buf_size,
-               DMA_FROM_DEVICE);
+       dma_sync_single_for_cpu(channel->endpoint->dev,
+                               channel->wr_buffers[0]->dma_addr,
+                               channel->wr_buf_size,
+                               DMA_FROM_DEVICE);
 
        if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
                dev_err(endpoint->dev,
                if (!empty) { /* Go on, now without the spinlock */
 
                        if (bufpos == 0) /* Position zero means it's virgin */
-                               channel->endpoint->ephw->hw_sync_sgl_for_cpu(
-                                       channel->endpoint,
-                                       channel->wr_buffers[bufidx]->dma_addr,
-                                       channel->wr_buf_size,
-                                       DMA_FROM_DEVICE);
+                               dma_sync_single_for_cpu(channel->endpoint->dev,
+                                                       channel->wr_buffers[bufidx]->dma_addr,
+                                                       channel->wr_buf_size,
+                                                       DMA_FROM_DEVICE);
 
                        if (copy_to_user(
                                    userbuf,
                        bytes_done += howmany;
 
                        if (bufferdone) {
-                               channel->endpoint->ephw->hw_sync_sgl_for_device(
-                                       channel->endpoint,
-                                       channel->wr_buffers[bufidx]->dma_addr,
-                                       channel->wr_buf_size,
-                                       DMA_FROM_DEVICE);
+                               dma_sync_single_for_device(channel->endpoint->dev,
+                                                          channel->wr_buffers[bufidx]->dma_addr,
+                                                          channel->wr_buf_size,
+                                                          DMA_FROM_DEVICE);
 
                                /*
                                 * Tell FPGA the buffer is done with. It's an
                else
                        channel->rd_host_buf_idx++;
 
-               channel->endpoint->ephw->hw_sync_sgl_for_device(
-                       channel->endpoint,
-                       channel->rd_buffers[bufidx]->dma_addr,
-                       channel->rd_buf_size,
-                       DMA_TO_DEVICE);
+               dma_sync_single_for_device(channel->endpoint->dev,
+                                          channel->rd_buffers[bufidx]->dma_addr,
+                                          channel->rd_buf_size,
+                                          DMA_TO_DEVICE);
 
                mutex_lock(&channel->endpoint->register_mutex);
 
 
                        if ((bufpos == 0) || /* Zero means it's virgin */
                            (channel->rd_leftovers[3] != 0)) {
-                               channel->endpoint->ephw->hw_sync_sgl_for_cpu(
-                                       channel->endpoint,
-                                       channel->rd_buffers[bufidx]->dma_addr,
-                                       channel->rd_buf_size,
-                                       DMA_TO_DEVICE);
+                               dma_sync_single_for_cpu(channel->endpoint->dev,
+                                                       channel->rd_buffers[bufidx]->dma_addr,
+                                                       channel->rd_buf_size,
+                                                       DMA_TO_DEVICE);
 
                                /* Virgin, but leftovers are due */
                                for (i = 0; i < bufpos; i++)
                        bytes_done += howmany;
 
                        if (bufferdone) {
-                               channel->endpoint->ephw->hw_sync_sgl_for_device(
-                                       channel->endpoint,
-                                       channel->rd_buffers[bufidx]->dma_addr,
-                                       channel->rd_buf_size,
-                                       DMA_TO_DEVICE);
+                               dma_sync_single_for_device(channel->endpoint->dev,
+                                                          channel->rd_buffers[bufidx]->dma_addr,
+                                                          channel->rd_buf_size,
+                                                          DMA_TO_DEVICE);
 
                                mutex_lock(&channel->endpoint->register_mutex);
 
        .poll       = xillybus_poll,
 };
 
-struct xilly_endpoint *xillybus_init_endpoint(struct device *dev,
-                                             struct xilly_endpoint_hardware
-                                             *ephw)
+struct xilly_endpoint *xillybus_init_endpoint(struct device *dev)
 {
        struct xilly_endpoint *endpoint;
 
                return NULL;
 
        endpoint->dev = dev;
-       endpoint->ephw = ephw;
        endpoint->msg_counter = 0x0b;
        endpoint->failed_messages = 0;
        endpoint->fatal_error = 0;
                goto failed_idt;
 
        rc = xillybus_init_chrdev(dev, &xillybus_fops,
-                                 endpoint->ephw->owner, endpoint,
+                                 endpoint->owner, endpoint,
                                  idt_handle.names,
                                  idt_handle.names_len,
                                  endpoint->num_channels,
 
 
 MODULE_DEVICE_TABLE(of, xillybus_of_match);
 
-static void xilly_dma_sync_single_for_cpu_of(struct xilly_endpoint *ep,
-                                            dma_addr_t dma_handle,
-                                            size_t size,
-                                            int direction)
-{
-       dma_sync_single_for_cpu(ep->dev, dma_handle, size, direction);
-}
-
-static void xilly_dma_sync_single_for_device_of(struct xilly_endpoint *ep,
-                                               dma_addr_t dma_handle,
-                                               size_t size,
-                                               int direction)
-{
-       dma_sync_single_for_device(ep->dev, dma_handle, size, direction);
-}
-
-static void xilly_dma_sync_single_nop(struct xilly_endpoint *ep,
-                                     dma_addr_t dma_handle,
-                                     size_t size,
-                                     int direction)
-{
-}
-
-static void xilly_of_unmap(void *ptr)
-{
-       struct xilly_mapping *data = ptr;
-
-       dma_unmap_single(data->device, data->dma_addr,
-                        data->size, data->direction);
-
-       kfree(ptr);
-}
-
-static int xilly_map_single_of(struct xilly_endpoint *ep,
-                              void *ptr,
-                              size_t size,
-                              int direction,
-                              dma_addr_t *ret_dma_handle
-       )
-{
-       dma_addr_t addr;
-       struct xilly_mapping *this;
-
-       this = kzalloc(sizeof(*this), GFP_KERNEL);
-       if (!this)
-               return -ENOMEM;
-
-       addr = dma_map_single(ep->dev, ptr, size, direction);
-
-       if (dma_mapping_error(ep->dev, addr)) {
-               kfree(this);
-               return -ENODEV;
-       }
-
-       this->device = ep->dev;
-       this->dma_addr = addr;
-       this->size = size;
-       this->direction = direction;
-
-       *ret_dma_handle = addr;
-
-       return devm_add_action_or_reset(ep->dev, xilly_of_unmap, this);
-}
-
-static struct xilly_endpoint_hardware of_hw = {
-       .owner = THIS_MODULE,
-       .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_of,
-       .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_of,
-       .map_single = xilly_map_single_of,
-};
-
-static struct xilly_endpoint_hardware of_hw_coherent = {
-       .owner = THIS_MODULE,
-       .hw_sync_sgl_for_cpu = xilly_dma_sync_single_nop,
-       .hw_sync_sgl_for_device = xilly_dma_sync_single_nop,
-       .map_single = xilly_map_single_of,
-};
-
 static int xilly_drv_probe(struct platform_device *op)
 {
        struct device *dev = &op->dev;
        struct xilly_endpoint *endpoint;
        int rc;
        int irq;
-       struct xilly_endpoint_hardware *ephw = &of_hw;
 
-       if (of_property_read_bool(dev->of_node, "dma-coherent"))
-               ephw = &of_hw_coherent;
-
-       endpoint = xillybus_init_endpoint(dev, ephw);
+       endpoint = xillybus_init_endpoint(dev);
 
        if (!endpoint)
                return -ENOMEM;
 
        dev_set_drvdata(dev, endpoint);
 
+       endpoint->owner = THIS_MODULE;
+
        endpoint->registers = devm_platform_ioremap_resource(op, 0);
        if (IS_ERR(endpoint->registers))
                return PTR_ERR(endpoint->registers);
 
        { /* End: all zeroes */ }
 };
 
-static int xilly_pci_direction(int direction)
-{
-       switch (direction) {
-       case DMA_TO_DEVICE:
-       case DMA_FROM_DEVICE:
-               return direction;
-       default:
-               return DMA_BIDIRECTIONAL;
-       }
-}
-
-static void xilly_dma_sync_single_for_cpu_pci(struct xilly_endpoint *ep,
-                                             dma_addr_t dma_handle,
-                                             size_t size,
-                                             int direction)
-{
-       dma_sync_single_for_cpu(ep->dev, dma_handle, size,
-                               xilly_pci_direction(direction));
-}
-
-static void xilly_dma_sync_single_for_device_pci(struct xilly_endpoint *ep,
-                                                dma_addr_t dma_handle,
-                                                size_t size,
-                                                int direction)
-{
-       dma_sync_single_for_device(ep->dev, dma_handle, size,
-                                  xilly_pci_direction(direction));
-}
-
-static void xilly_pci_unmap(void *ptr)
-{
-       struct xilly_mapping *data = ptr;
-
-       dma_unmap_single(data->device, data->dma_addr, data->size,
-                        data->direction);
-
-       kfree(ptr);
-}
-
-/*
- * Map either through the PCI DMA mapper or the non_PCI one. Behind the
- * scenes exactly the same functions are called with the same parameters,
- * but that can change.
- */
-
-static int xilly_map_single_pci(struct xilly_endpoint *ep,
-                               void *ptr,
-                               size_t size,
-                               int direction,
-                               dma_addr_t *ret_dma_handle
-       )
-{
-       int pci_direction;
-       dma_addr_t addr;
-       struct xilly_mapping *this;
-
-       this = kzalloc(sizeof(*this), GFP_KERNEL);
-       if (!this)
-               return -ENOMEM;
-
-       pci_direction = xilly_pci_direction(direction);
-
-       addr = dma_map_single(ep->dev, ptr, size, pci_direction);
-
-       if (dma_mapping_error(ep->dev, addr)) {
-               kfree(this);
-               return -ENODEV;
-       }
-
-       this->device = ep->dev;
-       this->dma_addr = addr;
-       this->size = size;
-       this->direction = pci_direction;
-
-       *ret_dma_handle = addr;
-
-       return devm_add_action_or_reset(ep->dev, xilly_pci_unmap, this);
-}
-
-static struct xilly_endpoint_hardware pci_hw = {
-       .owner = THIS_MODULE,
-       .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_pci,
-       .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_pci,
-       .map_single = xilly_map_single_pci,
-};
-
 static int xilly_probe(struct pci_dev *pdev,
                       const struct pci_device_id *ent)
 {
        struct xilly_endpoint *endpoint;
        int rc;
 
-       endpoint = xillybus_init_endpoint(&pdev->dev, &pci_hw);
+       endpoint = xillybus_init_endpoint(&pdev->dev);
 
        if (!endpoint)
                return -ENOMEM;
 
        pci_set_drvdata(pdev, endpoint);
 
+       endpoint->owner = THIS_MODULE;
+
        rc = pcim_enable_device(pdev);
        if (rc) {
                dev_err(endpoint->dev,