* DMA controller
 
 Required properties:
-- compatible:  should be "renesas,shdma"
+- compatible:  should be of the form "renesas,shdma-<soc>", where <soc> is
+               replaced with the SoC model, e.g.
+               "renesas,shdma-r8a73a4" for the system DMAC on the r8a73a4 SoC
 
 Example:
-       dmac: dma-mux0 {
+       dmac: dma-multiplexer@0 {
                compatible = "renesas,shdma-mux";
                #dma-cells = <1>;
-               dma-channels = <6>;
+               dma-channels = <20>;
                dma-requests = <256>;
-               reg = <0 0>;    /* Needed for AUXDATA */
-               #address-cells = <1>;
-               #size-cells = <1>;
+               #address-cells = <2>;
+               #size-cells = <2>;
                ranges;
 
-               dma0: shdma@fe008020 {
-                       compatible = "renesas,shdma";
-                       reg = <0xfe008020 0x270>,
-                               <0xfe009000 0xc>;
+               dma0: dma-controller@e6700020 {
+                       compatible = "renesas,shdma-r8a73a4";
+                       reg = <0 0xe6700020 0 0x89e0>;
                        interrupt-parent = <&gic>;
-                       interrupts = <0 34 4
-                                       0 28 4
-                                       0 29 4
-                                       0 30 4
-                                       0 31 4
-                                       0 32 4
-                                       0 33 4>;
+                       interrupts = <0 220 4
+                                       0 200 4
+                                       0 201 4
+                                       0 202 4
+                                       0 203 4
+                                       0 204 4
+                                       0 205 4
+                                       0 206 4
+                                       0 207 4
+                                       0 208 4
+                                       0 209 4
+                                       0 210 4
+                                       0 211 4
+                                       0 212 4
+                                       0 213 4
+                                       0 214 4
+                                       0 215 4
+                                       0 216 4
+                                       0 217 4
+                                       0 218 4
+                                       0 219 4>;
                        interrupt-names = "error",
                                        "ch0", "ch1", "ch2", "ch3",
-                                       "ch4", "ch5";
-               };
-
-               dma1: shdma@fe018020 {
-                       ...
-               };
-
-               dma2: shdma@fe028020 {
-                       ...
+                                       "ch4", "ch5", "ch6", "ch7",
+                                       "ch8", "ch9", "ch10", "ch11",
+                                       "ch12", "ch13", "ch14", "ch15",
+                                       "ch16", "ch17", "ch18", "ch19";
                };
        };
 
 
 obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
 obj-$(CONFIG_SH_DMAE) += shdma.o
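+# the shdma.o module is now linked from the renamed core source, shdmac.o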
+shdma-y := shdmac.o
+shdma-objs := $(shdma-y)
 obj-$(CONFIG_SUDMAC) += sudmac.o
 
        return NULL;
 }
 
-static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
+static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
+                            dma_addr_t slave_addr)
 {
        struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
        const struct shdma_ops *ops = sdev->ops;
 
        if (schan->dev->of_node) {
                match = schan->hw_req;
-               ret = ops->set_slave(schan, match, true);
+               ret = ops->set_slave(schan, match, slave_addr, true);
                if (ret < 0)
                        return ret;
 
        if (test_and_set_bit(slave_id, shdma_slave_used))
                return -EBUSY;
 
-       ret = ops->set_slave(schan, match, false);
+       ret = ops->set_slave(schan, match, slave_addr, false);
        if (ret < 0) {
                clear_bit(slave_id, shdma_slave_used);
                return ret;
        if (!schan->dev->of_node && match >= slave_num)
                return false;
 
-       ret = ops->set_slave(schan, match, true);
+       ret = ops->set_slave(schan, match, 0, true);
        if (ret < 0)
                return false;
 
         */
        if (slave) {
                /* Legacy mode: .private is set in filter */
-               ret = shdma_setup_slave(schan, slave->slave_id);
+               ret = shdma_setup_slave(schan, slave->slave_id, 0);
                if (ret < 0)
                        goto esetslave;
        } else {
                 * channel, while using it...
                 */
                config = (struct dma_slave_config *)arg;
-               ret = shdma_setup_slave(schan, config->slave_id);
+               ret = shdma_setup_slave(schan, config->slave_id,
+                                       config->direction == DMA_DEV_TO_MEM ?
+                                       config->src_addr : config->dst_addr);
                if (ret < 0)
                        return ret;
                break;
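
With slave_addr now plumbed through to the set_slave() hook, a dmaengine
client can pass the device FIFO address via the standard dma_slave_config
fields instead of relying on the slave table alone. A minimal client-side
sketch (hypothetical: the channel request is omitted, and the FIFO address
and slave ID are made-up values):

	/* The address set here reaches shdma_setup_slave() as slave_addr
	 * and takes precedence over the slave-table default address. */
	struct dma_slave_config slcfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= 0xe6c40060,	/* made-up device FIFO address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.slave_id	= rx_id,	/* made-up slave ID */
	};
	int ret = dmaengine_slave_config(chan, &slcfg);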
 
        const struct of_dev_auxdata *lookup = pdev->dev.platform_data;
        int ret;
 
-       if (!lookup)
-               return -EINVAL;
-
        ret = of_dma_controller_register(pdev->dev.of_node,
                                         shdma_of_xlate, pdev);
        if (ret < 0)
 
+++ /dev/null
-/*
- * Renesas SuperH DMA Engine support
- *
- * base is drivers/dma/flsdma.c
- *
- * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
- * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
- * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * - DMA of SuperH does not have Hardware DMA chain mode.
- * - MAX DMA size is 16MB.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/sh_dma.h>
-#include <linux/notifier.h>
-#include <linux/kdebug.h>
-#include <linux/spinlock.h>
-#include <linux/rculist.h>
-
-#include "../dmaengine.h"
-#include "shdma.h"
-
-#define SH_DMAE_DRV_NAME "sh-dma-engine"
-
-/* Default MEMCPY transfer size = 2^2 = 4 bytes */
-#define LOG2_DEFAULT_XFER_SIZE 2
-#define SH_DMA_SLAVE_NUMBER 256
-#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
-
-/*
- * Used for write-side mutual exclusion for the global device list,
- * read-side synchronization by way of RCU, and per-controller data.
- */
-static DEFINE_SPINLOCK(sh_dmae_lock);
-static LIST_HEAD(sh_dmae_devices);
-
-/*
- * Different DMAC implementations provide different ways to clear DMA channels:
- * (1) none - no CHCLR registers are available
- * (2) one CHCLR register per channel - 0 has to be written to it to clear
- *     channel buffers
- * (3) one CHCLR per several channels - 1 has to be written to the bit,
- *     corresponding to the specific channel to reset it
- */
-static void channel_clear(struct sh_dmae_chan *sh_dc)
-{
-       struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
-       const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
-               sh_dc->shdma_chan.id;
-       u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;
-
-       __raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
-}
-
-static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
-{
-       __raw_writel(data, sh_dc->base + reg);
-}
-
-static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
-{
-       return __raw_readl(sh_dc->base + reg);
-}
-
-static u16 dmaor_read(struct sh_dmae_device *shdev)
-{
-       void __iomem *addr = shdev->chan_reg + DMAOR;
-
-       if (shdev->pdata->dmaor_is_32bit)
-               return __raw_readl(addr);
-       else
-               return __raw_readw(addr);
-}
-
-static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
-{
-       void __iomem *addr = shdev->chan_reg + DMAOR;
-
-       if (shdev->pdata->dmaor_is_32bit)
-               __raw_writel(data, addr);
-       else
-               __raw_writew(data, addr);
-}
-
-static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
-{
-       struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
-
-       __raw_writel(data, sh_dc->base + shdev->chcr_offset);
-}
-
-static u32 chcr_read(struct sh_dmae_chan *sh_dc)
-{
-       struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
-
-       return __raw_readl(sh_dc->base + shdev->chcr_offset);
-}
-
-/*
- * Reset DMA controller
- *
- * SH7780 has two DMAOR register
- */
-static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
-{
-       unsigned short dmaor;
-       unsigned long flags;
-
-       spin_lock_irqsave(&sh_dmae_lock, flags);
-
-       dmaor = dmaor_read(shdev);
-       dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
-
-       spin_unlock_irqrestore(&sh_dmae_lock, flags);
-}
-
-static int sh_dmae_rst(struct sh_dmae_device *shdev)
-{
-       unsigned short dmaor;
-       unsigned long flags;
-
-       spin_lock_irqsave(&sh_dmae_lock, flags);
-
-       dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
-
-       if (shdev->pdata->chclr_present) {
-               int i;
-               for (i = 0; i < shdev->pdata->channel_num; i++) {
-                       struct sh_dmae_chan *sh_chan = shdev->chan[i];
-                       if (sh_chan)
-                               channel_clear(sh_chan);
-               }
-       }
-
-       dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
-
-       dmaor = dmaor_read(shdev);
-
-       spin_unlock_irqrestore(&sh_dmae_lock, flags);
-
-       if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
-               dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
-               return -EIO;
-       }
-       if (shdev->pdata->dmaor_init & ~dmaor)
-               dev_warn(shdev->shdma_dev.dma_dev.dev,
-                        "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
-                        dmaor, shdev->pdata->dmaor_init);
-       return 0;
-}
-
-static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
-{
-       u32 chcr = chcr_read(sh_chan);
-
-       if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
-               return true; /* working */
-
-       return false; /* waiting */
-}
-
-static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
-{
-       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
-       const struct sh_dmae_pdata *pdata = shdev->pdata;
-       int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
-               ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
-
-       if (cnt >= pdata->ts_shift_num)
-               cnt = 0;
-
-       return pdata->ts_shift[cnt];
-}
-
-static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
-{
-       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
-       const struct sh_dmae_pdata *pdata = shdev->pdata;
-       int i;
-
-       for (i = 0; i < pdata->ts_shift_num; i++)
-               if (pdata->ts_shift[i] == l2size)
-                       break;
-
-       if (i == pdata->ts_shift_num)
-               i = 0;
-
-       return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
-               ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
-}
-
-static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
-{
-       sh_dmae_writel(sh_chan, hw->sar, SAR);
-       sh_dmae_writel(sh_chan, hw->dar, DAR);
-       sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
-}
-
-static void dmae_start(struct sh_dmae_chan *sh_chan)
-{
-       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
-       u32 chcr = chcr_read(sh_chan);
-
-       if (shdev->pdata->needs_tend_set)
-               sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
-
-       chcr |= CHCR_DE | shdev->chcr_ie_bit;
-       chcr_write(sh_chan, chcr & ~CHCR_TE);
-}
-
-static void dmae_init(struct sh_dmae_chan *sh_chan)
-{
-       /*
-        * Default configuration for dual address memory-memory transfer.
-        * 0x400 represents auto-request.
-        */
-       u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
-                                                  LOG2_DEFAULT_XFER_SIZE);
-       sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
-       chcr_write(sh_chan, chcr);
-}
-
-static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
-{
-       /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
-       if (dmae_is_busy(sh_chan))
-               return -EBUSY;
-
-       sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
-       chcr_write(sh_chan, val);
-
-       return 0;
-}
-
-static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
-{
-       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
-       const struct sh_dmae_pdata *pdata = shdev->pdata;
-       const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
-       void __iomem *addr = shdev->dmars;
-       unsigned int shift = chan_pdata->dmars_bit;
-
-       if (dmae_is_busy(sh_chan))
-               return -EBUSY;
-
-       if (pdata->no_dmars)
-               return 0;
-
-       /* in the case of a missing DMARS resource use first memory window */
-       if (!addr)
-               addr = shdev->chan_reg;
-       addr += chan_pdata->dmars;
-
-       __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
-                    addr);
-
-       return 0;
-}
-
-static void sh_dmae_start_xfer(struct shdma_chan *schan,
-                              struct shdma_desc *sdesc)
-{
-       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
-                                                   shdma_chan);
-       struct sh_dmae_desc *sh_desc = container_of(sdesc,
-                                       struct sh_dmae_desc, shdma_desc);
-       dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
-               sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
-               sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
-       /* Get the ld start address from ld_queue */
-       dmae_set_reg(sh_chan, &sh_desc->hw);
-       dmae_start(sh_chan);
-}
-
-static bool sh_dmae_channel_busy(struct shdma_chan *schan)
-{
-       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
-                                                   shdma_chan);
-       return dmae_is_busy(sh_chan);
-}
-
-static void sh_dmae_setup_xfer(struct shdma_chan *schan,
-                              int slave_id)
-{
-       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
-                                                   shdma_chan);
-
-       if (slave_id >= 0) {
-               const struct sh_dmae_slave_config *cfg =
-                       sh_chan->config;
-
-               dmae_set_dmars(sh_chan, cfg->mid_rid);
-               dmae_set_chcr(sh_chan, cfg->chcr);
-       } else {
-               dmae_init(sh_chan);
-       }
-}
-
-/*
- * Find a slave channel configuration from the contoller list by either a slave
- * ID in the non-DT case, or by a MID/RID value in the DT case
- */
-static const struct sh_dmae_slave_config *dmae_find_slave(
-       struct sh_dmae_chan *sh_chan, int match)
-{
-       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
-       const struct sh_dmae_pdata *pdata = shdev->pdata;
-       const struct sh_dmae_slave_config *cfg;
-       int i;
-
-       if (!sh_chan->shdma_chan.dev->of_node) {
-               if (match >= SH_DMA_SLAVE_NUMBER)
-                       return NULL;
-
-               for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
-                       if (cfg->slave_id == match)
-                               return cfg;
-       } else {
-               for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
-                       if (cfg->mid_rid == match) {
-                               sh_chan->shdma_chan.slave_id = cfg->slave_id;
-                               return cfg;
-                       }
-       }
-
-       return NULL;
-}
-
-static int sh_dmae_set_slave(struct shdma_chan *schan,
-                            int slave_id, bool try)
-{
-       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
-                                                   shdma_chan);
-       const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
-       if (!cfg)
-               return -ENXIO;
-
-       if (!try)
-               sh_chan->config = cfg;
-
-       return 0;
-}
-
-static void dmae_halt(struct sh_dmae_chan *sh_chan)
-{
-       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
-       u32 chcr = chcr_read(sh_chan);
-
-       chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
-       chcr_write(sh_chan, chcr);
-}
-
-static int sh_dmae_desc_setup(struct shdma_chan *schan,
-                             struct shdma_desc *sdesc,
-                             dma_addr_t src, dma_addr_t dst, size_t *len)
-{
-       struct sh_dmae_desc *sh_desc = container_of(sdesc,
-                                       struct sh_dmae_desc, shdma_desc);
-
-       if (*len > schan->max_xfer_len)
-               *len = schan->max_xfer_len;
-
-       sh_desc->hw.sar = src;
-       sh_desc->hw.dar = dst;
-       sh_desc->hw.tcr = *len;
-
-       return 0;
-}
-
-static void sh_dmae_halt(struct shdma_chan *schan)
-{
-       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
-                                                   shdma_chan);
-       dmae_halt(sh_chan);
-}
-
-static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
-{
-       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
-                                                   shdma_chan);
-
-       if (!(chcr_read(sh_chan) & CHCR_TE))
-               return false;
-
-       /* DMA stop */
-       dmae_halt(sh_chan);
-
-       return true;
-}
-
-static size_t sh_dmae_get_partial(struct shdma_chan *schan,
-                                 struct shdma_desc *sdesc)
-{
-       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
-                                                   shdma_chan);
-       struct sh_dmae_desc *sh_desc = container_of(sdesc,
-                                       struct sh_dmae_desc, shdma_desc);
-       return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
-               sh_chan->xmit_shift;
-}
-
-/* Called from error IRQ or NMI */
-static bool sh_dmae_reset(struct sh_dmae_device *shdev)
-{
-       bool ret;
-
-       /* halt the dma controller */
-       sh_dmae_ctl_stop(shdev);
-
-       /* We cannot detect, which channel caused the error, have to reset all */
-       ret = shdma_reset(&shdev->shdma_dev);
-
-       sh_dmae_rst(shdev);
-
-       return ret;
-}
-
-static irqreturn_t sh_dmae_err(int irq, void *data)
-{
-       struct sh_dmae_device *shdev = data;
-
-       if (!(dmaor_read(shdev) & DMAOR_AE))
-               return IRQ_NONE;
-
-       sh_dmae_reset(shdev);
-       return IRQ_HANDLED;
-}
-
-static bool sh_dmae_desc_completed(struct shdma_chan *schan,
-                                  struct shdma_desc *sdesc)
-{
-       struct sh_dmae_chan *sh_chan = container_of(schan,
-                                       struct sh_dmae_chan, shdma_chan);
-       struct sh_dmae_desc *sh_desc = container_of(sdesc,
-                                       struct sh_dmae_desc, shdma_desc);
-       u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
-       u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
-
-       return  (sdesc->direction == DMA_DEV_TO_MEM &&
-                (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
-               (sdesc->direction != DMA_DEV_TO_MEM &&
-                (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
-}
-
-static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
-{
-       /* Fast path out if NMIF is not asserted for this controller */
-       if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
-               return false;
-
-       return sh_dmae_reset(shdev);
-}
-
-static int sh_dmae_nmi_handler(struct notifier_block *self,
-                              unsigned long cmd, void *data)
-{
-       struct sh_dmae_device *shdev;
-       int ret = NOTIFY_DONE;
-       bool triggered;
-
-       /*
-        * Only concern ourselves with NMI events.
-        *
-        * Normally we would check the die chain value, but as this needs
-        * to be architecture independent, check for NMI context instead.
-        */
-       if (!in_nmi())
-               return NOTIFY_DONE;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
-               /*
-                * Only stop if one of the controllers has NMIF asserted,
-                * we do not want to interfere with regular address error
-                * handling or NMI events that don't concern the DMACs.
-                */
-               triggered = sh_dmae_nmi_notify(shdev);
-               if (triggered == true)
-                       ret = NOTIFY_OK;
-       }
-       rcu_read_unlock();
-
-       return ret;
-}
-
-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
-       .notifier_call  = sh_dmae_nmi_handler,
-
-       /* Run before NMI debug handler and KGDB */
-       .priority       = 1,
-};
-
-static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
-                                       int irq, unsigned long flags)
-{
-       const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
-       struct shdma_dev *sdev = &shdev->shdma_dev;
-       struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
-       struct sh_dmae_chan *sh_chan;
-       struct shdma_chan *schan;
-       int err;
-
-       sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
-                              GFP_KERNEL);
-       if (!sh_chan) {
-               dev_err(sdev->dma_dev.dev,
-                       "No free memory for allocating dma channels!\n");
-               return -ENOMEM;
-       }
-
-       schan = &sh_chan->shdma_chan;
-       schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
-
-       shdma_chan_probe(sdev, schan, id);
-
-       sh_chan->base = shdev->chan_reg + chan_pdata->offset;
-
-       /* set up channel irq */
-       if (pdev->id >= 0)
-               snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
-                        "sh-dmae%d.%d", pdev->id, id);
-       else
-               snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
-                        "sh-dma%d", id);
-
-       err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
-       if (err) {
-               dev_err(sdev->dma_dev.dev,
-                       "DMA channel %d request_irq error %d\n",
-                       id, err);
-               goto err_no_irq;
-       }
-
-       shdev->chan[id] = sh_chan;
-       return 0;
-
-err_no_irq:
-       /* remove from dmaengine device node */
-       shdma_chan_remove(schan);
-       return err;
-}
-
-static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
-{
-       struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
-       struct shdma_chan *schan;
-       int i;
-
-       shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
-               BUG_ON(!schan);
-
-               shdma_chan_remove(schan);
-       }
-       dma_dev->chancnt = 0;
-}
-
-static void sh_dmae_shutdown(struct platform_device *pdev)
-{
-       struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
-       sh_dmae_ctl_stop(shdev);
-}
-
-static int sh_dmae_runtime_suspend(struct device *dev)
-{
-       return 0;
-}
-
-static int sh_dmae_runtime_resume(struct device *dev)
-{
-       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
-
-       return sh_dmae_rst(shdev);
-}
-
-#ifdef CONFIG_PM
-static int sh_dmae_suspend(struct device *dev)
-{
-       return 0;
-}
-
-static int sh_dmae_resume(struct device *dev)
-{
-       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
-       int i, ret;
-
-       ret = sh_dmae_rst(shdev);
-       if (ret < 0)
-               dev_err(dev, "Failed to reset!\n");
-
-       for (i = 0; i < shdev->pdata->channel_num; i++) {
-               struct sh_dmae_chan *sh_chan = shdev->chan[i];
-
-               if (!sh_chan->shdma_chan.desc_num)
-                       continue;
-
-               if (sh_chan->shdma_chan.slave_id >= 0) {
-                       const struct sh_dmae_slave_config *cfg = sh_chan->config;
-                       dmae_set_dmars(sh_chan, cfg->mid_rid);
-                       dmae_set_chcr(sh_chan, cfg->chcr);
-               } else {
-                       dmae_init(sh_chan);
-               }
-       }
-
-       return 0;
-}
-#else
-#define sh_dmae_suspend NULL
-#define sh_dmae_resume NULL
-#endif
-
-const struct dev_pm_ops sh_dmae_pm = {
-       .suspend                = sh_dmae_suspend,
-       .resume                 = sh_dmae_resume,
-       .runtime_suspend        = sh_dmae_runtime_suspend,
-       .runtime_resume         = sh_dmae_runtime_resume,
-};
-
-static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
-{
-       struct sh_dmae_chan *sh_chan = container_of(schan,
-                                       struct sh_dmae_chan, shdma_chan);
-
-       /*
-        * Implicit BUG_ON(!sh_chan->config)
-        * This is an exclusive slave DMA operation, may only be called after a
-        * successful slave configuration.
-        */
-       return sh_chan->config->addr;
-}
-
-static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
-{
-       return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
-}
-
-static const struct shdma_ops sh_dmae_shdma_ops = {
-       .desc_completed = sh_dmae_desc_completed,
-       .halt_channel = sh_dmae_halt,
-       .channel_busy = sh_dmae_channel_busy,
-       .slave_addr = sh_dmae_slave_addr,
-       .desc_setup = sh_dmae_desc_setup,
-       .set_slave = sh_dmae_set_slave,
-       .setup_xfer = sh_dmae_setup_xfer,
-       .start_xfer = sh_dmae_start_xfer,
-       .embedded_desc = sh_dmae_embedded_desc,
-       .chan_irq = sh_dmae_chan_irq,
-       .get_partial = sh_dmae_get_partial,
-};
-
-static int sh_dmae_probe(struct platform_device *pdev)
-{
-       const struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
-       unsigned long irqflags = IRQF_DISABLED,
-               chan_flag[SH_DMAE_MAX_CHANNELS] = {};
-       int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
-       int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
-       struct sh_dmae_device *shdev;
-       struct dma_device *dma_dev;
-       struct resource *chan, *dmars, *errirq_res, *chanirq_res;
-
-       /* get platform data */
-       if (!pdata || !pdata->channel_num)
-               return -ENODEV;
-
-       chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       /* DMARS area is optional */
-       dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       /*
-        * IRQ resources:
-        * 1. there always must be at least one IRQ IO-resource. On SH4 it is
-        *    the error IRQ, in which case it is the only IRQ in this resource:
-        *    start == end. If it is the only IRQ resource, all channels also
-        *    use the same IRQ.
-        * 2. DMA channel IRQ resources can be specified one per resource or in
-        *    ranges (start != end)
-        * 3. iff all events (channels and, optionally, error) on this
-        *    controller use the same IRQ, only one IRQ resource can be
-        *    specified, otherwise there must be one IRQ per channel, even if
-        *    some of them are equal
-        * 4. if all IRQs on this controller are equal or if some specific IRQs
-        *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
-        *    requested with the IRQF_SHARED flag
-        */
-       errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!chan || !errirq_res)
-               return -ENODEV;
-
-       shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
-                            GFP_KERNEL);
-       if (!shdev) {
-               dev_err(&pdev->dev, "Not enough memory\n");
-               return -ENOMEM;
-       }
-
-       dma_dev = &shdev->shdma_dev.dma_dev;
-
-       shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
-       if (IS_ERR(shdev->chan_reg))
-               return PTR_ERR(shdev->chan_reg);
-       if (dmars) {
-               shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
-               if (IS_ERR(shdev->dmars))
-                       return PTR_ERR(shdev->dmars);
-       }
-
-       if (!pdata->slave_only)
-               dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
-       if (pdata->slave && pdata->slave_num)
-               dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
-
-       /* Default transfer size of 32 bytes requires 32-byte alignment */
-       dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
-
-       shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
-       shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
-       err = shdma_init(&pdev->dev, &shdev->shdma_dev,
-                             pdata->channel_num);
-       if (err < 0)
-               goto eshdma;
-
-       /* platform data */
-       shdev->pdata = pdata;
-
-       if (pdata->chcr_offset)
-               shdev->chcr_offset = pdata->chcr_offset;
-       else
-               shdev->chcr_offset = CHCR;
-
-       if (pdata->chcr_ie_bit)
-               shdev->chcr_ie_bit = pdata->chcr_ie_bit;
-       else
-               shdev->chcr_ie_bit = CHCR_IE;
-
-       platform_set_drvdata(pdev, shdev);
-
-       pm_runtime_enable(&pdev->dev);
-       err = pm_runtime_get_sync(&pdev->dev);
-       if (err < 0)
-               dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
-
-       spin_lock_irq(&sh_dmae_lock);
-       list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
-       spin_unlock_irq(&sh_dmae_lock);
-
-       /* reset dma controller - only needed as a test */
-       err = sh_dmae_rst(shdev);
-       if (err)
-               goto rst_err;
-
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
-       chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
-
-       if (!chanirq_res)
-               chanirq_res = errirq_res;
-       else
-               irqres++;
-
-       if (chanirq_res == errirq_res ||
-           (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
-               irqflags = IRQF_SHARED;
-
-       errirq = errirq_res->start;
-
-       err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
-                              "DMAC Address Error", shdev);
-       if (err) {
-               dev_err(&pdev->dev,
-                       "DMA failed requesting irq #%d, error %d\n",
-                       errirq, err);
-               goto eirq_err;
-       }
-
-#else
-       chanirq_res = errirq_res;
-#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
-
-       if (chanirq_res->start == chanirq_res->end &&
-           !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
-               /* Special case - all multiplexed */
-               for (; irq_cnt < pdata->channel_num; irq_cnt++) {
-                       if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
-                               chan_irq[irq_cnt] = chanirq_res->start;
-                               chan_flag[irq_cnt] = IRQF_SHARED;
-                       } else {
-                               irq_cap = 1;
-                               break;
-                       }
-               }
-       } else {
-               do {
-                       for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
-                               if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
-                                       irq_cap = 1;
-                                       break;
-                               }
-
-                               if ((errirq_res->flags & IORESOURCE_BITS) ==
-                                   IORESOURCE_IRQ_SHAREABLE)
-                                       chan_flag[irq_cnt] = IRQF_SHARED;
-                               else
-                                       chan_flag[irq_cnt] = IRQF_DISABLED;
-                               dev_dbg(&pdev->dev,
-                                       "Found IRQ %d for channel %d\n",
-                                       i, irq_cnt);
-                               chan_irq[irq_cnt++] = i;
-                       }
-
-                       if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
-                               break;
-
-                       chanirq_res = platform_get_resource(pdev,
-                                               IORESOURCE_IRQ, ++irqres);
-               } while (irq_cnt < pdata->channel_num && chanirq_res);
-       }
-
-       /* Create DMA Channel */
-       for (i = 0; i < irq_cnt; i++) {
-               err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
-               if (err)
-                       goto chan_probe_err;
-       }
-
-       if (irq_cap)
-               dev_notice(&pdev->dev, "Attempting to register %d DMA "
-                          "channels when a maximum of %d are supported.\n",
-                          pdata->channel_num, SH_DMAE_MAX_CHANNELS);
-
-       pm_runtime_put(&pdev->dev);
-
-       err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
-       if (err < 0)
-               goto edmadevreg;
-
-       return err;
-
-edmadevreg:
-       pm_runtime_get(&pdev->dev);
-
-chan_probe_err:
-       sh_dmae_chan_remove(shdev);
-
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
-eirq_err:
-#endif
-rst_err:
-       spin_lock_irq(&sh_dmae_lock);
-       list_del_rcu(&shdev->node);
-       spin_unlock_irq(&sh_dmae_lock);
-
-       pm_runtime_put(&pdev->dev);
-       pm_runtime_disable(&pdev->dev);
-
-       platform_set_drvdata(pdev, NULL);
-       shdma_cleanup(&shdev->shdma_dev);
-eshdma:
-       synchronize_rcu();
-
-       return err;
-}
-
-static int sh_dmae_remove(struct platform_device *pdev)
-{
-       struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
-       struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
-       struct resource *res;
-       int errirq = platform_get_irq(pdev, 0);
-
-       dma_async_device_unregister(dma_dev);
-
-       if (errirq > 0)
-               free_irq(errirq, shdev);
-
-       spin_lock_irq(&sh_dmae_lock);
-       list_del_rcu(&shdev->node);
-       spin_unlock_irq(&sh_dmae_lock);
-
-       pm_runtime_disable(&pdev->dev);
-
-       sh_dmae_chan_remove(shdev);
-       shdma_cleanup(&shdev->shdma_dev);
-
-       platform_set_drvdata(pdev, NULL);
-
-       synchronize_rcu();
-
-       return 0;
-}
-
-static const struct of_device_id sh_dmae_of_match[] = {
-       { .compatible = "renesas,shdma", },
-       { }
-};
-MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
-
-static struct platform_driver sh_dmae_driver = {
-       .driver         = {
-               .owner  = THIS_MODULE,
-               .pm     = &sh_dmae_pm,
-               .name   = SH_DMAE_DRV_NAME,
-               .of_match_table = sh_dmae_of_match,
-       },
-       .remove         = sh_dmae_remove,
-       .shutdown       = sh_dmae_shutdown,
-};
-
-static int __init sh_dmae_init(void)
-{
-       /* Wire up NMI handling */
-       int err = register_die_notifier(&sh_dmae_nmi_notifier);
-       if (err)
-               return err;
-
-       return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
-}
-module_init(sh_dmae_init);
-
-static void __exit sh_dmae_exit(void)
-{
-       platform_driver_unregister(&sh_dmae_driver);
-
-       unregister_die_notifier(&sh_dmae_nmi_notifier);
-}
-module_exit(sh_dmae_exit);
-
-MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
-MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
 
        void __iomem *base;
        char dev_id[16];                /* unique name per DMAC of channel */
        int pm_error;
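+       /* address from dma_slave_config; falls back to the slave-table default */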
+       dma_addr_t slave_addr;
 };
 
 struct sh_dmae_device {
 
--- /dev/null
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * base is drivers/dma/fsldma.c
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * - DMA of SuperH does not have Hardware DMA chain mode.
+ * - MAX DMA size is 16MB.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
+#include <linux/notifier.h>
+#include <linux/kdebug.h>
+#include <linux/spinlock.h>
+#include <linux/rculist.h>
+
+#include "../dmaengine.h"
+#include "shdma.h"
+
+#define SH_DMAE_DRV_NAME "sh-dma-engine"
+
+/* Default MEMCPY transfer size = 2^2 = 4 bytes */
+#define LOG2_DEFAULT_XFER_SIZE 2
+#define SH_DMA_SLAVE_NUMBER 256
+#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
+
+/*
+ * Used for write-side mutual exclusion for the global device list,
+ * read-side synchronization by way of RCU, and per-controller data.
+ */
+static DEFINE_SPINLOCK(sh_dmae_lock);
+static LIST_HEAD(sh_dmae_devices);
+
+/*
+ * Different DMAC implementations provide different ways to clear DMA channels:
+ * (1) none - no CHCLR registers are available
+ * (2) one CHCLR register per channel - 0 has to be written to it to clear
+ *     channel buffers
+ * (3) one CHCLR per several channels - 1 has to be written to the bit,
+ *     corresponding to the specific channel to reset it
+ */
+static void channel_clear(struct sh_dmae_chan *sh_dc)
+{
+       struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+       const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
+               sh_dc->shdma_chan.id;
+       u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;
+
+       __raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
+}
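+
+/*
+ * Illustration (hypothetical pdata values, not from a real board file):
+ * variant (2) is .chclr_present = 1, .chclr_bitwise = 0, making
+ * channel_clear() write 0 to the channel's own CHCLR register, while
+ * variant (3) is .chclr_present = 1, .chclr_bitwise = 1, making it write
+ * 1 << chclr_bit to the shared CHCLR register.
+ */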
+
+static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
+{
+       __raw_writel(data, sh_dc->base + reg);
+}
+
+static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
+{
+       return __raw_readl(sh_dc->base + reg);
+}
+
+static u16 dmaor_read(struct sh_dmae_device *shdev)
+{
+       void __iomem *addr = shdev->chan_reg + DMAOR;
+
+       if (shdev->pdata->dmaor_is_32bit)
+               return __raw_readl(addr);
+       else
+               return __raw_readw(addr);
+}
+
+static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
+{
+       void __iomem *addr = shdev->chan_reg + DMAOR;
+
+       if (shdev->pdata->dmaor_is_32bit)
+               __raw_writel(data, addr);
+       else
+               __raw_writew(data, addr);
+}
+
+static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+       struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+       __raw_writel(data, sh_dc->base + shdev->chcr_offset);
+}
+
+static u32 chcr_read(struct sh_dmae_chan *sh_dc)
+{
+       struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+       return __raw_readl(sh_dc->base + shdev->chcr_offset);
+}
+
+/*
+ * Reset DMA controller
+ *
+ * SH7780 has two DMAOR registers
+ */
+static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
+{
+       unsigned short dmaor;
+       unsigned long flags;
+
+       spin_lock_irqsave(&sh_dmae_lock, flags);
+
+       dmaor = dmaor_read(shdev);
+       dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
+
+       spin_unlock_irqrestore(&sh_dmae_lock, flags);
+}
+
+static int sh_dmae_rst(struct sh_dmae_device *shdev)
+{
+       unsigned short dmaor;
+       unsigned long flags;
+
+       spin_lock_irqsave(&sh_dmae_lock, flags);
+
+       dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+
+       if (shdev->pdata->chclr_present) {
+               int i;
+               for (i = 0; i < shdev->pdata->channel_num; i++) {
+                       struct sh_dmae_chan *sh_chan = shdev->chan[i];
+                       if (sh_chan)
+                               channel_clear(sh_chan);
+               }
+       }
+
+       dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
+
+       dmaor = dmaor_read(shdev);
+
+       spin_unlock_irqrestore(&sh_dmae_lock, flags);
+
+       if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
+               dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
+               return -EIO;
+       }
+       if (shdev->pdata->dmaor_init & ~dmaor)
+               dev_warn(shdev->shdma_dev.dma_dev.dev,
+                        "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
+                        dmaor, shdev->pdata->dmaor_init);
+       return 0;
+}
+
+static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
+{
+       u32 chcr = chcr_read(sh_chan);
+
+       if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
+               return true; /* working */
+
+       return false; /* waiting */
+}
+
+static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
+{
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       const struct sh_dmae_pdata *pdata = shdev->pdata;
+       int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
+               ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
+
+       if (cnt >= pdata->ts_shift_num)
+               cnt = 0;
+
+       return pdata->ts_shift[cnt];
+}
+
+static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
+{
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       const struct sh_dmae_pdata *pdata = shdev->pdata;
+       int i;
+
+       for (i = 0; i < pdata->ts_shift_num; i++)
+               if (pdata->ts_shift[i] == l2size)
+                       break;
+
+       if (i == pdata->ts_shift_num)
+               i = 0;
+
+       return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
+               ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
+}
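+
+/*
+ * Worked example with assumed values (not from a real chip): if
+ * ts_low_mask = 0x18, ts_low_shift = 3, ts_high_mask = 0 and
+ * ts_shift = { 1, 2, 3 }, then log2size_to_chcr(sh_chan, 2) matches
+ * index 1 and returns (1 << 3) & 0x18 = 0x08, and calc_xmit_shift()
+ * on such a CHCR value recovers ts_shift[1] = 2.
+ */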
+
+static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
+{
+       sh_dmae_writel(sh_chan, hw->sar, SAR);
+       sh_dmae_writel(sh_chan, hw->dar, DAR);
+       sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
+}
+
+static void dmae_start(struct sh_dmae_chan *sh_chan)
+{
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       u32 chcr = chcr_read(sh_chan);
+
+       if (shdev->pdata->needs_tend_set)
+               sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
+
+       chcr |= CHCR_DE | shdev->chcr_ie_bit;
+       chcr_write(sh_chan, chcr & ~CHCR_TE);
+}
+
+static void dmae_init(struct sh_dmae_chan *sh_chan)
+{
+       /*
+        * Default configuration for dual address memory-memory transfer.
+        * 0x400 represents auto-request.
+        */
+       u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
+                                                  LOG2_DEFAULT_XFER_SIZE);
+       sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
+       chcr_write(sh_chan, chcr);
+}
+
+static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
+{
+       /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
+       if (dmae_is_busy(sh_chan))
+               return -EBUSY;
+
+       sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
+       chcr_write(sh_chan, val);
+
+       return 0;
+}
+
+static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
+{
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       const struct sh_dmae_pdata *pdata = shdev->pdata;
+       const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
+       void __iomem *addr = shdev->dmars;
+       unsigned int shift = chan_pdata->dmars_bit;
+
+       if (dmae_is_busy(sh_chan))
+               return -EBUSY;
+
+       if (pdata->no_dmars)
+               return 0;
+
+       /* in the case of a missing DMARS resource use first memory window */
+       if (!addr)
+               addr = shdev->chan_reg;
+       addr += chan_pdata->dmars;
+
+       __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
+                    addr);
+
+       return 0;
+}
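+
+/*
+ * Each 16-bit DMARS word carries the MID/RID values of two channels:
+ * with dmars_bit = 0 the mask 0xff00 preserves the high byte and val
+ * lands in bits 0-7; with dmars_bit = 8 the mask becomes 0x00ff,
+ * preserving the low byte while val lands in bits 8-15.
+ */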
+
+static void sh_dmae_start_xfer(struct shdma_chan *schan,
+                              struct shdma_desc *sdesc)
+{
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       struct sh_dmae_desc *sh_desc = container_of(sdesc,
+                                       struct sh_dmae_desc, shdma_desc);
+       dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
+               sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
+               sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
+       /* Get the ld start address from ld_queue */
+       dmae_set_reg(sh_chan, &sh_desc->hw);
+       dmae_start(sh_chan);
+}
+
+static bool sh_dmae_channel_busy(struct shdma_chan *schan)
+{
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       return dmae_is_busy(sh_chan);
+}
+
+static void sh_dmae_setup_xfer(struct shdma_chan *schan,
+                              int slave_id)
+{
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+
+       if (slave_id >= 0) {
+               const struct sh_dmae_slave_config *cfg =
+                       sh_chan->config;
+
+               dmae_set_dmars(sh_chan, cfg->mid_rid);
+               dmae_set_chcr(sh_chan, cfg->chcr);
+       } else {
+               dmae_init(sh_chan);
+       }
+}
+
+/*
+ * Find a slave channel configuration from the controller list by either a slave
+ * ID in the non-DT case, or by a MID/RID value in the DT case
+ */
+static const struct sh_dmae_slave_config *dmae_find_slave(
+       struct sh_dmae_chan *sh_chan, int match)
+{
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       const struct sh_dmae_pdata *pdata = shdev->pdata;
+       const struct sh_dmae_slave_config *cfg;
+       int i;
+
+       if (!sh_chan->shdma_chan.dev->of_node) {
+               if (match >= SH_DMA_SLAVE_NUMBER)
+                       return NULL;
+
+               for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+                       if (cfg->slave_id == match)
+                               return cfg;
+       } else {
+               for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+                       if (cfg->mid_rid == match) {
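+                               /* in DT mode the table index acts as the slave ID */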
+                               sh_chan->shdma_chan.slave_id = i;
+                               return cfg;
+                       }
+       }
+
+       return NULL;
+}
+
+static int sh_dmae_set_slave(struct shdma_chan *schan,
+                            int slave_id, dma_addr_t slave_addr, bool try)
+{
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
+       if (!cfg)
+               return -ENXIO;
+
+       if (!try) {
+               sh_chan->config = cfg;
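+               /* a non-zero address from dma_slave_config overrides cfg->addr */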
+               sh_chan->slave_addr = slave_addr ? : cfg->addr;
+       }
+
+       return 0;
+}
+
+static void dmae_halt(struct sh_dmae_chan *sh_chan)
+{
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       u32 chcr = chcr_read(sh_chan);
+
+       chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
+       chcr_write(sh_chan, chcr);
+}
+
+static int sh_dmae_desc_setup(struct shdma_chan *schan,
+                             struct shdma_desc *sdesc,
+                             dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+       struct sh_dmae_desc *sh_desc = container_of(sdesc,
+                                       struct sh_dmae_desc, shdma_desc);
+
+       if (*len > schan->max_xfer_len)
+               *len = schan->max_xfer_len;
+
+       sh_desc->hw.sar = src;
+       sh_desc->hw.dar = dst;
+       sh_desc->hw.tcr = *len;
+
+       return 0;
+}
+
+static void sh_dmae_halt(struct shdma_chan *schan)
+{
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       dmae_halt(sh_chan);
+}
+
+static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
+{
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+
+       if (!(chcr_read(sh_chan) & CHCR_TE))
+               return false;
+
+       /* DMA stop */
+       dmae_halt(sh_chan);
+
+       return true;
+}
+
+static size_t sh_dmae_get_partial(struct shdma_chan *schan,
+                                 struct shdma_desc *sdesc)
+{
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       struct sh_dmae_desc *sh_desc = container_of(sdesc,
+                                       struct sh_dmae_desc, shdma_desc);
+       /* TCR holds the remaining count in units of 2^xmit_shift bytes */
+       return sh_desc->hw.tcr -
+               (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
+}
+
+/* Called from error IRQ or NMI */
+static bool sh_dmae_reset(struct sh_dmae_device *shdev)
+{
+       bool ret;
+
+       /* halt the dma controller */
+       sh_dmae_ctl_stop(shdev);
+
+       /* We cannot detect which channel caused the error, so reset them all */
+       ret = shdma_reset(&shdev->shdma_dev);
+
+       sh_dmae_rst(shdev);
+
+       return ret;
+}
+
+static irqreturn_t sh_dmae_err(int irq, void *data)
+{
+       struct sh_dmae_device *shdev = data;
+
+       if (!(dmaor_read(shdev) & DMAOR_AE))
+               return IRQ_NONE;
+
+       sh_dmae_reset(shdev);
+       return IRQ_HANDLED;
+}
+
+static bool sh_dmae_desc_completed(struct shdma_chan *schan,
+                                  struct shdma_desc *sdesc)
+{
+       struct sh_dmae_chan *sh_chan = container_of(schan,
+                                       struct sh_dmae_chan, shdma_chan);
+       struct sh_dmae_desc *sh_desc = container_of(sdesc,
+                                       struct sh_dmae_desc, shdma_desc);
+       u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+       u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
+
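+       /* on completion the memory-side address register points past the descriptor end */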
+       return  (sdesc->direction == DMA_DEV_TO_MEM &&
+                (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
+               (sdesc->direction != DMA_DEV_TO_MEM &&
+                (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
+}
+
+static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
+{
+       /* Fast path out if NMIF is not asserted for this controller */
+       if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
+               return false;
+
+       return sh_dmae_reset(shdev);
+}
+
+static int sh_dmae_nmi_handler(struct notifier_block *self,
+                              unsigned long cmd, void *data)
+{
+       struct sh_dmae_device *shdev;
+       int ret = NOTIFY_DONE;
+       bool triggered;
+
+       /*
+        * Only concern ourselves with NMI events.
+        *
+        * Normally we would check the die chain value, but as this needs
+        * to be architecture independent, check for NMI context instead.
+        */
+       if (!in_nmi())
+               return NOTIFY_DONE;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
+               /*
+                * Only stop if one of the controllers has NMIF asserted,
+                * we do not want to interfere with regular address error
+                * handling or NMI events that don't concern the DMACs.
+                */
+               triggered = sh_dmae_nmi_notify(shdev);
+               if (triggered == true)
+                       ret = NOTIFY_OK;
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
+static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
+       .notifier_call  = sh_dmae_nmi_handler,
+
+       /* Run before NMI debug handler and KGDB */
+       .priority       = 1,
+};
+
+static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
+                                       int irq, unsigned long flags)
+{
+       const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+       struct shdma_dev *sdev = &shdev->shdma_dev;
+       struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
+       struct sh_dmae_chan *sh_chan;
+       struct shdma_chan *schan;
+       int err;
+
+       sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
+                              GFP_KERNEL);
+       if (!sh_chan) {
+               dev_err(sdev->dma_dev.dev,
+                       "No free memory for allocating dma channels!\n");
+               return -ENOMEM;
+       }
+
+       schan = &sh_chan->shdma_chan;
+       schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
+
+       shdma_chan_probe(sdev, schan, id);
+
+       sh_chan->base = shdev->chan_reg + chan_pdata->offset;
+
+       /* set up channel irq */
+       if (pdev->id >= 0)
+               snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+                        "sh-dmae%d.%d", pdev->id, id);
+       else
+               snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+                        "sh-dma%d", id);
+
+       err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
+       if (err) {
+               dev_err(sdev->dma_dev.dev,
+                       "DMA channel %d request_irq error %d\n",
+                       id, err);
+               goto err_no_irq;
+       }
+
+       shdev->chan[id] = sh_chan;
+       return 0;
+
+err_no_irq:
+       /* remove from dmaengine device node */
+       shdma_chan_remove(schan);
+       return err;
+}
+
+static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
+{
+       struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
+       struct shdma_chan *schan;
+       int i;
+
+       shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
+               BUG_ON(!schan);
+
+               shdma_chan_remove(schan);
+       }
+       dma_dev->chancnt = 0;
+}
+
+static void sh_dmae_shutdown(struct platform_device *pdev)
+{
+       struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+       sh_dmae_ctl_stop(shdev);
+}
+
+static int sh_dmae_runtime_suspend(struct device *dev)
+{
+       return 0;
+}
+
+static int sh_dmae_runtime_resume(struct device *dev)
+{
+       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+       return sh_dmae_rst(shdev);
+}
+
+#ifdef CONFIG_PM
+static int sh_dmae_suspend(struct device *dev)
+{
+       return 0;
+}
+
+static int sh_dmae_resume(struct device *dev)
+{
+       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+       int i, ret;
+
+       ret = sh_dmae_rst(shdev);
+       if (ret < 0)
+               dev_err(dev, "Failed to reset!\n");
+
+       for (i = 0; i < shdev->pdata->channel_num; i++) {
+               struct sh_dmae_chan *sh_chan = shdev->chan[i];
+
+               if (!sh_chan->shdma_chan.desc_num)
+                       continue;
+
+               if (sh_chan->shdma_chan.slave_id >= 0) {
+                       const struct sh_dmae_slave_config *cfg = sh_chan->config;
+                       dmae_set_dmars(sh_chan, cfg->mid_rid);
+                       dmae_set_chcr(sh_chan, cfg->chcr);
+               } else {
+                       dmae_init(sh_chan);
+               }
+       }
+
+       return 0;
+}
+#else
+#define sh_dmae_suspend NULL
+#define sh_dmae_resume NULL
+#endif
+
+static const struct dev_pm_ops sh_dmae_pm = {
+       .suspend                = sh_dmae_suspend,
+       .resume                 = sh_dmae_resume,
+       .runtime_suspend        = sh_dmae_runtime_suspend,
+       .runtime_resume         = sh_dmae_runtime_resume,
+};
+
+static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
+{
+       struct sh_dmae_chan *sh_chan = container_of(schan,
+                                       struct sh_dmae_chan, shdma_chan);
+
+       /*
+        * Implicit BUG_ON(!sh_chan->config):
+        * this is an exclusive slave DMA operation and may only be called
+        * after a successful slave configuration.
+        */
+       return sh_chan->slave_addr;
+}
+
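+/* Return the shdma_desc embedded in the i-th entry of the descriptor pool */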
+static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
+{
+       return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
+}
+
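+/* Hooks through which the shdma-base library drives this controller */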
+static const struct shdma_ops sh_dmae_shdma_ops = {
+       .desc_completed = sh_dmae_desc_completed,
+       .halt_channel = sh_dmae_halt,
+       .channel_busy = sh_dmae_channel_busy,
+       .slave_addr = sh_dmae_slave_addr,
+       .desc_setup = sh_dmae_desc_setup,
+       .set_slave = sh_dmae_set_slave,
+       .setup_xfer = sh_dmae_setup_xfer,
+       .start_xfer = sh_dmae_start_xfer,
+       .embedded_desc = sh_dmae_embedded_desc,
+       .chan_irq = sh_dmae_chan_irq,
+       .get_partial = sh_dmae_get_partial,
+};
+
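+/*
+ * The OF match table is still empty; SoC entries are expected to carry the
+ * matching struct sh_dmae_pdata in their .data field (consumed in probe).
+ */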
+static const struct of_device_id sh_dmae_of_match[] = {
+       {}
+};
+MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
+
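+/* Probe the controller: map resources, sort out IRQ topology, add channels */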
+static int sh_dmae_probe(struct platform_device *pdev)
+{
+       const struct sh_dmae_pdata *pdata;
+       unsigned long irqflags = IRQF_DISABLED,
+               chan_flag[SH_DMAE_MAX_CHANNELS] = {};
+       int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
+       int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
+       struct sh_dmae_device *shdev;
+       struct dma_device *dma_dev;
+       struct resource *chan, *dmars, *errirq_res, *chanirq_res;
+
+       if (pdev->dev.of_node) {
+               const struct of_device_id *match;
+
+               /* Guard against a NULL return while the match table is empty */
+               match = of_match_device(sh_dmae_of_match, &pdev->dev);
+               if (!match)
+                       return -ENODEV;
+               pdata = match->data;
+       } else {
+               pdata = pdev->dev.platform_data;
+       }
+
+       /* get platform data */
+       if (!pdata || !pdata->channel_num)
+               return -ENODEV;
+
+       chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       /* DMARS area is optional */
+       dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       /*
+        * IRQ resources:
+        * 1. There must always be at least one IRQ IO-resource. On SH4 it is
+        *    the error IRQ, in which case it is the only IRQ in this resource:
+        *    start == end. If it is the only IRQ resource, all channels also
+        *    use the same IRQ.
+        * 2. DMA channel IRQ resources can be specified one per resource or in
+        *    ranges (start != end).
+        * 3. If and only if all events (channels and, optionally, error) on
+        *    this controller use the same IRQ, only one IRQ resource can be
+        *    specified; otherwise there must be one IRQ per channel, even if
+        *    some of them are equal.
+        * 4. If all IRQs on this controller are equal, or if some specific
+        *    IRQs specify IORESOURCE_IRQ_SHAREABLE in their resources, they
+        *    will be requested with the IRQF_SHARED flag.
+        */
+       errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!chan || !errirq_res)
+               return -ENODEV;
+
+       shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
+                            GFP_KERNEL);
+       if (!shdev) {
+               dev_err(&pdev->dev, "Not enough memory\n");
+               return -ENOMEM;
+       }
+
+       dma_dev = &shdev->shdma_dev.dma_dev;
+
+       shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+       if (IS_ERR(shdev->chan_reg))
+               return PTR_ERR(shdev->chan_reg);
+       if (dmars) {
+               shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
+               if (IS_ERR(shdev->dmars))
+                       return PTR_ERR(shdev->dmars);
+       }
+
+       if (!pdata->slave_only)
+               dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+       if (pdata->slave && pdata->slave_num)
+               dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+       /* Default transfer size of 32 bytes requires 32-byte alignment */
+       dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
+
+       shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
+       shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
+       err = shdma_init(&pdev->dev, &shdev->shdma_dev,
+                             pdata->channel_num);
+       if (err < 0)
+               goto eshdma;
+
+       /* platform data */
+       shdev->pdata = pdata;
+
+       if (pdata->chcr_offset)
+               shdev->chcr_offset = pdata->chcr_offset;
+       else
+               shdev->chcr_offset = CHCR;
+
+       if (pdata->chcr_ie_bit)
+               shdev->chcr_ie_bit = pdata->chcr_ie_bit;
+       else
+               shdev->chcr_ie_bit = CHCR_IE;
+
+       platform_set_drvdata(pdev, shdev);
+
+       pm_runtime_enable(&pdev->dev);
+       err = pm_runtime_get_sync(&pdev->dev);
+       if (err < 0)
+               dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
+
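+       /* Publish the device on the global list scanned by the NMI notifier */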
+       spin_lock_irq(&sh_dmae_lock);
+       list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
+       spin_unlock_irq(&sh_dmae_lock);
+
+       /* Reset the DMA controller - here this mainly serves as a sanity check */
+       err = sh_dmae_rst(shdev);
+       if (err)
+               goto rst_err;
+
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+       chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+
+       if (!chanirq_res)
+               chanirq_res = errirq_res;
+       else
+               irqres++;
+
+       if (chanirq_res == errirq_res ||
+           (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
+               irqflags = IRQF_SHARED;
+
+       errirq = errirq_res->start;
+
+       err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
+                              "DMAC Address Error", shdev);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "DMA failed requesting irq #%d, error %d\n",
+                       errirq, err);
+               goto eirq_err;
+       }
+
+#else
+       chanirq_res = errirq_res;
+#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
+
+       if (chanirq_res->start == chanirq_res->end &&
+           !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
+               /* Special case - all multiplexed */
+               for (; irq_cnt < pdata->channel_num; irq_cnt++) {
+                       if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
+                               chan_irq[irq_cnt] = chanirq_res->start;
+                               chan_flag[irq_cnt] = IRQF_SHARED;
+                       } else {
+                               irq_cap = 1;
+                               break;
+                       }
+               }
+       } else {
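+               /* Expand each IRQ resource (single IRQs or ranges) per channel */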
+               do {
+                       for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
+                               if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
+                                       irq_cap = 1;
+                                       break;
+                               }
+
+                               if ((errirq_res->flags & IORESOURCE_BITS) ==
+                                   IORESOURCE_IRQ_SHAREABLE)
+                                       chan_flag[irq_cnt] = IRQF_SHARED;
+                               else
+                                       chan_flag[irq_cnt] = IRQF_DISABLED;
+                               dev_dbg(&pdev->dev,
+                                       "Found IRQ %d for channel %d\n",
+                                       i, irq_cnt);
+                               chan_irq[irq_cnt++] = i;
+                       }
+
+                       if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
+                               break;
+
+                       chanirq_res = platform_get_resource(pdev,
+                                               IORESOURCE_IRQ, ++irqres);
+               } while (irq_cnt < pdata->channel_num && chanirq_res);
+       }
+
+       /* Create DMA Channel */
+       for (i = 0; i < irq_cnt; i++) {
+               err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
+               if (err)
+                       goto chan_probe_err;
+       }
+
+       if (irq_cap)
+               dev_notice(&pdev->dev,
+                          "Attempting to register %d DMA channels when a maximum of %d are supported.\n",
+                          pdata->channel_num, SH_DMAE_MAX_CHANNELS);
+
+       pm_runtime_put(&pdev->dev);
+
+       err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
+       if (err < 0)
+               goto edmadevreg;
+
+       return err;
+
+edmadevreg:
+       pm_runtime_get(&pdev->dev);
+
+chan_probe_err:
+       sh_dmae_chan_remove(shdev);
+
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+eirq_err:
+#endif
+rst_err:
+       spin_lock_irq(&sh_dmae_lock);
+       list_del_rcu(&shdev->node);
+       spin_unlock_irq(&sh_dmae_lock);
+
+       pm_runtime_put(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+
+       platform_set_drvdata(pdev, NULL);
+       shdma_cleanup(&shdev->shdma_dev);
+eshdma:
+       synchronize_rcu();
+
+       return err;
+}
+
+static int sh_dmae_remove(struct platform_device *pdev)
+{
+       struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+       struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
+
+       dma_async_device_unregister(dma_dev);
+
+       /*
+        * All IRQs were requested with devm_request_irq(), so they are
+        * released automatically; an explicit free_irq() here would
+        * trigger a double free when the devres callbacks run.
+        */
+
+       spin_lock_irq(&sh_dmae_lock);
+       list_del_rcu(&shdev->node);
+       spin_unlock_irq(&sh_dmae_lock);
+
+       pm_runtime_disable(&pdev->dev);
+
+       sh_dmae_chan_remove(shdev);
+       shdma_cleanup(&shdev->shdma_dev);
+
+       platform_set_drvdata(pdev, NULL);
+
+       synchronize_rcu();
+
+       return 0;
+}
+
+static struct platform_driver sh_dmae_driver = {
+       .driver         = {
+               .owner  = THIS_MODULE,
+               .pm     = &sh_dmae_pm,
+               .name   = SH_DMAE_DRV_NAME,
+               .of_match_table = sh_dmae_of_match,
+       },
+       .remove         = sh_dmae_remove,
+       .shutdown       = sh_dmae_shutdown,
+};
+
+static int __init sh_dmae_init(void)
+{
+       /* Wire up NMI handling */
+       int err = register_die_notifier(&sh_dmae_nmi_notifier);
+       if (err)
+               return err;
+
+       return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
+}
+module_init(sh_dmae_init);
+
+static void __exit sh_dmae_exit(void)
+{
+       platform_driver_unregister(&sh_dmae_driver);
+
+       unregister_die_notifier(&sh_dmae_nmi_notifier);
+}
+module_exit(sh_dmae_exit);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
+MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
 
        return NULL;
 }
 
-static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+static int sudmac_set_slave(struct shdma_chan *schan, int slave_id,
+                           dma_addr_t slave_addr, bool try)
 {
        struct sudmac_chan *sc = to_chan(schan);
        const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);
 
        dma_addr_t (*slave_addr)(struct shdma_chan *);
        int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
                          dma_addr_t, dma_addr_t, size_t *);
-       int (*set_slave)(struct shdma_chan *, int, bool);
+       int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
        void (*setup_xfer)(struct shdma_chan *, int);
        void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
        struct shdma_desc *(*embedded_desc)(void *, int);