static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
 {
+       struct dw_edma_chip *chip = desc->chan->dw->chip;
        struct dw_edma_chan *chan = desc->chan;
-       struct dw_edma *dw = chan->chip->dw;
        struct dw_edma_chunk *chunk;
 
        chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
         */
        chunk->cb = !(desc->chunks_alloc % 2);
        if (chan->dir == EDMA_DIR_WRITE) {
-               chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr;
-               chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr;
+               chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr;
+               chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
        } else {
-               chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr;
-               chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr;
+               chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr;
+               chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
        }
 
        if (desc->chunk) {
        if (chan->status != EDMA_ST_IDLE)
                return -EBUSY;
 
-       pm_runtime_get(chan->chip->dev);
+       pm_runtime_get(chan->dw->chip->dev);
 
        return 0;
 }
                cpu_relax();
        }
 
-       pm_runtime_put(chan->chip->dev);
+       pm_runtime_put(chan->dw->chip->dev);
 }
 
-static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
+static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
                                 u32 wr_alloc, u32 rd_alloc)
 {
+       struct dw_edma_chip *chip = dw->chip;
        struct dw_edma_region *dt_region;
        struct device *dev = chip->dev;
-       struct dw_edma *dw = chip->dw;
        struct dw_edma_chan *chan;
        struct dw_edma_irq *irq;
        struct dma_device *dma;
 
                chan->vc.chan.private = dt_region;
 
-               chan->chip = chip;
+               chan->dw = dw;
                chan->id = j;
                chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
                chan->configured = false;
                chan->status = EDMA_ST_IDLE;
 
                if (write)
-                       chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ);
+                       chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);
                else
-                       chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ);
+                       chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);
                chan->ll_max -= 1;
 
                dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
                vchan_init(&chan->vc, dma);
 
                if (write) {
-                       dt_region->paddr = dw->dt_region_wr[j].paddr;
-                       dt_region->vaddr = dw->dt_region_wr[j].vaddr;
-                       dt_region->sz = dw->dt_region_wr[j].sz;
+                       dt_region->paddr = chip->dt_region_wr[j].paddr;
+                       dt_region->vaddr = chip->dt_region_wr[j].vaddr;
+                       dt_region->sz = chip->dt_region_wr[j].sz;
                } else {
-                       dt_region->paddr = dw->dt_region_rd[j].paddr;
-                       dt_region->vaddr = dw->dt_region_rd[j].vaddr;
-                       dt_region->sz = dw->dt_region_rd[j].sz;
+                       dt_region->paddr = chip->dt_region_rd[j].paddr;
+                       dt_region->vaddr = chip->dt_region_rd[j].vaddr;
+                       dt_region->sz = chip->dt_region_rd[j].sz;
                }
 
                dw_edma_v0_core_device_config(chan);
                (*mask)++;
 }
 
-static int dw_edma_irq_request(struct dw_edma_chip *chip,
+static int dw_edma_irq_request(struct dw_edma *dw,
                               u32 *wr_alloc, u32 *rd_alloc)
 {
-       struct device *dev = chip->dev;
-       struct dw_edma *dw = chip->dw;
+       struct dw_edma_chip *chip = dw->chip;
+       struct device *dev = dw->chip->dev;
        u32 wr_mask = 1;
        u32 rd_mask = 1;
        int i, err = 0;
 
        ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
 
-       if (dw->nr_irqs < 1)
+       if (chip->nr_irqs < 1 || !chip->ops->irq_vector)
                return -EINVAL;
 
-       if (dw->nr_irqs == 1) {
+       dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
+       if (!dw->irq)
+               return -ENOMEM;
+
+       if (chip->nr_irqs == 1) {
                /* Common IRQ shared among all channels */
-               irq = dw->ops->irq_vector(dev, 0);
+               irq = chip->ops->irq_vector(dev, 0);
                err = request_irq(irq, dw_edma_interrupt_common,
                                  IRQF_SHARED, dw->name, &dw->irq[0]);
                if (err) {
 
                if (irq_get_msi_desc(irq))
                        get_cached_msi_msg(irq, &dw->irq[0].msi);
+
+               dw->nr_irqs = 1;
        } else {
                /* Distribute IRQs equally among all channels */
-               int tmp = dw->nr_irqs;
+               int tmp = chip->nr_irqs;
 
                while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
                        dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
                dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
 
                for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
-                       irq = dw->ops->irq_vector(dev, i);
+                       irq = chip->ops->irq_vector(dev, i);
                        err = request_irq(irq,
                                          i < *wr_alloc ?
                                                dw_edma_interrupt_write :
                return -EINVAL;
 
        dev = chip->dev;
-       if (!dev)
+       if (!dev || !chip->ops)
                return -EINVAL;
 
-       dw = chip->dw;
-       if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
-               return -EINVAL;
+       dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
+       if (!dw)
+               return -ENOMEM;
+
+       dw->chip = chip;
 
        raw_spin_lock_init(&dw->lock);
 
-       dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt,
+       dw->wr_ch_cnt = min_t(u16, chip->wr_ch_cnt,
                              dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
        dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);
 
-       dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt,
+       dw->rd_ch_cnt = min_t(u16, chip->rd_ch_cnt,
                              dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
        dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);
 
        dw_edma_v0_core_off(dw);
 
        /* Request IRQs */
-       err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
+       err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
        if (err)
                return err;
 
        /* Setup write channels */
-       err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
+       err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);
        if (err)
                goto err_irq_free;
 
        /* Setup read channels */
-       err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
+       err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);
        if (err)
                goto err_irq_free;
 
        pm_runtime_enable(dev);
 
        /* Turn debugfs on */
-       dw_edma_v0_core_debugfs_on(chip);
+       dw_edma_v0_core_debugfs_on(dw);
+
+       chip->dw = dw;
 
        return 0;
 
 err_irq_free:
        for (i = (dw->nr_irqs - 1); i >= 0; i--)
-               free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
-
-       dw->nr_irqs = 0;
+               free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
 
        return err;
 }
 
        /* Free irqs */
        for (i = (dw->nr_irqs - 1); i >= 0; i--)
-               free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);
+               free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
 
        /* Power management */
        pm_runtime_disable(dev);
        }
 
        /* Turn debugfs off */
-       dw_edma_v0_core_debugfs_off(chip);
+       dw_edma_v0_core_debugfs_off(dw);
 
        return 0;
 }
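
With the chip info detached from the private data, channel and core code no
longer dereference chan->chip; the shared chip data is reached through the
dw_edma back-pointer instead. A minimal sketch of the access paths this patch
establishes (illustrative only, not part of the diff):

	/* channel -> core -> chip, e.g. for runtime PM */
	struct dw_edma_chip *chip = chan->dw->chip;

	pm_runtime_get(chip->dev);

	/* core -> chip, e.g. for the register map format */
	enum dw_edma_map_format mf = dw->chip->mf;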
 
 #include "../virt-dma.h"
 
 #define EDMA_LL_SZ                                     24
-#define EDMA_MAX_WR_CH                                 8
-#define EDMA_MAX_RD_CH                                 8
 
 enum dw_edma_dir {
        EDMA_DIR_WRITE = 0,
        EDMA_DIR_READ
 };
 
-enum dw_edma_map_format {
-       EDMA_MF_EDMA_LEGACY = 0x0,
-       EDMA_MF_EDMA_UNROLL = 0x1,
-       EDMA_MF_HDMA_COMPAT = 0x5
-};
-
 enum dw_edma_request {
        EDMA_REQ_NONE = 0,
        EDMA_REQ_STOP,
        u32                             sz;
 };
 
-struct dw_edma_region {
-       phys_addr_t                     paddr;
-       void                            __iomem *vaddr;
-       size_t                          sz;
-};
-
 struct dw_edma_chunk {
        struct list_head                list;
        struct dw_edma_chan             *chan;
 
 struct dw_edma_chan {
        struct virt_dma_chan            vc;
-       struct dw_edma_chip             *chip;
+       struct dw_edma                  *dw;
        int                             id;
        enum dw_edma_dir                dir;
 
        struct dw_edma                  *dw;
 };
 
-struct dw_edma_core_ops {
-       int     (*irq_vector)(struct device *dev, unsigned int nr);
-};
-
 struct dw_edma {
        char                            name[20];
 
        struct dma_device               rd_edma;
        u16                             rd_ch_cnt;
 
-       struct dw_edma_region           rg_region;      /* Registers */
-       struct dw_edma_region           ll_region_wr[EDMA_MAX_WR_CH];
-       struct dw_edma_region           ll_region_rd[EDMA_MAX_RD_CH];
-       struct dw_edma_region           dt_region_wr[EDMA_MAX_WR_CH];
-       struct dw_edma_region           dt_region_rd[EDMA_MAX_RD_CH];
-
        struct dw_edma_irq              *irq;
        int                             nr_irqs;
 
-       enum dw_edma_map_format         mf;
-
        struct dw_edma_chan             *chan;
-       const struct dw_edma_core_ops   *ops;
 
        raw_spinlock_t                  lock;           /* Only for legacy */
+
+       struct dw_edma_chip             *chip;
 #ifdef CONFIG_DEBUG_FS
        struct dentry                   *debugfs;
 #endif /* CONFIG_DEBUG_FS */
 
        struct dw_edma_pcie_data vsec_data;
        struct device *dev = &pdev->dev;
        struct dw_edma_chip *chip;
-       struct dw_edma *dw;
        int err, nr_irqs;
        int i, mask;
 
        if (!chip)
                return -ENOMEM;
 
-       dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
-       if (!dw)
-               return -ENOMEM;
-
        /* IRQs allocation */
        nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
                                        PCI_IRQ_MSI | PCI_IRQ_MSIX);
        }
 
        /* Data structure initialization */
-       chip->dw = dw;
        chip->dev = dev;
        chip->id = pdev->devfn;
 
-       dw->mf = vsec_data.mf;
-       dw->nr_irqs = nr_irqs;
-       dw->ops = &dw_edma_pcie_core_ops;
-       dw->wr_ch_cnt = vsec_data.wr_ch_cnt;
-       dw->rd_ch_cnt = vsec_data.rd_ch_cnt;
+       chip->mf = vsec_data.mf;
+       chip->nr_irqs = nr_irqs;
+       chip->ops = &dw_edma_pcie_core_ops;
 
-       dw->rg_region.vaddr = pcim_iomap_table(pdev)[vsec_data.rg.bar];
-       if (!dw->rg_region.vaddr)
-               return -ENOMEM;
+       chip->wr_ch_cnt = vsec_data.wr_ch_cnt;
+       chip->rd_ch_cnt = vsec_data.rd_ch_cnt;
 
-       dw->rg_region.vaddr += vsec_data.rg.off;
-       dw->rg_region.paddr = pdev->resource[vsec_data.rg.bar].start;
-       dw->rg_region.paddr += vsec_data.rg.off;
-       dw->rg_region.sz = vsec_data.rg.sz;
+       chip->rg_region.vaddr = pcim_iomap_table(pdev)[vsec_data.rg.bar];
+       if (!chip->rg_region.vaddr)
+               return -ENOMEM;
 
-       for (i = 0; i < dw->wr_ch_cnt; i++) {
-               struct dw_edma_region *ll_region = &dw->ll_region_wr[i];
-               struct dw_edma_region *dt_region = &dw->dt_region_wr[i];
+       for (i = 0; i < chip->wr_ch_cnt; i++) {
+               struct dw_edma_region *ll_region = &chip->ll_region_wr[i];
+               struct dw_edma_region *dt_region = &chip->dt_region_wr[i];
                struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
                struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
 
                dt_region->sz = dt_block->sz;
        }
 
-       for (i = 0; i < dw->rd_ch_cnt; i++) {
-               struct dw_edma_region *ll_region = &dw->ll_region_rd[i];
-               struct dw_edma_region *dt_region = &dw->dt_region_rd[i];
+       for (i = 0; i < chip->rd_ch_cnt; i++) {
+               struct dw_edma_region *ll_region = &chip->ll_region_rd[i];
+               struct dw_edma_region *dt_region = &chip->dt_region_rd[i];
                struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
                struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
 
        }
 
        /* Debug info */
-       if (dw->mf == EDMA_MF_EDMA_LEGACY)
-               pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", dw->mf);
-       else if (dw->mf == EDMA_MF_EDMA_UNROLL)
-               pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", dw->mf);
-       else if (dw->mf == EDMA_MF_HDMA_COMPAT)
-               pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", dw->mf);
+       if (chip->mf == EDMA_MF_EDMA_LEGACY)
+               pci_dbg(pdev, "Version:\teDMA Port Logic (0x%x)\n", chip->mf);
+       else if (chip->mf == EDMA_MF_EDMA_UNROLL)
+               pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf);
+       else if (chip->mf == EDMA_MF_HDMA_COMPAT)
+               pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf);
        else
-               pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", dw->mf);
+               pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf);
 
-       pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
+       pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n",
                vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
-               dw->rg_region.vaddr, &dw->rg_region.paddr);
+               chip->rg_region.vaddr);
 
 
-       for (i = 0; i < dw->wr_ch_cnt; i++) {
+       for (i = 0; i < chip->wr_ch_cnt; i++) {
                pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
                        i, vsec_data.ll_wr[i].bar,
-                       vsec_data.ll_wr[i].off, dw->ll_region_wr[i].sz,
-                       dw->ll_region_wr[i].vaddr, &dw->ll_region_wr[i].paddr);
+                       vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
+                       chip->ll_region_wr[i].vaddr, &chip->ll_region_wr[i].paddr);
 
                pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
                        i, vsec_data.dt_wr[i].bar,
-                       vsec_data.dt_wr[i].off, dw->dt_region_wr[i].sz,
-                       dw->dt_region_wr[i].vaddr, &dw->dt_region_wr[i].paddr);
+                       vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
+                       chip->dt_region_wr[i].vaddr, &chip->dt_region_wr[i].paddr);
        }
 
-       for (i = 0; i < dw->rd_ch_cnt; i++) {
+       for (i = 0; i < chip->rd_ch_cnt; i++) {
                pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
                        i, vsec_data.ll_rd[i].bar,
-                       vsec_data.ll_rd[i].off, dw->ll_region_rd[i].sz,
-                       dw->ll_region_rd[i].vaddr, &dw->ll_region_rd[i].paddr);
+                       vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
+                       chip->ll_region_rd[i].vaddr, &chip->ll_region_rd[i].paddr);
 
                pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
                        i, vsec_data.dt_rd[i].bar,
-                       vsec_data.dt_rd[i].off, dw->dt_region_rd[i].sz,
-                       dw->dt_region_rd[i].vaddr, &dw->dt_region_rd[i].paddr);
+                       vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
+                       chip->dt_region_rd[i].vaddr, &chip->dt_region_rd[i].paddr);
        }
 
-       pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
+       pci_dbg(pdev, "Nr. IRQs:\t%u\n", chip->nr_irqs);
 
        /* Validating if PCI interrupts were enabled */
        if (!pci_dev_msi_enabled(pdev)) {
                return -EPERM;
        }
 
-       dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
-       if (!dw->irq)
-               return -ENOMEM;
-
        /* Starting eDMA driver */
        err = dw_edma_probe(chip);
        if (err) {
 
 
 static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
 {
-       return dw->rg_region.vaddr;
+       return dw->chip->rg_region.vaddr;
 }
 
 #define SET_32(dw, name, value)                                \
 static inline struct dw_edma_v0_ch_regs __iomem *
 __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
 {
-       if (dw->mf == EDMA_MF_EDMA_LEGACY)
+       if (dw->chip->mf == EDMA_MF_EDMA_LEGACY)
                return &(__dw_regs(dw)->type.legacy.ch);
 
        if (dir == EDMA_DIR_WRITE)
 static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
                             u32 value, void __iomem *addr)
 {
-       if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+       if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
                u32 viewport_sel;
                unsigned long flags;
 
 {
        u32 value;
 
-       if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+       if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
                u32 viewport_sel;
                unsigned long flags;
 
 static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
                             u64 value, void __iomem *addr)
 {
-       if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+       if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
                u32 viewport_sel;
                unsigned long flags;
 
 {
        u32 value;
 
-       if (dw->mf == EDMA_MF_EDMA_LEGACY) {
+       if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
                u32 viewport_sel;
                unsigned long flags;
 
 
 enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
 {
-       struct dw_edma *dw = chan->chip->dw;
+       struct dw_edma *dw = chan->dw;
        u32 tmp;
 
        tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
 
 void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
 {
-       struct dw_edma *dw = chan->chip->dw;
+       struct dw_edma *dw = chan->dw;
 
        SET_RW_32(dw, chan->dir, int_clear,
                  FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
 
 void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
 {
-       struct dw_edma *dw = chan->chip->dw;
+       struct dw_edma *dw = chan->dw;
 
        SET_RW_32(dw, chan->dir, int_clear,
                  FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
 void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 {
        struct dw_edma_chan *chan = chunk->chan;
-       struct dw_edma *dw = chan->chip->dw;
+       struct dw_edma *dw = chan->dw;
        u32 tmp;
 
        dw_edma_v0_core_write_chunk(chunk);
        if (first) {
                /* Enable engine */
                SET_RW_32(dw, chan->dir, engine_en, BIT(0));
-               if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+               if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
                        switch (chan->id) {
                        case 0:
                                SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
 
 int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
 {
-       struct dw_edma *dw = chan->chip->dw;
+       struct dw_edma *dw = chan->dw;
        u32 tmp = 0;
 
        /* MSI done addr - low, high */
 }
 
 /* eDMA debugfs callbacks */
-void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
+void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
 {
-       dw_edma_v0_debugfs_on(chip);
+       dw_edma_v0_debugfs_on(dw);
 }
 
-void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip)
+void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)
 {
-       dw_edma_v0_debugfs_off(chip);
+       dw_edma_v0_debugfs_off(dw);
 }
 
 void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
 int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
 /* eDMA debug fs callbacks */
-void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip);
+void dw_edma_v0_core_debugfs_on(struct dw_edma *dw);
+void dw_edma_v0_core_debugfs_off(struct dw_edma *dw);
 
 #endif /* _DW_EDMA_V0_CORE_H */
 
 static int dw_edma_debugfs_u32_get(void *data, u64 *val)
 {
        void __iomem *reg = (void __force __iomem *)data;
-       if (dw->mf == EDMA_MF_EDMA_LEGACY &&
+       if (dw->chip->mf == EDMA_MF_EDMA_LEGACY &&
            reg >= (void __iomem *)&regs->type.legacy.ch) {
                void __iomem *ptr = &regs->type.legacy.ch;
                u32 viewport_sel = 0;
        nr_entries = ARRAY_SIZE(debugfs_regs);
        dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
 
-       if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+       if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
                nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
                dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
                                           regs_dir);
        nr_entries = ARRAY_SIZE(debugfs_regs);
        dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
 
-       if (dw->mf == EDMA_MF_HDMA_COMPAT) {
+       if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
                nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
                dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
                                           regs_dir);
        dw_edma_debugfs_regs_rd(regs_dir);
 }
 
-void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+void dw_edma_v0_debugfs_on(struct dw_edma *_dw)
 {
-       dw = chip->dw;
+       dw = _dw;
        if (!dw)
                return;
 
-       regs = dw->rg_region.vaddr;
+       regs = dw->chip->rg_region.vaddr;
        if (!regs)
                return;
 
        if (!dw->debugfs)
                return;
 
-       debugfs_create_u32("mf", 0444, dw->debugfs, &dw->mf);
+       debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf);
        debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
        debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
 
        dw_edma_debugfs_regs();
 }
 
-void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
+void dw_edma_v0_debugfs_off(struct dw_edma *_dw)
 {
-       dw = chip->dw;
+       dw = _dw;
        if (!dw)
                return;
 
 
 #include <linux/dma/edma.h>
 
 #ifdef CONFIG_DEBUG_FS
-void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip);
-void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip);
+void dw_edma_v0_debugfs_on(struct dw_edma *dw);
+void dw_edma_v0_debugfs_off(struct dw_edma *dw);
 #else
-static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
+static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw)
 {
 }
 
-static inline void dw_edma_v0_debugfs_off(struct dw_edma_chip *chip)
+static inline void dw_edma_v0_debugfs_off(struct dw_edma *dw)
 {
 }
 #endif /* CONFIG_DEBUG_FS */
 
 #include <linux/device.h>
 #include <linux/dmaengine.h>
 
+#define EDMA_MAX_WR_CH                                  8
+#define EDMA_MAX_RD_CH                                  8
+
 struct dw_edma;
 
+struct dw_edma_region {
+       phys_addr_t     paddr;
+       void __iomem    *vaddr;
+       size_t          sz;
+};
+
+struct dw_edma_core_ops {
+       int (*irq_vector)(struct device *dev, unsigned int nr);
+};
+
+enum dw_edma_map_format {
+       EDMA_MF_EDMA_LEGACY = 0x0,
+       EDMA_MF_EDMA_UNROLL = 0x1,
+       EDMA_MF_HDMA_COMPAT = 0x5
+};
+
 /**
  * struct dw_edma_chip - representation of DesignWare eDMA controller hardware
  * @dev:                struct device of the eDMA controller
  * @id:                         instance ID
- * @dw:                         struct dw_edma that is filed by dw_edma_probe()
+ * @nr_irqs:            total number of DMA IRQs
+ * @ops:                DMA channel to IRQ number mapping
+ * @wr_ch_cnt:          number of DMA write channels
+ * @rd_ch_cnt:          number of DMA read channels
+ * @rg_region:          DMA register region
+ * @ll_region_wr:       DMA descriptor link list memory for write channels
+ * @ll_region_rd:       DMA descriptor link list memory for read channels
+ * @dt_region_wr:       DMA data memory for write channels
+ * @dt_region_rd:       DMA data memory for read channels
+ * @mf:                 DMA register map format
+ * @dw:                         struct dw_edma that is filled by dw_edma_probe()
  */
 struct dw_edma_chip {
        struct device           *dev;
        int                     id;
+       int                     nr_irqs;
+       const struct dw_edma_core_ops   *ops;
+
+       struct dw_edma_region   rg_region;
+
+       u16                     wr_ch_cnt;
+       u16                     rd_ch_cnt;
+       /* link list address */
+       struct dw_edma_region   ll_region_wr[EDMA_MAX_WR_CH];
+       struct dw_edma_region   ll_region_rd[EDMA_MAX_RD_CH];
+
+       /* data region */
+       struct dw_edma_region   dt_region_wr[EDMA_MAX_WR_CH];
+       struct dw_edma_region   dt_region_rd[EDMA_MAX_RD_CH];
+
+       enum dw_edma_map_format mf;
+
        struct dw_edma          *dw;
 };
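
For context, a hedged sketch of how a glue driver is now expected to fill
struct dw_edma_chip and hand it to the core. Only the dw_edma_chip fields,
dw_edma_core_ops and dw_edma_probe() come from this patch; my_edma_irq_vector(),
my_edma_ops and my_edma_probe() are hypothetical names, modelled on the PCIe
glue updated above (which backs irq_vector with pci_irq_vector()):

	#include <linux/dma/edma.h>
	#include <linux/pci.h>

	/* Hypothetical glue driver, for illustration only */
	static int my_edma_irq_vector(struct device *dev, unsigned int nr)
	{
		/* A PCI-based glue would typically map IRQ "nr" like this */
		return pci_irq_vector(to_pci_dev(dev), nr);
	}

	static const struct dw_edma_core_ops my_edma_ops = {
		.irq_vector = my_edma_irq_vector,
	};

	static int my_edma_probe(struct pci_dev *pdev, void __iomem *regs, int nr_irqs)
	{
		struct device *dev = &pdev->dev;
		struct dw_edma_chip *chip;

		chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->dev = dev;
		chip->id = pdev->devfn;
		chip->nr_irqs = nr_irqs;
		chip->ops = &my_edma_ops;
		chip->mf = EDMA_MF_EDMA_UNROLL;
		chip->rg_region.vaddr = regs;
		/* wr_ch_cnt/rd_ch_cnt and the ll/dt regions are filled the
		 * same way the PCIe glue above fills them from its VSEC data.
		 */

		/* On success the core allocates its private data and sets chip->dw */
		return dw_edma_probe(chip);
	}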