u16 (*reg_readw)(u16 __iomem *addr);
        void (*reg_writeb)(u8 value, u8 __iomem *addr);
        u8 (*reg_readb)(u8 __iomem *addr);
-       /* Allocation ops */
-       int (*dma_alloc_pages)(struct hdac_bus *bus, int type, size_t size,
-                              struct snd_dma_buffer *buf);
-       void (*dma_free_pages)(struct hdac_bus *bus,
-                              struct snd_dma_buffer *buf);
 };
 
 #define HDA_UNSOL_QUEUE_SIZE   64
        /* CORB/RIRB and position buffers */
        struct snd_dma_buffer rb;
        struct snd_dma_buffer posbuf;
+       int dma_type;                   /* SNDRV_DMA_TYPE_XXX for CORB/RIRB,
+                                        * BDLs and the position buffer
+                                        */
 
        /* hdac_stream linked list */
        struct list_head stream_list;
 
        return readb(addr);
 }
 
-static int hdac_ext_dma_alloc_pages(struct hdac_bus *bus, int type,
-                          size_t size, struct snd_dma_buffer *buf)
-{
-       return snd_dma_alloc_pages(type, bus->dev, size, buf);
-}
-
-static void hdac_ext_dma_free_pages(struct hdac_bus *bus, struct snd_dma_buffer *buf)
-{
-       snd_dma_free_pages(buf);
-}
-
 static const struct hdac_io_ops hdac_ext_default_io = {
        .reg_writel = hdac_ext_writel,
        .reg_readl = hdac_ext_readl,
        .reg_readw = hdac_ext_readw,
        .reg_writeb = hdac_ext_writeb,
        .reg_readb = hdac_ext_readb,
-       .dma_alloc_pages = hdac_ext_dma_alloc_pages,
-       .dma_free_pages = hdac_ext_dma_free_pages,
 };
 
 /**
 
        else
                bus->ops = &default_ops;
        bus->io_ops = io_ops;
+       bus->dma_type = SNDRV_DMA_TYPE_DEV;
        INIT_LIST_HEAD(&bus->stream_list);
        INIT_LIST_HEAD(&bus->codec_list);
        INIT_WORK(&bus->unsol_work, snd_hdac_bus_process_unsol_events);
 
 {
        struct hdac_stream *s;
        int num_streams = 0;
+       int dma_type = bus->dma_type ? bus->dma_type : SNDRV_DMA_TYPE_DEV;
        int err;
 
        list_for_each_entry(s, &bus->stream_list, list) {
                /* allocate memory for the BDL for each stream */
-               err = bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV,
-                                                  BDL_SIZE, &s->bdl);
+               err = snd_dma_alloc_pages(dma_type, bus->dev,
+                                         BDL_SIZE, &s->bdl);
                num_streams++;
                if (err < 0)
                        return -ENOMEM;
        if (WARN_ON(!num_streams))
                return -EINVAL;
        /* allocate memory for the position buffer */
-       err = bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV,
-                                          num_streams * 8, &bus->posbuf);
+       err = snd_dma_alloc_pages(dma_type, bus->dev,
+                                 num_streams * 8, &bus->posbuf);
        if (err < 0)
                return -ENOMEM;
        list_for_each_entry(s, &bus->stream_list, list)
                s->posbuf = (__le32 *)(bus->posbuf.area + s->index * 8);
 
        /* single page (at least 4096 bytes) must suffice for both ringbuffes */
-       return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV,
-                                           PAGE_SIZE, &bus->rb);
+       return snd_dma_alloc_pages(dma_type, bus->dev, PAGE_SIZE, &bus->rb);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_bus_alloc_stream_pages);
 
 
        list_for_each_entry(s, &bus->stream_list, list) {
                if (s->bdl.area)
-                       bus->io_ops->dma_free_pages(bus, &s->bdl);
+                       snd_dma_free_pages(&s->bdl);
        }
 
        if (bus->rb.area)
-               bus->io_ops->dma_free_pages(bus, &bus->rb);
+               snd_dma_free_pages(&bus->rb);
        if (bus->posbuf.area)
-               bus->io_ops->dma_free_pages(bus, &bus->posbuf);
+               snd_dma_free_pages(&bus->posbuf);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_bus_free_stream_pages);
 
        azx_dev->locked = true;
        spin_unlock_irq(&bus->reg_lock);
 
-       err = bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV_SG,
-                                          byte_size, bufp);
+       err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, bus->dev,
+                                 byte_size, bufp);
        if (err < 0)
                goto err_alloc;
 
        return azx_dev->stream_tag;
 
  error:
-       bus->io_ops->dma_free_pages(bus, bufp);
+       snd_dma_free_pages(bufp);
  err_alloc:
        spin_lock_irq(&bus->reg_lock);
        azx_dev->locked = false;
        azx_dev->period_bytes = 0;
        azx_dev->format_val = 0;
 
-       bus->io_ops->dma_free_pages(bus, dmab);
+       snd_dma_free_pages(dmab);
        dmab->area = NULL;
 
        spin_lock_irq(&bus->reg_lock);
 
                return err;
        }
 
+       /* use the non-cached pages in non-snoop mode */
+       if (!azx_snoop(chip))
+               azx_bus(chip)->dma_type = SNDRV_DMA_TYPE_DEV_UC;
+
        /* Workaround for a communication error on CFL (bko#199007) and CNL */
        if (IS_CFL(pci) || IS_CNL(pci))
                azx_bus(chip)->polling_mode = 1;
        return 0;
 }
 
-/* DMA page allocation helpers.  */
-static int dma_alloc_pages(struct hdac_bus *bus,
-                          int type,
-                          size_t size,
-                          struct snd_dma_buffer *buf)
-{
-       struct azx *chip = bus_to_azx(bus);
-
-       if (!azx_snoop(chip) && type == SNDRV_DMA_TYPE_DEV)
-               type = SNDRV_DMA_TYPE_DEV_UC;
-       return snd_dma_alloc_pages(type, bus->dev, size, buf);
-}
-
-static void dma_free_pages(struct hdac_bus *bus, struct snd_dma_buffer *buf)
-{
-       snd_dma_free_pages(buf);
-}
-
 static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
                             struct vm_area_struct *area)
 {
        .reg_readw = pci_azx_readw,
        .reg_writeb = pci_azx_writeb,
        .reg_readb = pci_azx_readb,
-       .dma_alloc_pages = dma_alloc_pages,
-       .dma_free_pages = dma_free_pages,
 };
 
 static const struct hda_controller_ops pci_hda_ops = {
 
 #define power_save     0
 #endif
 
-/*
- * DMA page allocation ops.
- */
-static int dma_alloc_pages(struct hdac_bus *bus, int type, size_t size,
-                          struct snd_dma_buffer *buf)
-{
-       return snd_dma_alloc_pages(type, bus->dev, size, buf);
-}
-
-static void dma_free_pages(struct hdac_bus *bus, struct snd_dma_buffer *buf)
-{
-       snd_dma_free_pages(buf);
-}
-
 /*
  * Register access ops. Tegra HDA register access is DWORD only.
  */
        .reg_readw = hda_tegra_readw,
        .reg_writeb = hda_tegra_writeb,
        .reg_readb = hda_tegra_readb,
-       .dma_alloc_pages = dma_alloc_pages,
-       .dma_free_pages = dma_free_pages,
 };
 
 static const struct hda_controller_ops hda_tegra_ops; /* nothing special */
 
 static int skl_alloc_dma_buf(struct device *dev,
                struct snd_dma_buffer *dmab, size_t size)
 {
-       struct hdac_bus *bus = dev_get_drvdata(dev);
-
-       if (!bus)
-               return -ENODEV;
-
-       return  bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
+       return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size, dmab);
 }
 
 static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
 {
-       struct hdac_bus *bus = dev_get_drvdata(dev);
-
-       if (!bus)
-               return -ENODEV;
-
-       bus->io_ops->dma_free_pages(bus, dmab);
-
+       snd_dma_free_pages(dmab);
        return 0;
 }
 
 
        return readb(addr);
 }
 
-static int sof_hda_dma_alloc_pages(struct hdac_bus *bus, int type,
-                                  size_t size, struct snd_dma_buffer *buf)
-{
-       return snd_dma_alloc_pages(type, bus->dev, size, buf);
-}
-
-static void sof_hda_dma_free_pages(struct hdac_bus *bus,
-                                  struct snd_dma_buffer *buf)
-{
-       snd_dma_free_pages(buf);
-}
-
 static const struct hdac_io_ops io_ops = {
        .reg_writel = sof_hda_writel,
        .reg_readl = sof_hda_readl,
        .reg_readw = sof_hda_readw,
        .reg_writeb = sof_hda_writeb,
        .reg_readb = sof_hda_readb,
-       .dma_alloc_pages = sof_hda_dma_alloc_pages,
-       .dma_free_pages = sof_hda_dma_free_pages,
 };
 
 /*