 #include <linux/dma-contiguous.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/pfn.h>
+#include <linux/vmalloc.h>
 #include <linux/set_memory.h>
 #include <linux/swiotlb.h>
 
 void *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        struct page *page;
        void *ret;
 
+       if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+           dma_alloc_need_uncached(dev, attrs) &&
+           !gfpflags_allow_blocking(gfp)) {
+               ret = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+               if (!ret)
+                       return NULL;
+               goto done;
+       }
+
        page = __dma_direct_alloc_pages(dev, size, gfp, attrs);
        if (!page)
                return NULL;
 
        if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
                /* remove any dirty cache lines on the kernel alias */
                if (!PageHighMem(page))
                        arch_dma_prep_coherent(page, size);
-               *dma_handle = phys_to_dma(dev, page_to_phys(page));
                /* return the page pointer as the opaque cookie */
-               return page;
+               ret = page;
+               goto done;
+       }
+
+       if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+            dma_alloc_need_uncached(dev, attrs)) ||
+           (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+               /* remove any dirty cache lines on the kernel alias */
+               arch_dma_prep_coherent(page, PAGE_ALIGN(size));
+
+               /* create a coherent mapping */
+               ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
+                               dma_pgprot(dev, PAGE_KERNEL, attrs),
+                               __builtin_return_address(0));
+               if (!ret) {
+                       dma_free_contiguous(dev, page, size);
+                       return ret;
+               }
+
+               memset(ret, 0, size);
+               goto done;
        }
 
        if (PageHighMem(page)) {
                /*
                 * Depending on the cma= arguments and per-arch setup
                 * dma_alloc_contiguous could return highmem pages.
                 * Without remapping there is no way to return them here,
                 * so log an error and fail.
                 */
                dev_info(dev, "Rejecting highmem page from CMA.\n");
                dma_free_contiguous(dev, page, size);
                return NULL;
        }
 
        ret = page_address(page);
-       if (force_dma_unencrypted(dev)) {
+       if (force_dma_unencrypted(dev))
                set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
-               *dma_handle = __phys_to_dma(dev, page_to_phys(page));
-       } else {
-               *dma_handle = phys_to_dma(dev, page_to_phys(page));
-       }
+
        memset(ret, 0, size);
 
        if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
            dma_alloc_need_uncached(dev, attrs)) {
                arch_dma_prep_coherent(page, size);
                ret = uncached_kernel_address(ret);
        }
-
+done:
+       if (force_dma_unencrypted(dev))
+               *dma_handle = __phys_to_dma(dev, page_to_phys(page));
+       else
+               *dma_handle = phys_to_dma(dev, page_to_phys(page));
        return ret;
 }
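
With the remap logic folded into dma_direct_alloc_pages(), a non-coherent device that cannot sleep is now served from the atomic pool by the branch added at the top of the function, instead of going through arch_dma_alloc(). A hedged, caller-side sketch of hitting that path (the device pointer and the 4096-byte size are illustrative assumptions, not from the patch):

    /*
     * Illustrative only: on a dma-direct device that is not cache
     * coherent and with CONFIG_DMA_DIRECT_REMAP enabled, GFP_ATOMIC
     * fails gfpflags_allow_blocking(), so the allocation is served
     * from the atomic pool branch above.
     */
    #include <linux/dma-mapping.h>

    static void *example_atomic_dma_alloc(struct device *my_dev,
                    dma_addr_t *dma)
    {
            /* safe to call from atomic context on this path */
            return dma_alloc_coherent(my_dev, 4096, dma, GFP_ATOMIC);
    }

    static void example_atomic_dma_free(struct device *my_dev, void *cpu,
                    dma_addr_t dma)
    {
            dma_free_coherent(my_dev, 4096, cpu, dma);
    }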
 
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
 {
        unsigned int page_order = get_order(size);
 
        if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
                /* cpu_addr is a struct page cookie, not a kernel address */
                dma_free_contiguous(dev, cpu_addr, size);
                return;
        }
 
+       if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+           dma_free_from_pool(cpu_addr, PAGE_ALIGN(size)))
+               return;
+
        if (force_dma_unencrypted(dev))
                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 
-       if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
-           dma_alloc_need_uncached(dev, attrs))
-               cpu_addr = cached_kernel_address(cpu_addr);
-       dma_free_contiguous(dev, virt_to_page(cpu_addr), size);
+       if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
+               vunmap(cpu_addr);
+
+       dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
 }
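
On the free side, cpu_addr may now be a vmalloc address created by dma_common_contiguous_remap() (or an uncached-segment alias), so the backing page is recovered from the DMA address with dma_direct_to_page() instead of virt_to_page(cpu_addr). dma_direct_to_page() is introduced elsewhere in this series; the following is a sketch of the behaviour assumed here, not the authoritative definition:

    /*
     * Assumed behaviour of dma_direct_to_page(): translate the device
     * address back to a CPU physical address, then to its struct page.
     */
    #include <linux/dma-direct.h>
    #include <linux/mm.h>
    #include <linux/pfn.h>

    static struct page *example_dma_direct_to_page(struct device *dev,
                    dma_addr_t dma_addr)
    {
            return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
    }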
 
 void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+           !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            dma_alloc_need_uncached(dev, attrs))
                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
        return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
 }
 
 void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
        if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+           !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            dma_alloc_need_uncached(dev, attrs))
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
        else
                dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
 }
 
        gen_pool_free(atomic_pool, (unsigned long)start, size);
        return true;
 }
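
The gen_pool_free() lines above are the tail of dma_free_from_pool() in kernel/dma/remap.c; the atomic pool behind dma_alloc_from_pool()/dma_free_from_pool() is a boot-time gen_pool over pre-mapped memory, and the arch_dma_alloc()/arch_dma_free() implementations removed below lived in the same file. A rough, illustrative sketch of that gen_pool allocation pattern (the pool, names and setup are examples, not kernel symbols):

    /*
     * Illustrative gen_pool usage only; the real atomic_pool is created
     * at boot and its helpers also hand back the backing struct page.
     */
    #include <linux/genalloc.h>

    static struct gen_pool *example_pool;       /* assume populated at boot */

    static void *example_pool_alloc(size_t size, phys_addr_t *phys)
    {
            unsigned long vaddr = gen_pool_alloc(example_pool, size);

            if (!vaddr)
                    return NULL;
            *phys = gen_pool_virt_to_phys(example_pool, vaddr);
            return (void *)vaddr;
    }

    static void example_pool_free(void *vaddr, size_t size)
    {
            gen_pool_free(example_pool, (unsigned long)vaddr, size);
    }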
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               gfp_t flags, unsigned long attrs)
-{
-       struct page *page = NULL;
-       void *ret;
-
-       size = PAGE_ALIGN(size);
-
-       if (!gfpflags_allow_blocking(flags)) {
-               ret = dma_alloc_from_pool(size, &page, flags);
-               if (!ret)
-                       return NULL;
-               goto done;
-       }
-
-       page = __dma_direct_alloc_pages(dev, size, flags, attrs);
-       if (!page)
-               return NULL;
-
-       /* remove any dirty cache lines on the kernel alias */
-       arch_dma_prep_coherent(page, size);
-
-       /* create a coherent mapping */
-       ret = dma_common_contiguous_remap(page, size,
-                       dma_pgprot(dev, PAGE_KERNEL, attrs),
-                       __builtin_return_address(0));
-       if (!ret) {
-               dma_free_contiguous(dev, page, size);
-               return ret;
-       }
-
-       memset(ret, 0, size);
-done:
-       *dma_handle = phys_to_dma(dev, page_to_phys(page));
-       return ret;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, unsigned long attrs)
-{
-       if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
-               phys_addr_t phys = dma_to_phys(dev, dma_handle);
-               struct page *page = pfn_to_page(__phys_to_pfn(phys));
-
-               vunmap(vaddr);
-               dma_free_contiguous(dev, page, size);
-       }
-}
 #endif /* CONFIG_DMA_DIRECT_REMAP */
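
Both dma_direct_alloc() and dma_direct_free() key the arch fallback on dma_alloc_need_uncached(), whose body is not part of this patch. Paraphrased from memory of include/linux/dma-noncoherent.h around this series, it checks roughly the following; treat the exact conditions as approximate:

    /*
     * Approximate paraphrase, not taken from this patch: coherent devices
     * never need an uncached mapping, and DMA_ATTR_NO_KERNEL_MAPPING opts
     * out of a kernel mapping entirely.  A further attrs check behind a
     * cache-sync config option is omitted here.
     */
    #include <linux/dma-mapping.h>
    #include <linux/dma-noncoherent.h>

    static bool example_alloc_need_uncached(struct device *dev,
                    unsigned long attrs)
    {
            if (dev_is_dma_coherent(dev))
                    return false;
            if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
                    return false;
            return true;
    }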