 		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
 }
 
-static dma_addr_t __iommu_dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, int prot)
-{
-       return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
-}
-
-static void __iommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
-               size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-       __iommu_dma_unmap(dev, handle, size);
-}
-
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
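
The two helpers removed above existed only so their prototypes lined up
with the corresponding dma_map_ops callbacks; their bodies were a single
call each. Since every remaining internal caller passes offset 0, the
substitution is a straight expansion. A minimal sketch of the
equivalence, using only names from the hunk above (kernel-internal, not
compilable standalone):

	/* The wrapper call ... */
	*handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
	/* ... expanded to __iommu_dma_map(dev, page_to_phys(page) + offset,
	 * size, prot), so with offset == 0 it is exactly: */
	*handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);
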
@@ ... @@
 		if (!addr)
                        return NULL;
 
-               *handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+               *handle = __iommu_dma_map(dev, page_to_phys(page), iosize,
+                                         ioprot);
                if (*handle == DMA_MAPPING_ERROR) {
                        if (coherent)
                                __free_pages(page, get_order(size));
@@ ... @@
                if (!page)
                        return NULL;
 
-               *handle = __iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+               *handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);
                if (*handle == DMA_MAPPING_ERROR) {
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
@@ ... @@
                                arch_dma_prep_coherent(page, iosize);
                        memset(addr, 0, size);
                } else {
-                       __iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
+                       __iommu_dma_unmap(dev, *handle, iosize);
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                }
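
On the unmap side the same reasoning applies: the removed
__iommu_dma_unmap_page() ignored its dir and attrs arguments entirely
and just called __iommu_dma_unmap(dev, handle, size), which is why
callers could pass dummy values (0, attrs above; 0, 0 below) with no
effect. Sketch of the equivalence, names as in the hunks:

	/* Old error-path call; the trailing 0/attrs were discarded: */
	__iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
	/* Direct equivalent after the squash: */
	__iommu_dma_unmap(dev, *handle, iosize);
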
@@ ... @@
 	 * Hence how dodgy the below logic looks...
         */
        if (dma_in_atomic_pool(cpu_addr, size)) {
-               __iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
+               __iommu_dma_unmap(dev, handle, iosize);
                dma_free_from_pool(cpu_addr, size);
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                struct page *page = vmalloc_to_page(cpu_addr);
 
-               __iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
+               __iommu_dma_unmap(dev, handle, iosize);
                dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else if (is_vmalloc_addr(cpu_addr)){
                __iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
-               __iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
+               __iommu_dma_unmap(dev, handle, iosize);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
 }
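
For context on how these internal branches are reached: the alloc/free
routines patched above sit behind the dma_map_ops .alloc/.free
callbacks, so an ordinary dma_alloc_attrs() call from a driver on an
IOMMU-backed device lands in these paths. A hypothetical driver-side
sketch (the example_* names are invented; dma_alloc_attrs(),
dma_free_attrs() and DMA_ATTR_FORCE_CONTIGUOUS are the real DMA API):

	#include <linux/dma-mapping.h>

	/* A blocking GFP plus DMA_ATTR_FORCE_CONTIGUOUS should select the
	 * CMA branch of the alloc path, whose error leg now calls
	 * __iommu_dma_unmap() directly. */
	static void *example_alloc(struct device *dev, size_t size,
				   dma_addr_t *dma)
	{
		return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
				       DMA_ATTR_FORCE_CONTIGUOUS);
	}

	static void example_free(struct device *dev, size_t size,
				 void *cpu_addr, dma_addr_t dma)
	{
		/* Takes the DMA_ATTR_FORCE_CONTIGUOUS leg of the free path
		 * shown above. */
		dma_free_attrs(dev, size, cpu_addr, dma,
			       DMA_ATTR_FORCE_CONTIGUOUS);
	}
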