if (WARN_ON(!phys))
                return;
 
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
+               arch_sync_dma_for_cpu(phys, size, dir);
+
        __iommu_dma_unmap(dev, dma_addr, size);
 
        if (unlikely(is_swiotlb_buffer(dev, phys)))
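
For reference, __iommu_dma_unmap_swiotlb() now does the CPU sync itself, against the physical address, before the IOVA mapping is torn down. Below is a sketch of the whole function after this hunk, reconstructed around the visible context lines and assuming the usual dma-iommu/swiotlb helpers (iommu_get_dma_domain(), iommu_iova_to_phys(), swiotlb_tbl_unmap_single()); the upstream body may differ in detail:

static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys;

	/* Recover the physical address while the IOVA is still mapped. */
	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_addr);
	if (WARN_ON(!phys))
		return;

	/* The arch sync now happens here, instead of in the callers. */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	__iommu_dma_unmap(dev, dma_addr, size);

	/* For bounce-buffered mappings, data is copied back exactly once. */
	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}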
@@ ... @@
 static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
        __iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
 }
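
With the sync folded into the helper, iommu_dma_unmap_page() becomes a plain pass-through. A minimal sketch of the resulting function, with the apparent motivation spelled out in the comment (the rationale is inferred from the diff, not quoted from the changelog):

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/*
	 * No explicit sync here any more. The old
	 * iommu_dma_sync_single_for_cpu() call would copy a swiotlb
	 * bounce buffer back to the original page during the sync, and
	 * swiotlb_tbl_unmap_single() would copy it again during the
	 * unmap; the helper now syncs once, in the right place.
	 */
	__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
}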
 
@@ ... @@
         struct scatterlist *tmp;
        int i;
 
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
-
        if (dev_is_untrusted(dev)) {
                iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
                return;
        }
 
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+
        /*
         * The scatterlist segments are mapped into a single
         * contiguous IOVA allocation, so this is incredibly easy.
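
Putting the pieces together, the head of iommu_dma_unmap_sg() after this hunk reads roughly as below (a sketch built around the visible context lines; locals other than those shown, and the contiguous-IOVA unmap that follows, are elided). Untrusted devices take the per-segment swiotlb path, whose helper now performs the CPU sync per segment, so the scatterlist-wide sync moves after that early return rather than running before it:

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *tmp;
	int i;

	/*
	 * Bounce-buffered (untrusted-device) segments are synced one by
	 * one inside __iommu_dma_unmap_swiotlb(), so return before the
	 * scatterlist-wide sync below to avoid syncing them twice.
	 */
	if (dev_is_untrusted(dev)) {
		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
		return;
	}

	/* Directly mapped segments still get their CPU sync here, while
	 * the single contiguous IOVA allocation is still in place. */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/* ... contiguous IOVA unmap follows, unchanged ... */
}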