        struct scatterlist *sg;
        int i;
 
-       if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
-               return;
-
-       for_each_sg(sgl, sg, nelems, i) {
-               if (!dev_is_dma_coherent(dev))
+       if (dev_is_untrusted(dev))
+               for_each_sg(sgl, sg, nelems, i)
+                       iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+                                                     sg->length, dir);
+       else if (!dev_is_dma_coherent(dev))
+               for_each_sg(sgl, sg, nelems, i)
                        arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
-
-               if (is_swiotlb_buffer(dev, sg_phys(sg)))
-                       swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
-                                                   sg->length, dir);
-       }
 }
 
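For reference, after this first hunk iommu_dma_sync_sg_for_cpu() reads
roughly as follows. This is a sketch assembled from the hunk itself; the
signature is assumed from the variables it uses (sgl, nelems, dir) and may
be wrapped differently in the tree. The idea is that entries for untrusted
devices are bounced through swiotlb, so after mapping, sg_phys(sg) names
the original pages rather than the bounce slots; syncing therefore has to
go by sg_dma_address(sg), with the single-entry helper expected to resolve
that address and perform both the bounce copy and any cache maintenance.

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_untrusted(dev))
                /*
                 * Bounced path: sg_phys(sg) points at the original pages,
                 * not the swiotlb slot, so sync by DMA address and let the
                 * single-entry helper resolve it.
                 */
                for_each_sg(sgl, sg, nelems, i)
                        iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
                                                      sg->length, dir);
        else if (!dev_is_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}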
@@ ... @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
        struct scatterlist *sg;
        int i;
 
-       if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
-               return;
-
-       for_each_sg(sgl, sg, nelems, i) {
-               if (is_swiotlb_buffer(dev, sg_phys(sg)))
-                       swiotlb_sync_single_for_device(dev, sg_phys(sg),
-                                                      sg->length, dir);
-
-               if (!dev_is_dma_coherent(dev))
+       if (dev_is_untrusted(dev))
+               for_each_sg(sgl, sg, nelems, i)
+                       iommu_dma_sync_single_for_device(dev,
+                                                        sg_dma_address(sg),
+                                                        sg->length, dir);
+       else if (!dev_is_dma_coherent(dev))
+               for_each_sg(sgl, sg, nelems, i)
                        arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
-       }
 }
 
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
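The second hunk gives iommu_dma_sync_sg_for_device() the mirror-image
shape, sketched below under the same assumption about the signature
wrapping. The two branches are mutually exclusive by design: for an
untrusted device that is also non-coherent, iommu_dma_sync_single_for_device()
is expected to take care of the cache maintenance as well, so the
arch_sync_dma_for_device() path only needs to cover trusted non-coherent
devices.

static void iommu_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_untrusted(dev))
                /* Sync by DMA address so the bounce buffer is reached. */
                for_each_sg(sgl, sg, nelems, i)
                        iommu_dma_sync_single_for_device(dev,
                                                         sg_dma_address(sg),
                                                         sg->length, dir);
        else if (!dev_is_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}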