swiotlb: split swiotlb_tbl_sync_single
author Christoph Hellwig <hch@lst.de>
Mon, 1 Mar 2021 07:44:26 +0000 (08:44 +0100)
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Wed, 17 Mar 2021 00:32:01 +0000 (00:32 +0000)
Split swiotlb_tbl_sync_single into two separate functions, one for the
to-device and one for the to-cpu synchronization.
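
As an illustration (a sketch distilled from the hunks below, not
additional code in this patch), a typical is_swiotlb_buffer() caller
moves from the single entry point with a sync-target argument to the
matching direction-specific helper:

	/* before: one function, bounce direction selected via the
	 * enum dma_sync_target argument */
	swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);

	/* after: dedicated helpers, enum dma_sync_target goes away */
	swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
	swiotlb_sync_single_for_device(dev, paddr, size, dir);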

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
drivers/iommu/dma-iommu.c
drivers/xen/swiotlb-xen.c
include/linux/swiotlb.h
kernel/dma/direct.c
kernel/dma/direct.h
kernel/dma/swiotlb.c

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 9149597410e28f1560a50a1312b4171782d66026..3087d9fa6065cfad9c2760d850482590a97b9cab 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -750,7 +750,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
                arch_sync_dma_for_cpu(phys, size, dir);
 
        if (is_swiotlb_buffer(phys))
-               swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
+               swiotlb_sync_single_for_cpu(dev, phys, size, dir);
 }
 
 static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -763,7 +763,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 
        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
        if (is_swiotlb_buffer(phys))
-               swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);
+               swiotlb_sync_single_for_device(dev, phys, size, dir);
 
        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_device(phys, size, dir);
@@ -784,8 +784,8 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
                        arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 
                if (is_swiotlb_buffer(sg_phys(sg)))
-                       swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
-                                               dir, SYNC_FOR_CPU);
+                       swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
+                                                   sg->length, dir);
        }
 }
 
@@ -801,8 +801,8 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 
        for_each_sg(sgl, sg, nelems, i) {
                if (is_swiotlb_buffer(sg_phys(sg)))
-                       swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
-                                               dir, SYNC_FOR_DEVICE);
+                       swiotlb_sync_single_for_device(dev, sg_phys(sg),
+                                                      sg->length, dir);
 
                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index d47f1b311caac0101b49559df143528cff3b7a99..4e8a4e14942afd66f0c69bbfd2fb0ac305581c2c 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -462,7 +462,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
        }
 
        if (is_xen_swiotlb_buffer(dev, dma_addr))
-               swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+               swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
 }
 
 static void
@@ -472,7 +472,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
        phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
 
        if (is_xen_swiotlb_buffer(dev, dma_addr))
-               swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+               swiotlb_sync_single_for_device(dev, paddr, size, dir);
 
        if (!dev_is_dma_coherent(dev)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 59f421d041ed9e082a759fbd296def6205d23950..0696bdc8072e97876a51156fc8c11d84e675a2f1 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -42,14 +42,6 @@ extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 extern void __init swiotlb_update_mem_attributes(void);
 
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
-       SYNC_FOR_CPU = 0,
-       SYNC_FOR_DEVICE = 1,
-};
-
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
                size_t mapping_size, size_t alloc_size,
                enum dma_data_direction dir, unsigned long attrs);
@@ -60,11 +52,10 @@ extern void swiotlb_tbl_unmap_single(struct device *hwdev,
                                     enum dma_data_direction dir,
                                     unsigned long attrs);
 
-extern void swiotlb_tbl_sync_single(struct device *hwdev,
-                                   phys_addr_t tlb_addr,
-                                   size_t size, enum dma_data_direction dir,
-                                   enum dma_sync_target target);
-
+void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
+               size_t size, enum dma_data_direction dir);
+void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
+               size_t size, enum dma_data_direction dir);
 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs);
 
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 002268262c9ad8e20fc1e6c1f0bb986981e58acf..f737e334705945c938159e84e8b12fde2face7c1 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -344,8 +344,8 @@ void dma_direct_sync_sg_for_device(struct device *dev,
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
 
                if (unlikely(is_swiotlb_buffer(paddr)))
-                       swiotlb_tbl_sync_single(dev, paddr, sg->length,
-                                       dir, SYNC_FOR_DEVICE);
+                       swiotlb_sync_single_for_device(dev, paddr, sg->length,
+                                                      dir);
 
                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_device(paddr, sg->length,
@@ -370,8 +370,8 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
                        arch_sync_dma_for_cpu(paddr, sg->length, dir);
 
                if (unlikely(is_swiotlb_buffer(paddr)))
-                       swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
-                                       SYNC_FOR_CPU);
+                       swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
+                                                   dir);
 
                if (dir == DMA_FROM_DEVICE)
                        arch_dma_mark_clean(paddr, sg->length);
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index e1bf721591c0cffe7b1691e925bd873de1b8408e..50afc05b6f1dcbb75a9b9985f6e45b24bb13553f 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -57,7 +57,7 @@ static inline void dma_direct_sync_single_for_device(struct device *dev,
        phys_addr_t paddr = dma_to_phys(dev, addr);
 
        if (unlikely(is_swiotlb_buffer(paddr)))
-               swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+               swiotlb_sync_single_for_device(dev, paddr, size, dir);
 
        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_device(paddr, size, dir);
@@ -74,7 +74,7 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev,
        }
 
        if (unlikely(is_swiotlb_buffer(paddr)))
-               swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+               swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
 
        if (dir == DMA_FROM_DEVICE)
                arch_dma_mark_clean(paddr, size);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index a431c6b64e82e871151f6ad6a8cdab2adad5b526..5fe8781be6f26f2192f345965b811a30da718f32 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -715,26 +715,22 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
        spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
 
-void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
-                            size_t size, enum dma_data_direction dir,
-                            enum dma_sync_target target)
+void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
+               size_t size, enum dma_data_direction dir)
 {
-       switch (target) {
-       case SYNC_FOR_CPU:
-               if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       swiotlb_bounce(hwdev, tlb_addr, size, DMA_FROM_DEVICE);
-               else
-                       BUG_ON(dir != DMA_TO_DEVICE);
-               break;
-       case SYNC_FOR_DEVICE:
-               if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       swiotlb_bounce(hwdev, tlb_addr, size, DMA_TO_DEVICE);
-               else
-                       BUG_ON(dir != DMA_FROM_DEVICE);
-               break;
-       default:
-               BUG();
-       }
+       if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+               swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
+       else
+               BUG_ON(dir != DMA_FROM_DEVICE);
+}
+
+void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
+               size_t size, enum dma_data_direction dir)
+{
+       if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+               swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
+       else
+               BUG_ON(dir != DMA_TO_DEVICE);
 }
 
 /*