dma-direct: make uncached_kernel_address more general
author    Christoph Hellwig <hch@lst.de>
          Fri, 21 Feb 2020 23:55:43 +0000 (15:55 -0800)
committer Christoph Hellwig <hch@lst.de>
          Mon, 16 Mar 2020 09:48:09 +0000 (10:48 +0100)
Rename the symbol to arch_dma_set_uncached, pass a size to it, and
allow an error return.  That will allow reusing this hook for in-place
page table remapping.
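
A minimal sketch of the resulting contract (illustration only, not part
of this patch; my_arch_remap_uncached() is a made-up helper):

    /* needs <linux/err.h> for ERR_PTR() */
    void *arch_dma_set_uncached(void *addr, size_t size)
    {
            int ret;

            /* e.g. split and remap the covering kernel mapping */
            ret = my_arch_remap_uncached((unsigned long)addr, size);
            if (ret)
                    return ERR_PTR(ret);
            return addr;
    }

The caller in dma_direct_alloc_pages() checks the result with IS_ERR()
and unwinds the allocation on failure.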

As the in-place remap doesn't always require an explicit cache flush,
also decouple ARCH_HAS_DMA_PREP_COHERENT from ARCH_HAS_DMA_SET_UNCACHED.
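
For example, a hypothetical architecture that remaps in place could
select the new symbol on its own (names invented for illustration):

    config MY_ARCH
            def_bool y
            # in-place remap, no explicit cache flush needed, so
            # ARCH_HAS_DMA_PREP_COHERENT is not selected here
            select ARCH_HAS_DMA_SET_UNCACHED

whereas architectures returning an uncached alias, such as MIPS and
nios2 below, now select ARCH_HAS_DMA_PREP_COHERENT explicitly.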

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
arch/Kconfig
arch/microblaze/Kconfig
arch/microblaze/mm/consistent.c
arch/mips/Kconfig
arch/mips/mm/dma-noncoherent.c
arch/nios2/Kconfig
arch/nios2/mm/dma-mapping.c
arch/xtensa/Kconfig
arch/xtensa/kernel/pci-dma.c
include/linux/dma-noncoherent.h
kernel/dma/direct.c

index 7994b239f155af600a90b6698b4ea31d0848e1a8..090cfe0c82a7b43e01f08cd8adbdc0c78a8dbd4d 100644 (file)
@@ -248,11 +248,11 @@ config ARCH_HAS_SET_DIRECT_MAP
        bool
 
 #
-# Select if arch has an uncached kernel segment and provides the
-# uncached_kernel_address symbol to use it
+# Select if the architecture provides the arch_dma_set_uncached symbol to
+# either provide an uncached segment alias for a DMA allocation, or
+# to remap the page tables in place.
 #
-config ARCH_HAS_UNCACHED_SEGMENT
-       select ARCH_HAS_DMA_PREP_COHERENT
+config ARCH_HAS_DMA_SET_UNCACHED
        bool
 
 # Select if arch init_task must go in the __init_task_data section
index 6a331bd57ea84b1993be3ec1b4e372d59f9b0a54..9606c244b5b8b709debc9961649abda587bff394 100644 (file)
@@ -8,7 +8,7 @@ config MICROBLAZE
        select ARCH_HAS_GCOV_PROFILE_ALL
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-       select ARCH_HAS_UNCACHED_SEGMENT if !MMU
+       select ARCH_HAS_DMA_SET_UNCACHED if !MMU
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_TABLE_SORT
index cede7c5e8135f4df85b336f32078e01432f2dccf..e09b66e43cb63f436f68a0444b32aca7a71dcb5b 100644 (file)
@@ -40,7 +40,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 #define UNCACHED_SHADOW_MASK 0
 #endif /* CONFIG_XILINX_UNCACHED_SHADOW */
 
-void *uncached_kernel_address(void *ptr)
+void *arch_dma_set_uncached(void *ptr, size_t size)
 {
        unsigned long addr = (unsigned long)ptr;
 
index 797d7f1ad5fe8841410bb0bbcdb53f730ba5edbb..489185db501e2c3b8c86214de086c90ce0b039f6 100644 (file)
@@ -1187,8 +1187,9 @@ config DMA_NONCOHERENT
        # significant advantages.
        #
        select ARCH_HAS_DMA_WRITE_COMBINE
+       select ARCH_HAS_DMA_PREP_COHERENT
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-       select ARCH_HAS_UNCACHED_SEGMENT
+       select ARCH_HAS_DMA_SET_UNCACHED
        select DMA_NONCOHERENT_MMAP
        select DMA_NONCOHERENT_CACHE_SYNC
        select NEED_DMA_MAP_STATE
index 77dce28ad0a07f6d246dab5bd8bf8d6d52fe2052..fcea92d95d86099d8fa5d00e52a1e825eee901c2 100644 (file)
@@ -49,7 +49,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
        dma_cache_wback_inv((unsigned long)page_address(page), size);
 }
 
-void *uncached_kernel_address(void *addr)
+void *arch_dma_set_uncached(void *addr, size_t size)
 {
        return (void *)(__pa(addr) + UNCAC_BASE);
 }
index 44b5da37e8bdc1b4fb123a0344f254b0c6ccf65a..2fc4ed210b5f0446ce526f1c063d91d4f347b6b6 100644 (file)
@@ -2,9 +2,10 @@
 config NIOS2
        def_bool y
        select ARCH_32BIT_OFF_T
+       select ARCH_HAS_DMA_PREP_COHERENT
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-       select ARCH_HAS_UNCACHED_SEGMENT
+       select ARCH_HAS_DMA_SET_UNCACHED
        select ARCH_NO_SWAP
        select TIMER_OF
        select GENERIC_ATOMIC64
index f30f2749257ce0a24b92fb077c245f4c4b763a89..fd887d5f3f9a786eb3c7fbde2ef103fe9cbc07ed 100644 (file)
@@ -67,7 +67,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
        flush_dcache_range(start, start + size);
 }
 
-void *uncached_kernel_address(void *ptr)
+void *arch_dma_set_uncached(void *ptr, size_t size)
 {
        unsigned long addr = (unsigned long)ptr;
 
index 32ee759a3fdad56826ad551316051f8b94df5e16..de229424b65943057c04e3a6b80905fb87d9e538 100644 (file)
@@ -6,7 +6,7 @@ config XTENSA
        select ARCH_HAS_DMA_PREP_COHERENT if MMU
        select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
-       select ARCH_HAS_UNCACHED_SEGMENT if MMU
+       select ARCH_HAS_DMA_SET_UNCACHED if MMU
        select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_USE_QUEUED_SPINLOCKS
        select ARCH_WANT_FRAME_POINTERS
index 6a685545d5c9d89289be7e090e3e50cdaf19a91a..17c4384f8495582f45774041251fc9f3d40f73e1 100644 (file)
@@ -92,7 +92,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
  * coherent DMA memory operations when CONFIG_MMU is not enabled.
  */
 #ifdef CONFIG_MMU
-void *uncached_kernel_address(void *p)
+void *arch_dma_set_uncached(void *p, size_t size)
 {
        return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
 }
index b6b72e19b0cd922054e24c1ee793deb8f969da9b..1a403950667373e597e5ab2770f194c52ca2d671 100644 (file)
@@ -108,6 +108,6 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
 
-void *uncached_kernel_address(void *addr);
+void *arch_dma_set_uncached(void *addr, size_t size);
 
 #endif /* _LINUX_DMA_NONCOHERENT_H */
index 650580fbbff3560e6dc538d86cd7a91d15462d3d..baf4e93735c313c64b52ddbad323913477fd6046 100644 (file)
@@ -192,10 +192,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 
        memset(ret, 0, size);
 
-       if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+       if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            dma_alloc_need_uncached(dev, attrs)) {
                arch_dma_prep_coherent(page, size);
-               ret = uncached_kernel_address(ret);
+               ret = arch_dma_set_uncached(ret, size);
+               if (IS_ERR(ret))
+                       goto out_free_pages;
        }
 done:
        if (force_dma_unencrypted(dev))
@@ -236,7 +238,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-       if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+       if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            dma_alloc_need_uncached(dev, attrs))
                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
@@ -246,7 +248,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-       if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+       if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            dma_alloc_need_uncached(dev, attrs))
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);