io-mapping: Cleanup atomic iomap
author Thomas Gleixner <tglx@linutronix.de>
Tue, 3 Nov 2020 09:27:32 +0000 (10:27 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
Fri, 6 Nov 2020 22:14:58 +0000 (23:14 +0100)
Switch the atomic iomap implementation over to kmap_local and stick the
preempt/pagefault mechanics into the generic code similar to the
kmap_atomic variants.

Rename the x86 map function in preparation for a non-atomic variant.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linuxfoundation.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/r/20201103095858.625310005@linutronix.de
arch/x86/include/asm/iomap.h
arch/x86/mm/iomap_32.c
include/linux/io-mapping.h

index 0be7a30fd6bc1601f0e0dd8f3b2f708ce148efaa..e2de092fc38cb5cc633061a9fd44972a72d46ae7 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
-
-static inline void iounmap_atomic(void __iomem *vaddr)
-{
-       kunmap_local_indexed((void __force *)vaddr);
-       pagefault_enable();
-       preempt_enable();
-}
+void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
 
 int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
 
index e0a40d7cc66c806b2d342af4db8e91c98abffba4..9aaa756ddf21942dbc62e5956eb74883e7358ba3 100644 (file)
@@ -44,7 +44,7 @@ void iomap_free(resource_size_t base, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
        /*
         * For non-PAT systems, translate non-WB request to UC- just in
@@ -60,8 +60,6 @@ void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
        /* Filter out unsupported __PAGE_KERNEL* bits: */
        pgprot_val(prot) &= __default_kernel_pte_mask;
 
-       preempt_disable();
-       pagefault_disable();
        return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
 }
-EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
+EXPORT_SYMBOL_GPL(__iomap_local_pfn_prot);
index 3b0940be72e96bc03b73a77d86ff6984cf0c8e9f..60e7c83e49047fb3687828aacbaa194c25820e66 100644 (file)
@@ -69,13 +69,17 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
 
        BUG_ON(offset >= mapping->size);
        phys_addr = mapping->base + offset;
-       return iomap_atomic_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
+       preempt_disable();
+       pagefault_disable();
+       return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
 }
 
 static inline void
 io_mapping_unmap_atomic(void __iomem *vaddr)
 {
-       iounmap_atomic(vaddr);
+       kunmap_local_indexed((void __force *)vaddr);
+       pagefault_enable();
+       preempt_enable();
 }
 
 static inline void __iomem *