nios2: fix flush_dcache_page() for usage from irq context
author Helge Deller <deller@gmx.de>
Tue, 22 Aug 2023 14:27:49 +0000 (16:27 +0200)
committer Andrew Morton <akpm@linux-foundation.org>
Thu, 24 Aug 2023 23:20:31 +0000 (16:20 -0700)
Since at least kernel 6.1, flush_dcache_page() is called with IRQs
disabled, e.g. from aio_complete().

But the current implementation of flush_dcache_page() on NIOS2
unintentionally re-enables IRQs (its flush_dcache_mmap_unlock() expands to
xa_unlock_irq()), which may lead to deadlocks.
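
For context, the problematic pattern looks roughly like this.  The caller
and its lock are hypothetical (a minimal sketch; aio_complete() takes its
spinlock with the same irqsave pattern); the locking primitives are real:

  #include <linux/highmem.h>
  #include <linux/spinlock.h>

  /* Hypothetical caller, for illustration only. */
  static void complete_with_irqs_off(spinlock_t *lock, struct page *page)
  {
          unsigned long flags;

          spin_lock_irqsave(lock, flags);      /* IRQs disabled from here on */

          /*
           * On nios2 this can reach flush_aliases(), whose
           * flush_dcache_mmap_unlock() expands to xa_unlock_irq(), i.e.
           * spin_unlock_irq(): IRQs are re-enabled right here, while the
           * caller still believes they are off.
           */
          flush_dcache_page(page);

          spin_unlock_irqrestore(lock, flags); /* restores a now-stale state */
  }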

Fix it by adding flush_dcache_mmap_lock_irqsave() and
flush_dcache_mmap_unlock_irqrestore() variants, based on xa_lock_irqsave()
and xa_unlock_irqrestore(), and using them in flush_aliases().
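
The difference matters because the xarray lock helpers are thin wrappers
around the corresponding spinlock primitives (a simplified excerpt,
following the definitions in include/linux/xarray.h):

  #define xa_lock_irq(xa)            spin_lock_irq(&(xa)->xa_lock)
  #define xa_unlock_irq(xa)          spin_unlock_irq(&(xa)->xa_lock)
  #define xa_lock_irqsave(xa, flags) \
                  spin_lock_irqsave(&(xa)->xa_lock, flags)
  #define xa_unlock_irqrestore(xa, flags) \
                  spin_unlock_irqrestore(&(xa)->xa_lock, flags)

spin_unlock_irq() re-enables IRQs unconditionally; spin_unlock_irqrestore()
puts them back into whatever state the matching irqsave call found, which
is what a caller that already runs with IRQs disabled needs.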

Link: https://lkml.kernel.org/r/ZOTF5WWURQNH9+iw@p100
Signed-off-by: Helge Deller <deller@gmx.de>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/nios2/include/asm/cacheflush.h
arch/nios2/mm/cacheflush.c

diff --git a/arch/nios2/include/asm/cacheflush.h b/arch/nios2/include/asm/cacheflush.h
index 7c48c5213fb7a3118455849a5c2ab707e7b27664..348cea0977927a523022217bc3637b498a8e4185 100644
--- a/arch/nios2/include/asm/cacheflush.h
+++ b/arch/nios2/include/asm/cacheflush.h
@@ -52,5 +52,9 @@ extern void invalidate_dcache_range(unsigned long start, unsigned long end);
 
 #define flush_dcache_mmap_lock(mapping)                xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping)      xa_unlock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_lock_irqsave(mapping, flags)         \
+               xa_lock_irqsave(&mapping->i_pages, flags)
+#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)    \
+               xa_unlock_irqrestore(&mapping->i_pages, flags)
 
 #endif /* _ASM_NIOS2_CACHEFLUSH_H */
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 28b805f465a8b73d198673edb216220d77d94c4a..0ee9c5f02e08ebb57233d7da418f3e74b8abd189 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -75,12 +75,13 @@ static void flush_aliases(struct address_space *mapping, struct folio *folio)
 {
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *vma;
+       unsigned long flags;
        pgoff_t pgoff;
        unsigned long nr = folio_nr_pages(folio);
 
        pgoff = folio->index;
 
-       flush_dcache_mmap_lock(mapping);
+       flush_dcache_mmap_lock_irqsave(mapping, flags);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
                unsigned long start;
 
@@ -92,7 +93,7 @@ static void flush_aliases(struct address_space *mapping, struct folio *folio)
                start = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                flush_cache_range(vma, start, start + nr * PAGE_SIZE);
        }
-       flush_dcache_mmap_unlock(mapping);
+       flush_dcache_mmap_unlock_irqrestore(mapping, flags);
 }
 
 void flush_cache_all(void)