 #define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_lock_irqsave(mapping, flags)		\
+	xa_lock_irqsave(&mapping->i_pages, flags)
+#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)	\
+	xa_unlock_irqrestore(&mapping->i_pages, flags)
 
 #endif /* _ASM_NIOS2_CACHEFLUSH_H */
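The hunk above, in arch/nios2/include/asm/cacheflush.h, adds _irqsave/_irqrestore variants of the dcache mmap lock. The existing flush_dcache_mmap_lock() maps to xa_lock_irq(), which disables interrupts on lock and unconditionally re-enables them on unlock; that is wrong if the caller was already running with interrupts off, for example in interrupt context. The new macros instead save the current interrupt state into `flags` and restore exactly that state on unlock. A minimal sketch of the pattern, not part of the patch; sketch_walk_mapping() is a hypothetical helper standing in for any code that must take the i_pages lock from an arbitrary context:

#include <linux/fs.h>
#include <linux/xarray.h>

/*
 * Hypothetical helper (illustration only): shows the locking pattern
 * the new macros enable.
 */
static void sketch_walk_mapping(struct address_space *mapping)
{
	unsigned long flags;

	/*
	 * xa_lock_irqsave() records whether interrupts were already
	 * disabled; xa_unlock_irqrestore() puts that exact state back,
	 * so the pair is safe in both process and interrupt context.
	 */
	xa_lock_irqsave(&mapping->i_pages, flags);
	/* ... walk state guarded by mapping->i_pages ... */
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}

The next hunk converts flush_aliases() (in arch/nios2/mm/cacheflush.c, inferred from context) to the new macros.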
 static void flush_aliases(struct address_space *mapping, struct folio *folio)
 {
 	struct mm_struct *mm = current->active_mm;
 	struct vm_area_struct *vma;
+	unsigned long flags;
 	pgoff_t pgoff;
 	unsigned long nr = folio_nr_pages(folio);
 
 	pgoff = folio->index;
 
-	flush_dcache_mmap_lock(mapping);
+	flush_dcache_mmap_lock_irqsave(mapping, flags);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
 		unsigned long start;
 
 		if (vma->vm_mm != mm)
 			continue;
 		if (!(vma->vm_flags & VM_MAYSHARE))
 			continue;
 
 		start = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 		flush_cache_range(vma, start, start + nr * PAGE_SIZE);
 	}
-	flush_dcache_mmap_unlock(mapping);
+	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
 }
 
 void flush_cache_all(void)
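With flush_aliases() converted, the dcache-alias flush path no longer forces interrupts back on when it drops the i_pages lock, so it can safely be reached with interrupts already disabled. A hedged illustration of such a caller; my_dma_read_complete() is hypothetical, while flush_dcache_folio() is the real kernel entry point that reaches flush_aliases() on this architecture:

#include <linux/cacheflush.h>
#include <linux/mm.h>

/*
 * Hypothetical I/O-completion handler (not from the patch); such
 * handlers commonly run with interrupts disabled.
 */
static void my_dma_read_complete(struct folio *folio)
{
	/*
	 * flush_dcache_folio() can end up in flush_aliases() above.
	 * With the old _irq locking, dropping the lock there would have
	 * re-enabled interrupts underneath this handler; the _irqsave
	 * variants preserve the handler's IRQ-off state.
	 */
	flush_dcache_folio(folio);
}

Note that the original flush_dcache_mmap_lock()/flush_dcache_mmap_unlock() macros are left in place, so callers that are never entered with interrupts disabled are unaffected by this change.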