mm: remove CONFIG_PER_VMA_LOCK ifdefs
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Mon, 24 Jul 2023 18:54:01 +0000 (19:54 +0100)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Fri, 18 Aug 2023 17:12:50 +0000 (10:12 -0700)
Patch series "Handle most file-backed faults under the VMA lock", v3.

This patchset adds the ability to handle page faults on parts of files
which are already in the page cache without taking the mmap lock.

This patch (of 10):

Provide a stub lock_vma_under_rcu() when CONFIG_PER_VMA_LOCK is not
defined to eliminate ifdefs in its callers.
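
As an illustration (not part of the commit text), here is a minimal,
self-contained user-space sketch of the pattern this enables; the struct
definitions and the handle_fault() wrapper are hypothetical stand-ins for
the kernel types and the arch fault handlers, and only the stub mirrors
what this patch adds to include/linux/mm.h:

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel types. */
    struct mm_struct { int dummy; };
    struct vm_area_struct { int dummy; };

    #ifndef CONFIG_PER_VMA_LOCK
    /* The stub this patch adds: callers compile unchanged whether or
     * not per-VMA locking is configured. */
    static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
    		unsigned long address)
    {
    	return NULL;
    }
    #endif

    static void handle_fault(struct mm_struct *mm, unsigned long address)
    {
    	struct vm_area_struct *vma;

    	vma = lock_vma_under_rcu(mm, address);	/* no ifdef needed */
    	if (!vma) {
    		/* NULL means "take the mmap_lock slow path". */
    		printf("falling back to mmap_lock path\n");
    		return;
    	}
    	/* ... per-VMA-lock fast path would go here ... */
    }

    int main(void)
    {
    	struct mm_struct mm = { 0 };
    	handle_fault(&mm, 0x1000);
    	return 0;
    }

With CONFIG_PER_VMA_LOCK disabled, the stub's constant-NULL return lets
the compiler fold the fast path away, so the per-arch ifdef pairs removed
below are no longer needed.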

Link: https://lkml.kernel.org/r/20230724185410.1124082-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20230724185410.1124082-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/arm64/mm/fault.c
arch/powerpc/mm/fault.c
arch/riscv/mm/fault.c
arch/s390/mm/fault.c
arch/x86/mm/fault.c
include/linux/mm.h

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 3fe516b325772c25d6475f00e5700f0f061d1340..103fcbdc65526f67e7c7cc05fddc3664e11f3fcc 100644
@@ -587,7 +587,6 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-#ifdef CONFIG_PER_VMA_LOCK
        if (!(mm_flags & FAULT_FLAG_USER))
                goto lock_mmap;
 
@@ -615,7 +614,6 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
                return 0;
        }
 lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
 
 retry:
        vma = lock_mm_and_find_vma(mm, addr, regs);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 5bfdf6ecfa9650f9f102a9872bce3b0d5d031da2..fafce6bdeff0fd322be50617bcc559209d09d042 100644
@@ -469,7 +469,6 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
        if (is_exec)
                flags |= FAULT_FLAG_INSTRUCTION;
 
-#ifdef CONFIG_PER_VMA_LOCK
        if (!(flags & FAULT_FLAG_USER))
                goto lock_mmap;
 
@@ -501,7 +500,6 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
                return user_mode(regs) ? 0 : SIGBUS;
 
 lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
 
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
@@ -551,9 +549,7 @@ retry:
 
        mmap_read_unlock(current->mm);
 
-#ifdef CONFIG_PER_VMA_LOCK
 done:
-#endif
        if (unlikely(fault & VM_FAULT_ERROR))
                return mm_fault_error(regs, address, fault);
 
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 6ea2cce4cc17e17a8e30a639a3d6e7b82c8d5b81..046732fcb48ca38cf9c02eb54a87cbbda7f9ca18 100644
@@ -283,7 +283,6 @@ void handle_page_fault(struct pt_regs *regs)
                flags |= FAULT_FLAG_WRITE;
        else if (cause == EXC_INST_PAGE_FAULT)
                flags |= FAULT_FLAG_INSTRUCTION;
-#ifdef CONFIG_PER_VMA_LOCK
        if (!(flags & FAULT_FLAG_USER))
                goto lock_mmap;
 
@@ -311,7 +310,6 @@ void handle_page_fault(struct pt_regs *regs)
                return;
        }
 lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
 
 retry:
        vma = lock_mm_and_find_vma(mm, addr, regs);
@@ -368,9 +366,7 @@ retry:
 
        mmap_read_unlock(mm);
 
-#ifdef CONFIG_PER_VMA_LOCK
 done:
-#endif
        if (unlikely(fault & VM_FAULT_ERROR)) {
                tsk->thread.bad_cause = cause;
                mm_fault_error(regs, addr, fault);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 2f123429a291b7457561ee8e56eeaf39fbbda49c..6f6b9881e55e6cceae5b07eb82c56bcc7509fa89 100644
@@ -407,7 +407,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
                access = VM_WRITE;
        if (access == VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
-#ifdef CONFIG_PER_VMA_LOCK
        if (!(flags & FAULT_FLAG_USER))
                goto lock_mmap;
        vma = lock_vma_under_rcu(mm, address);
@@ -432,7 +431,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
                goto out;
        }
 lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
        mmap_read_lock(mm);
 
        gmap = NULL;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e8711b2cafaf70cc8ef5cb1ce72c7fa12e3c070c..787da09d24f3fbc6a3e05d8f95250243b704be7b 100644
@@ -1328,7 +1328,6 @@ void do_user_addr_fault(struct pt_regs *regs,
        }
 #endif
 
-#ifdef CONFIG_PER_VMA_LOCK
        if (!(flags & FAULT_FLAG_USER))
                goto lock_mmap;
 
@@ -1358,7 +1357,6 @@ void do_user_addr_fault(struct pt_regs *regs,
                return;
        }
 lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
 
 retry:
        vma = lock_mm_and_find_vma(mm, address, regs);
@@ -1418,9 +1416,7 @@ retry:
        }
 
        mmap_read_unlock(mm);
-#ifdef CONFIG_PER_VMA_LOCK
 done:
-#endif
        if (likely(!(fault & VM_FAULT_ERROR)))
                return;
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ded514ee2588dc50f6171bbee05a33dcc8be31a1..21299a0cfbca8a12e78bfabf3be545cf35ab960c 100644
@@ -742,6 +742,12 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}
 static inline void vma_mark_detached(struct vm_area_struct *vma,
                                     bool detached) {}
 
+static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
+               unsigned long address)
+{
+       return NULL;
+}
+
 #endif /* CONFIG_PER_VMA_LOCK */
 
 /*