        select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
        select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
        select HAVE_EXIT_THREAD
-       select HAVE_FAST_GUP if ARM_LPAE
+       select HAVE_GUP_FAST if ARM_LPAE
        select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
        select HAVE_FUNCTION_ERROR_INJECTION
        select HAVE_FUNCTION_GRAPH_TRACER
 
        select HAVE_SAMPLE_FTRACE_DIRECT
        select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
-       select HAVE_FAST_GUP
+       select HAVE_GUP_FAST
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_ERROR_INJECTION
 
        select HAVE_EBPF_JIT
        select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN
        select HAVE_EXIT_THREAD
-       select HAVE_FAST_GUP
+       select HAVE_GUP_FAST
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_ARG_ACCESS_API
        select HAVE_FUNCTION_ERROR_INJECTION
 
        select HAVE_DYNAMIC_FTRACE
        select HAVE_EBPF_JIT if !CPU_MICROMIPS
        select HAVE_EXIT_THREAD
-       select HAVE_FAST_GUP
+       select HAVE_GUP_FAST
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACER
 
        select HAVE_DYNAMIC_FTRACE_WITH_REGS    if ARCH_USING_PATCHABLE_FUNCTION_ENTRY || MPROFILE_KERNEL || PPC32
        select HAVE_EBPF_JIT
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
-       select HAVE_FAST_GUP
+       select HAVE_GUP_FAST
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_ARG_ACCESS_API
        select HAVE_FUNCTION_DESCRIPTORS        if PPC64_ELF_ABI_V1
 
        select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
        select HAVE_EBPF_JIT if MMU
-       select HAVE_FAST_GUP if MMU
+       select HAVE_GUP_FAST if MMU
        select HAVE_FUNCTION_ARG_ACCESS_API
        select HAVE_FUNCTION_ERROR_INJECTION
        select HAVE_GCC_PLUGINS
 
        select HAVE_DYNAMIC_FTRACE_WITH_REGS
        select HAVE_EBPF_JIT if HAVE_MARCH_Z196_FEATURES
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
-       select HAVE_FAST_GUP
+       select HAVE_GUP_FAST
        select HAVE_FENTRY
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_ARG_ACCESS_API
 
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DYNAMIC_FTRACE
-       select HAVE_FAST_GUP if MMU
+       select HAVE_GUP_FAST if MMU
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACER
        select HAVE_FTRACE_MCOUNT_RECORD
 
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
        select HAVE_EISA
        select HAVE_EXIT_THREAD
-       select HAVE_FAST_GUP
+       select HAVE_GUP_FAST
        select HAVE_FENTRY                      if X86_64 || DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_GRAPH_RETVAL       if HAVE_FUNCTION_GRAPH_TRACER
 
        VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio);
 
        /* Paired with the memory barrier in try_grab_folio(). */
-       if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+       if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
                smp_mb();
 
        if (unlikely(folio_maybe_dma_pinned(folio)))
         * This is conceptually a smp_wmb() paired with the smp_rmb() in
         * gup_must_unshare().
         */
-       if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+       if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
                smp_mb__after_atomic();
        return 0;
 }
         */
 
        /* Paired with the memory barrier in try_grab_folio(). */
-       if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+       if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
                smp_mb();
 
        if (unlikely(folio_maybe_dma_pinned(folio)))
         * This is conceptually a smp_wmb() paired with the smp_rmb() in
         * gup_must_unshare().
         */
-       if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+       if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
                smp_mb__after_atomic();
        return 0;
 }
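
The two hunks above hit the same pattern in two anon-rmap unshare helpers, and both barriers serve one pairing with GUP-fast. A rough sketch of the ordering argument, condensed into a comment (illustrative only, not kernel code):

        /*
         * unshare side (above)              GUP-fast side
         * --------------------              -------------
         * clear PageAnonExclusive           inc folio refcount (pin)
         * smp_mb()                          smp_mb()   in try_grab_folio()
         * folio_maybe_dma_pinned()?         smp_rmb()  in gup_must_unshare()
         *   -> restore flag, -EBUSY           PageAnonExclusive cleared?
         *                                       -> back off, retry slowly
         *
         * The paired full barriers guarantee that at least one side
         * observes the other's store, so a page can never end up both
         * unshared (reused for COW) and freshly GUP-pinned. Without
         * CONFIG_HAVE_GUP_FAST there is no lockless walker to race
         * with, which is why IS_ENABLED() lets the barriers compile away.
         */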
 
 {
        u64 size = 0;
 
-#ifdef CONFIG_HAVE_FAST_GUP
+#ifdef CONFIG_HAVE_GUP_FAST
        pgd_t *pgdp, pgd;
        p4d_t *p4dp, p4d;
        pud_t *pudp, pud;
        if (pte_present(pte))
                size = pte_leaf_size(pte);
        pte_unmap(ptep);
-#endif /* CONFIG_HAVE_FAST_GUP */
+#endif /* CONFIG_HAVE_GUP_FAST */
 
        return size;
 }
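
This hunk guards perf's page-size lookup, a page-table walk done without taking locks; such a walk is only sound under the rules GUP-fast establishes, hence the same config symbol. The key idiom, sketched (an approximation, not the verbatim function):

        /* Snapshot each level once with READ_ONCE() and operate only on
         * the local copy; the tables can change under a lockless walker,
         * so never read through the pointer twice. */
        pgd = READ_ONCE(*pgdp);
        if (pgd_none(pgd))
                return 0;
        /* The p4d, pud and pmd levels descend the same way, returning
         * the matching *_leaf_size() as soon as a leaf entry is found. */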
 
 config HAVE_MEMBLOCK_PHYS_MAP
        bool
 
-config HAVE_FAST_GUP
+config HAVE_GUP_FAST
        depends on MMU
        bool
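
Note the two test styles the rename has to chase: preprocessor #ifdef blocks where whole declarations must vanish, and IS_ENABLED() where a plain C conditional suffices. The latter keeps the guarded code visible to the compiler on every configuration; for example (illustrative):

        /* IS_ENABLED(CONFIG_HAVE_GUP_FAST) expands to a constant 1 or 0,
         * so this branch emits no code when the option is off, yet it is
         * still parsed and type-checked, unlike code hidden by #ifdef. */
        if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
                smp_mb();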
 
 #ifdef CONFIG_MMU
 
-#if defined(CONFIG_ARCH_HAS_HUGEPD) || defined(CONFIG_HAVE_FAST_GUP)
+#if defined(CONFIG_ARCH_HAS_HUGEPD) || defined(CONFIG_HAVE_GUP_FAST)
 static int record_subpages(struct page *page, unsigned long sz,
                           unsigned long addr, unsigned long end,
                           struct page **pages)
 
        return nr;
 }
-#endif /* CONFIG_ARCH_HAS_HUGEPD || CONFIG_HAVE_FAST_GUP */
+#endif /* CONFIG_ARCH_HAS_HUGEPD || CONFIG_HAVE_GUP_FAST */
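
The record_subpages() body falls outside this hunk's context; in substance it records every PAGE_SIZE subpage of a huge mapping covered by [addr, end) into pages[] and returns the count. A hedged sketch of that shape (an approximation, not quoted from the tree):

        static int record_subpages(struct page *page, unsigned long sz,
                                   unsigned long addr, unsigned long end,
                                   struct page **pages)
        {
                struct page *start_page;
                int nr;

                /* First subpage the range actually begins in. */
                start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT);
                for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
                        pages[nr] = nth_page(start_page, nr);

                return nr;
        }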
 
 #ifdef CONFIG_ARCH_HAS_HUGEPD
 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
  *
  * This code is based heavily on the PowerPC implementation by Nick Piggin.
  */
-#ifdef CONFIG_HAVE_FAST_GUP
+#ifdef CONFIG_HAVE_GUP_FAST
 
 /*
  * Used in the GUP-fast path to determine whether GUP is permitted to work on
                unsigned int flags, struct page **pages, int *nr)
 {
 }
-#endif /* CONFIG_HAVE_FAST_GUP */
+#endif /* CONFIG_HAVE_GUP_FAST */
 
 #ifndef gup_fast_permitted
 /*
        int nr_pinned = 0;
        unsigned seq;
 
-       if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
+       if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) ||
            !gup_fast_permitted(start, end))
                return 0;
 
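Returning 0 here is not a failure: the fast walk simply declines, and the caller pins the remainder under mmap_lock. Roughly (simplified, with the slow-path helper name assumed for illustration):

        /* Simplified caller shape, not the verbatim fallback logic. */
        nr_pinned = gup_fast(start, nr_pages, gup_flags, pages);
        if (nr_pinned == nr_pages || (gup_flags & FOLL_FAST_ONLY))
                return nr_pinned;
        /* Slow path for whatever the lockless walk could not pin. */
        return gup_slow_remainder(start, nr_pages, nr_pinned,
                                  gup_flags, pages);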
 
        }
 
        /* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */
-       if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+       if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
                smp_rmb();
 
        /*