mm: switch mm->get_unmapped_area() to a flag
author Rick Edgecombe <rick.p.edgecombe@intel.com>
Tue, 26 Mar 2024 02:16:44 +0000 (19:16 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:25 +0000 (20:56 -0700)
The mm_struct contains a function pointer *get_unmapped_area(), which is
set to either arch_get_unmapped_area() or arch_get_unmapped_area_topdown()
during the initialization of the mm.

Since the function pointer only ever points to two functions that are
named the same across all architectures, a function pointer is not really
required.  In addition, future changes will want to add versions of the
functions that take additional arguments.  So to save a pointer's worth of
bytes in mm_struct, and to avoid adding more function pointers to
mm_struct in future changes, remove it and keep the information about
which get_unmapped_area() to use in a flag.
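
For reference, this is the flag and how arch_pick_mmap_layout() now
selects it, condensed from the include/linux/sched/coredump.h and
mm/util.c hunks below:

	#define MMF_TOPDOWN		31	/* mm searches top down by default */
	#define MMF_TOPDOWN_MASK	(1 << MMF_TOPDOWN)

	/* in arch_pick_mmap_layout() */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}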

Add the new flag to MMF_INIT_MASK so it doesn't get clobbered on fork by
mmf_init_flags().  Most MM flags get clobbered on fork.  Previously,
mm->get_unmapped_area() was copied to the new mm in dup_mm(), so not
clobbering the flag preserves the existing behavior of inheriting the
topdown-ness.

Introduce a helper, mm_get_unmapped_area(), to easily convert code that
refers to the old function pointer to instead select and call either
arch_get_unmapped_area() or arch_get_unmapped_area_topdown() based on the
flag.  Then drop the mm->get_unmapped_area() function pointer.  Leave the
get_unmapped_area() pointer in struct file_operations alone.  The main
purpose of this change is to reorganize in preparation for future changes,
but it also converts the calls of mm->get_unmapped_area() from indirect
branches into direct ones.
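
Callers then convert mechanically, for example (taken from the
drivers/char/mem.c hunk below):

	/* before */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
	/* after */
	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);

and the helper itself just dispatches on the flag (from the mm/mmap.c
hunk below):

	unsigned long
	mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
			     unsigned long addr, unsigned long len,
			     unsigned long pgoff, unsigned long flags)
	{
		if (test_bit(MMF_TOPDOWN, &mm->flags))
			return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags);
		return arch_get_unmapped_area(file, addr, len, pgoff, flags);
	}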

The stress-ng bigheap benchmark calls realloc a lot, which calls through
get_unmapped_area() in the kernel.  On x86, the change yielded a ~1%
improvement there on a retpoline config.

In testing a few x86 configs, removing the pointer unfortunately didn't
result in any actual size reductions in the compiled layout of mm_struct.
But depending on compiler or arch alignment requirements, the change could
shrink the size of mm_struct.

Link: https://lkml.kernel.org/r/20240326021656.202649-3-rick.p.edgecombe@intel.com
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
Cc: Borislav Petkov (AMD) <bp@alien8.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Guo Ren <guoren@kernel.org>
Cc: Helge Deller <deller@gmx.de>
Cc: H. Peter Anvin (Intel) <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
23 files changed:
arch/s390/mm/hugetlbpage.c
arch/s390/mm/mmap.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/mm/hugetlbpage.c
arch/x86/kernel/cpu/sgx/driver.c
arch/x86/mm/hugetlbpage.c
arch/x86/mm/mmap.c
drivers/char/mem.c
drivers/dax/device.c
fs/hugetlbfs/inode.c
fs/proc/inode.c
fs/ramfs/file-mmu.c
include/linux/mm_types.h
include/linux/sched/coredump.h
include/linux/sched/mm.h
io_uring/io_uring.c
kernel/bpf/arena.c
kernel/bpf/syscall.c
mm/debug.c
mm/huge_memory.c
mm/mmap.c
mm/shmem.c
mm/util.c

index ca43b6fce71cf99392734c2d15348582ce330959..7d948e243f4b203c126a0274a8475de3af9e9e8b 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -318,7 +318,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                        goto check_asce_limit;
        }
 
-       if (mm->get_unmapped_area == arch_get_unmapped_area)
+       if (!test_bit(MMF_TOPDOWN, &mm->flags))
                addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
index b14fc0887654d28376081f5b4c77b0751109c149..6b2e4436ad4a8a0619418a51f40b15a53f1fc0b6 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -185,10 +185,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
         */
        if (mmap_is_legacy(rlim_stack)) {
                mm->mmap_base = mmap_base_legacy(random_factor);
-               mm->get_unmapped_area = arch_get_unmapped_area;
+               clear_bit(MMF_TOPDOWN, &mm->flags);
        } else {
                mm->mmap_base = mmap_base(random_factor, rlim_stack);
-               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+               set_bit(MMF_TOPDOWN, &mm->flags);
        }
 }
 
index 1e9a9e016237bfe621223a43c458bf42e50cfdba..1dbf7211666ea33f5fd294d5dc444c75224d6c97 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -218,14 +218,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        unsigned long align_goal, addr = -ENOMEM;
-       unsigned long (*get_area)(struct file *, unsigned long,
-                                 unsigned long, unsigned long, unsigned long);
-
-       get_area = current->mm->get_unmapped_area;
 
        if (flags & MAP_FIXED) {
                /* Ok, don't mess with it. */
-               return get_area(NULL, orig_addr, len, pgoff, flags);
+               return mm_get_unmapped_area(current->mm, NULL, orig_addr, len, pgoff, flags);
        }
        flags &= ~MAP_SHARED;
 
@@ -238,7 +234,8 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
                align_goal = (64UL * 1024);
 
        do {
-               addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
+               addr = mm_get_unmapped_area(current->mm, NULL, orig_addr,
+                                           len + (align_goal - PAGE_SIZE), pgoff, flags);
                if (!(addr & ~PAGE_MASK)) {
                        addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
                        break;
@@ -256,7 +253,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
         * be obtained.
         */
        if (addr & ~PAGE_MASK)
-               addr = get_area(NULL, orig_addr, len, pgoff, flags);
+               addr = mm_get_unmapped_area(current->mm, NULL, orig_addr, len, pgoff, flags);
 
        return addr;
 }
@@ -292,7 +289,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
            gap == RLIM_INFINITY ||
            sysctl_legacy_va_layout) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-               mm->get_unmapped_area = arch_get_unmapped_area;
+               clear_bit(MMF_TOPDOWN, &mm->flags);
        } else {
                /* We know it's 32-bit */
                unsigned long task_size = STACK_TOP32;
@@ -303,7 +300,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
                        gap = (task_size / 6 * 5);
 
                mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
-               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+               set_bit(MMF_TOPDOWN, &mm->flags);
        }
 }
 
index 8ed5bdf95d253b297722dc173f795c1f02ac5378..c23012e3a3537899f726d973f7b389e22e0f4f9e 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -123,7 +123,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
-       if (mm->get_unmapped_area == arch_get_unmapped_area)
+       if (!test_bit(MMF_TOPDOWN, &mm->flags))
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
index 262f5fb18d74d399c19acd689646692f367dd187..22b65a5f5ec6c4b3418a70ce3b15ca8e5a45c2e8 100644
--- a/arch/x86/kernel/cpu/sgx/driver.c
+++ b/arch/x86/kernel/cpu/sgx/driver.c
@@ -113,7 +113,7 @@ static unsigned long sgx_get_unmapped_area(struct file *file,
        if (flags & MAP_FIXED)
                return addr;
 
-       return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+       return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
 }
 
 #ifdef CONFIG_COMPAT
index dab6db288e5b21dc41e9870696c67f6335fa926f..06ca9a60bac2a5c13f2302937150b91ab994af30 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -115,7 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        }
 
 get_unmapped_area:
-       if (mm->get_unmapped_area == arch_get_unmapped_area)
+       if (!test_bit(MMF_TOPDOWN, &mm->flags))
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
index c90c20904a60750b80e54c090fc844816369fcd9..a2cabb1c81e1aec2fc82339c1092bf8b8167908a 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -129,9 +129,9 @@ static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base,
 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 {
        if (mmap_is_legacy())
-               mm->get_unmapped_area = arch_get_unmapped_area;
+               clear_bit(MMF_TOPDOWN, &mm->flags);
        else
-               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+               set_bit(MMF_TOPDOWN, &mm->flags);
 
        arch_pick_mmap_base(&mm->mmap_base, &mm->mmap_legacy_base,
                        arch_rnd(mmap64_rnd_bits), task_size_64bit(0),
index 3c6670cf905f116a2aebc773f8f9bb8d6714ac85..9b80e622ae80a88f5cca964dfcd9978aede04eba 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -544,7 +544,7 @@ static unsigned long get_unmapped_area_zero(struct file *file,
        }
 
        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
-       return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+       return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
 #else
        return -ENOSYS;
 #endif
index 93ebedc5ec8ca36357635e774d5ca094d0e70e54..47c126d37b59ab2b83550325817cfde1ef58ef6b 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -329,14 +329,14 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
        if ((off + len_align) < off)
                goto out;
 
-       addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
-                       pgoff, flags);
+       addr_align = mm_get_unmapped_area(current->mm, filp, addr, len_align,
+                                         pgoff, flags);
        if (!IS_ERR_VALUE(addr_align)) {
                addr_align += (off - addr_align) & (align - 1);
                return addr_align;
        }
  out:
-       return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+       return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
 }
 
 static const struct address_space_operations dev_dax_aops = {
index 6502c7e776d195e1d004908964f74ce5a76d6db2..3dee18bf47ed35dd3c06a67dcc8111081f7b0bf3 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -249,11 +249,11 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        }
 
        /*
-        * Use mm->get_unmapped_area value as a hint to use topdown routine.
+        * Use MMF_TOPDOWN flag as a hint to use topdown routine.
         * If architectures have special needs, they should define their own
         * version of hugetlb_get_unmapped_area.
         */
-       if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
+       if (test_bit(MMF_TOPDOWN, &mm->flags))
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
        return hugetlb_get_unmapped_area_bottomup(file, addr, len,
index 75396a24fd8cdeeea45cafc582296f36c47fa09c..d19434e2a58e71fdae5e4be075b6976bec260acc 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -455,8 +455,9 @@ pde_get_unmapped_area(struct proc_dir_entry *pde, struct file *file, unsigned lo
                return pde->proc_ops->proc_get_unmapped_area(file, orig_addr, len, pgoff, flags);
 
 #ifdef CONFIG_MMU
-       return current->mm->get_unmapped_area(file, orig_addr, len, pgoff, flags);
+       return mm_get_unmapped_area(current->mm, file, orig_addr, len, pgoff, flags);
 #endif
+
        return orig_addr;
 }
 
index c7a1aa3c882b021d28ff2d6c7522ddcc96cb9cc2..b45c7edc3225e4cfbeff17e28c2c2b0b8c17bd06 100644
--- a/fs/ramfs/file-mmu.c
+++ b/fs/ramfs/file-mmu.c
@@ -35,7 +35,7 @@ static unsigned long ramfs_mmu_get_unmapped_area(struct file *file,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags)
 {
-       return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+       return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
 }
 
 const struct file_operations ramfs_file_operations = {
index fee6561feb7eb5c0383e6c99ab7bc152c66873de..fa0d6995706f4f1fea295fb5fd59fc52b67dbb84 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -777,11 +777,7 @@ struct mm_struct {
                } ____cacheline_aligned_in_smp;
 
                struct maple_tree mm_mt;
-#ifdef CONFIG_MMU
-               unsigned long (*get_unmapped_area) (struct file *filp,
-                               unsigned long addr, unsigned long len,
-                               unsigned long pgoff, unsigned long flags);
-#endif
+
                unsigned long mmap_base;        /* base of mmap area */
                unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
 #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
index 02f5090ffea29c9dc9f1901dae0ef43faaefef5c..e62ff805cfc950fc3091f727b0517c70fdfd3bdc 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -92,9 +92,12 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_VM_MERGE_ANY       30
 #define MMF_VM_MERGE_ANY_MASK  (1 << MMF_VM_MERGE_ANY)
 
+#define MMF_TOPDOWN            31      /* mm searches top down by default */
+#define MMF_TOPDOWN_MASK       (1 << MMF_TOPDOWN)
+
 #define MMF_INIT_MASK          (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
                                 MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\
-                                MMF_VM_MERGE_ANY_MASK)
+                                MMF_VM_MERGE_ANY_MASK | MMF_TOPDOWN_MASK)
 
 static inline unsigned long mmf_init_flags(unsigned long flags)
 {
index b6543f9d78d6b868f30fff4289c317ae365b8ca1..ed1caa26c8be9d2735fe3a4bf147213b738fe946 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -8,6 +8,7 @@
 #include <linux/mm_types.h>
 #include <linux/gfp.h>
 #include <linux/sync_core.h>
+#include <linux/sched/coredump.h>
 
 /*
  * Routines for handling mm_structs
@@ -186,6 +187,10 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                          unsigned long len, unsigned long pgoff,
                          unsigned long flags);
 
+unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp,
+                                  unsigned long addr, unsigned long len,
+                                  unsigned long pgoff, unsigned long flags);
+
 unsigned long
 generic_get_unmapped_area(struct file *filp, unsigned long addr,
                          unsigned long len, unsigned long pgoff,
index c170a2b8d2cf21f06d1c5af8bf57edecb94aaa95..d5edfb8444d78f67e2026e8b1baba8513d48d256 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3525,7 +3525,7 @@ static unsigned long io_uring_mmu_get_unmapped_area(struct file *filp,
 #else
        addr = 0UL;
 #endif
-       return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+       return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
 }
 
 #else /* !CONFIG_MMU */
index 343c3456c8ddf0e6352a5f2614627c316078077a..16dbf4f6b77fa53676ca44b7e3dcaf22778e68df 100644
--- a/kernel/bpf/arena.c
+++ b/kernel/bpf/arena.c
@@ -314,7 +314,7 @@ static unsigned long arena_get_unmapped_area(struct file *filp, unsigned long ad
                        return -EINVAL;
        }
 
-       ret = current->mm->get_unmapped_area(filp, addr, len * 2, 0, flags);
+       ret = mm_get_unmapped_area(current->mm, filp, addr, len * 2, 0, flags);
        if (IS_ERR_VALUE(ret))
                return ret;
        if ((ret >> 32) == ((ret + len - 1) >> 32))
index c287925471f68ef989ffcd3022942397e9d1465c..9cb89e875f0d3f70b4d614e9a08bbe32f6806d40 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -980,7 +980,7 @@ static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr
        if (map->ops->map_get_unmapped_area)
                return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags);
 #ifdef CONFIG_MMU
-       return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+       return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
 #else
        return addr;
 #endif
index e8a96b8b71973bfdeac6c7e1eb2b6c12efeb3170..b71186f1fb0b1be91b058090802fb0c04522605e 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -177,9 +177,6 @@ EXPORT_SYMBOL(dump_vma);
 void dump_mm(const struct mm_struct *mm)
 {
        pr_emerg("mm %px task_size %lu\n"
-#ifdef CONFIG_MMU
-               "get_unmapped_area %px\n"
-#endif
                "mmap_base %lu mmap_legacy_base %lu\n"
                "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
                "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
@@ -205,9 +202,6 @@ void dump_mm(const struct mm_struct *mm)
                "def_flags: %#lx(%pGv)\n",
 
                mm, mm->task_size,
-#ifdef CONFIG_MMU
-               mm->get_unmapped_area,
-#endif
                mm->mmap_base, mm->mmap_legacy_base,
                mm->pgd, atomic_read(&mm->mm_users),
                atomic_read(&mm->mm_count),
index 157cee64850c808994975f836021f6e1bbfc42fd..7fdb2275a1e0403a3ddfec1b34c265a9472397aa 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -816,8 +816,8 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
        if (len_pad < len || (off + len_pad) < off)
                return 0;
 
-       ret = current->mm->get_unmapped_area(filp, addr, len_pad,
-                                             off >> PAGE_SHIFT, flags);
+       ret = mm_get_unmapped_area(current->mm, filp, addr, len_pad,
+                                  off >> PAGE_SHIFT, flags);
 
        /*
         * The failure might be due to length padding. The caller will retry
@@ -835,8 +835,7 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
 
        off_sub = (off - ret) & (size - 1);
 
-       if (current->mm->get_unmapped_area == arch_get_unmapped_area_topdown &&
-           !off_sub)
+       if (test_bit(MMF_TOPDOWN, &current->mm->flags) && !off_sub)
                return ret + size;
 
        ret += off_sub;
@@ -853,7 +852,7 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
        if (ret)
                return ret;
 
-       return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+       return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
 }
 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
 
index 77a625e13ec1246775452fcc6abc4f7728b16f76..dfb8a518e8c9650cbc27e5824e091489b54c0c5e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1812,7 +1812,8 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
 {
        unsigned long (*get_area)(struct file *, unsigned long,
-                                 unsigned long, unsigned long, unsigned long);
+                                 unsigned long, unsigned long, unsigned long)
+                                 = NULL;
 
        unsigned long error = arch_mmap_check(addr, len, flags);
        if (error)
@@ -1822,7 +1823,6 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
-       get_area = current->mm->get_unmapped_area;
        if (file) {
                if (file->f_op->get_unmapped_area)
                        get_area = file->f_op->get_unmapped_area;
@@ -1841,7 +1841,11 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        if (!file)
                pgoff = 0;
 
-       addr = get_area(file, addr, len, pgoff, flags);
+       if (get_area)
+               addr = get_area(file, addr, len, pgoff, flags);
+       else
+               addr = mm_get_unmapped_area(current->mm, file, addr, len,
+                                           pgoff, flags);
        if (IS_ERR_VALUE(addr))
                return addr;
 
@@ -1856,6 +1860,17 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 
 EXPORT_SYMBOL(get_unmapped_area);
 
+unsigned long
+mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
+                    unsigned long addr, unsigned long len,
+                    unsigned long pgoff, unsigned long flags)
+{
+       if (test_bit(MMF_TOPDOWN, &mm->flags))
+               return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags);
+       return arch_get_unmapped_area(file, addr, len, pgoff, flags);
+}
+EXPORT_SYMBOL(mm_get_unmapped_area);
+
 /**
  * find_vma_intersection() - Look up the first VMA which intersects the interval
  * @mm: The process address space.
index 98985179f495d0b1797e1d3e48109e353f99756a..fa2a0ed97507d140c6614cf9c5d1ecbdf47f7cf3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2267,8 +2267,6 @@ unsigned long shmem_get_unmapped_area(struct file *file,
                                      unsigned long uaddr, unsigned long len,
                                      unsigned long pgoff, unsigned long flags)
 {
-       unsigned long (*get_area)(struct file *,
-               unsigned long, unsigned long, unsigned long, unsigned long);
        unsigned long addr;
        unsigned long offset;
        unsigned long inflated_len;
@@ -2278,8 +2276,8 @@ unsigned long shmem_get_unmapped_area(struct file *file,
        if (len > TASK_SIZE)
                return -ENOMEM;
 
-       get_area = current->mm->get_unmapped_area;
-       addr = get_area(file, uaddr, len, pgoff, flags);
+       addr = mm_get_unmapped_area(current->mm, file, uaddr, len, pgoff,
+                                   flags);
 
        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                return addr;
@@ -2336,7 +2334,8 @@ unsigned long shmem_get_unmapped_area(struct file *file,
        if (inflated_len < len)
                return addr;
 
-       inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
+       inflated_addr = mm_get_unmapped_area(current->mm, NULL, uaddr,
+                                            inflated_len, 0, flags);
        if (IS_ERR_VALUE(inflated_addr))
                return addr;
        if (inflated_addr & ~PAGE_MASK)
@@ -4801,7 +4800,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
                                      unsigned long addr, unsigned long len,
                                      unsigned long pgoff, unsigned long flags)
 {
-       return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+       return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
 }
 #endif
 
index a9e911b22b99273d9ede9a14bde4402e5361bd9a..c9e519e6811f55cccca2065124ecd0b6e83e9dd6 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -469,17 +469,17 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 
        if (mmap_is_legacy(rlim_stack)) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-               mm->get_unmapped_area = arch_get_unmapped_area;
+               clear_bit(MMF_TOPDOWN, &mm->flags);
        } else {
                mm->mmap_base = mmap_base(random_factor, rlim_stack);
-               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+               set_bit(MMF_TOPDOWN, &mm->flags);
        }
 }
 #elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 {
        mm->mmap_base = TASK_UNMAPPED_BASE;
-       mm->get_unmapped_area = arch_get_unmapped_area;
+       clear_bit(MMF_TOPDOWN, &mm->flags);
 }
 #endif