mm: move 'mmap_min_addr' logic from callers into vm_unmapped_area()
author: Linus Torvalds <torvalds@linux-foundation.org>
Tue, 18 Apr 2023 21:40:09 +0000 (17:40 -0400)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 21 Apr 2023 21:52:05 +0000 (14:52 -0700)
Instead of having callers care about the mmap_min_addr logic for the
lowest valid mapping address (and some of them getting it wrong), just
move the logic into vm_unmapped_area() itself.  One less thing for various
architecture cases (and generic helpers) to worry about.

We should really try to make much more of this be common code, but baby
steps..

Without this, vm_unmapped_area() could return an address below
mmap_min_addr (because some caller forgot about that).  That then causes
the mmap machinery to think it has found a workable address, but then
later security_mmap_addr(addr) is unhappy about it and the mmap() returns
with a nonsensical error (EPERM).

The proper action is to either return ENOMEM (if the virtual address space
is exhausted), or try to find another address (ie do a bottom-up search
for free addresses after the top-down one failed).

See commit 2afc745f3e30 ("mm: ensure get_unmapped_area() returns higher
address than mmap_min_addr"), which fixed this for one call site (the
generic arch_get_unmapped_area_topdown() fallback) but left other cases
alone.

Link: https://lkml.kernel.org/r/20230418214009.1142926-1-Liam.Howlett@oracle.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Liam Howlett <liam.howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/s390/mm/hugetlbpage.c
arch/s390/mm/mmap.c
fs/hugetlbfs/inode.c
mm/mmap.c

index c299a18273ffeca9c30f89d75f14b6fdfc5b49ed..c718f2a0de9485ecb695b972509e99dd2eb0fbfa 100644 (file)
@@ -273,7 +273,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
 
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
-       info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+       info.low_limit = PAGE_SIZE;
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
index 3327c47bc18149826339d49fa5cf48b7ecee5d1a..fc9a7dc26c5ed710037923565bb8ab35814b621b 100644 (file)
@@ -136,7 +136,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long ad
 
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
-       info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+       info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        if (filp || (flags & MAP_SHARED))
                info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
index 702d79639c0dffe3028bb7cd9d80edca25e5fe0e..ecfdfb2529a36cf7b98914fb7c19f85043eca413 100644 (file)
@@ -208,7 +208,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
 
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
-       info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+       info.low_limit = PAGE_SIZE;
        info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
index 51b6976fd5256addd71e1d1638d10c8b788bbc2f..536bbb8fa0aefeb754c70291c098aff10d4b4688 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1548,7 +1548,8 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
  */
 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 {
-       unsigned long length, gap, low_limit;
+       unsigned long length, gap;
+       unsigned long low_limit, high_limit;
        struct vm_area_struct *tmp;
 
        MA_STATE(mas, &current->mm->mm_mt, 0, 0);
@@ -1559,8 +1560,11 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
                return -ENOMEM;
 
        low_limit = info->low_limit;
+       if (low_limit < mmap_min_addr)
+               low_limit = mmap_min_addr;
+       high_limit = info->high_limit;
 retry:
-       if (mas_empty_area(&mas, low_limit, info->high_limit - 1, length))
+       if (mas_empty_area(&mas, low_limit, high_limit - 1, length))
                return -ENOMEM;
 
        gap = mas.index;
@@ -1596,7 +1600,8 @@ retry:
  */
 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 {
-       unsigned long length, gap, high_limit, gap_end;
+       unsigned long length, gap, gap_end;
+       unsigned long low_limit, high_limit;
        struct vm_area_struct *tmp;
 
        MA_STATE(mas, &current->mm->mm_mt, 0, 0);
@@ -1605,10 +1610,12 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
        if (length < info->length)
                return -ENOMEM;
 
+       low_limit = info->low_limit;
+       if (low_limit < mmap_min_addr)
+               low_limit = mmap_min_addr;
        high_limit = info->high_limit;
 retry:
-       if (mas_empty_area_rev(&mas, info->low_limit, high_limit - 1,
-                               length))
+       if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length))
                return -ENOMEM;
 
        gap = mas.last + 1 - info->length;
@@ -1743,7 +1750,7 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
-       info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+       info.low_limit = PAGE_SIZE;
        info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
        info.align_mask = 0;
        info.align_offset = 0;