powerpc/mm: Enable full randomisation of memory mappings
Author: Christophe Leroy <christophe.leroy@csgroup.eu>
Sat, 9 Apr 2022 17:17:35 +0000 (19:17 +0200)
Committer: Michael Ellerman <mpe@ellerman.id.au>
Thu, 5 May 2022 12:11:58 +0000 (22:11 +1000)
Do like most other architectures and provide randomisation also to
"legacy" memory mappings, by adding the random factor to
mm->mmap_base in arch_pick_mmap_layout().

See commit 8b8addf891de ("x86/mm/32: Enable full randomization on
i386 and X86_32") for all explanations and benefits of that mmap
randomisation.

At the moment, slice_find_area_bottomup() doesn't use mm->mmap_base
but uses the fixed TASK_UNMAPPED_BASE instead.
Since slice_find_area_bottomup() is used as a fallback by
slice_find_area_topdown(), it can't use mm->mmap_base
directly.

Instead of always using TASK_UNMAPPED_BASE as base address, leave
it to the caller. When called from slice_find_area_topdown()
TASK_UNMAPPED_BASE is used. Otherwise mm->mmap_base is used.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/417fb10dde828534c73a03138b49621d74f4e5be.1649523076.git.christophe.leroy@csgroup.eu
arch/powerpc/mm/book3s64/slice.c
arch/powerpc/mm/mmap.c

index 03681042b80784ce3ce24b1846bc61434b2e6b6e..c0b58afb9a47628973b315d013994834048e9feb 100644 (file)
@@ -276,20 +276,18 @@ static bool slice_scan_available(unsigned long addr,
 }
 
 static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
-                                             unsigned long len,
+                                             unsigned long addr, unsigned long len,
                                              const struct slice_mask *available,
                                              int psize, unsigned long high_limit)
 {
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
-       unsigned long addr, found, next_end;
+       unsigned long found, next_end;
        struct vm_unmapped_area_info info;
 
        info.flags = 0;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;
-
-       addr = TASK_UNMAPPED_BASE;
        /*
         * Check till the allow max value for this mmap request
         */
@@ -322,12 +320,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
 }
 
 static unsigned long slice_find_area_topdown(struct mm_struct *mm,
-                                            unsigned long len,
+                                            unsigned long addr, unsigned long len,
                                             const struct slice_mask *available,
                                             int psize, unsigned long high_limit)
 {
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
-       unsigned long addr, found, prev;
+       unsigned long found, prev;
        struct vm_unmapped_area_info info;
        unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
 
@@ -335,8 +333,6 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;
-
-       addr = mm->mmap_base;
        /*
         * If we are trying to allocate above DEFAULT_MAP_WINDOW
         * Add the different to the mmap_base.
@@ -377,7 +373,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
         * can happen with large stack limits and large mmap()
         * allocations.
         */
-       return slice_find_area_bottomup(mm, len, available, psize, high_limit);
+       return slice_find_area_bottomup(mm, TASK_UNMAPPED_BASE, len, available, psize, high_limit);
 }
 
 
@@ -386,9 +382,9 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
                                     int topdown, unsigned long high_limit)
 {
        if (topdown)
-               return slice_find_area_topdown(mm, len, mask, psize, high_limit);
+               return slice_find_area_topdown(mm, mm->mmap_base, len, mask, psize, high_limit);
        else
-               return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
+               return slice_find_area_bottomup(mm, mm->mmap_base, len, mask, psize, high_limit);
 }
 
 static inline void slice_copy_mask(struct slice_mask *dst,
index 5972d619d274c5b4798f4e1caf174310c8067ced..d9eae456558adb0694ac4e3e695b1f60f5ab2728 100644 (file)
@@ -96,7 +96,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (mmap_is_legacy(rlim_stack)) {
-               mm->mmap_base = TASK_UNMAPPED_BASE;
+               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor, rlim_stack);