treewide: use initializer for struct vm_unmapped_area_info
authorRick Edgecombe <rick.p.edgecombe@intel.com>
Tue, 26 Mar 2024 02:16:52 +0000 (19:16 -0700)
committerAndrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:27 +0000 (20:56 -0700)
Future changes will need to add a new member to struct
vm_unmapped_area_info.  This would cause trouble for any call site that
doesn't initialize the struct.  Currently every caller sets each member
manually, so if new ones are added they will be uninitialized and the core
code parsing the struct will see garbage in the new member.
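
For illustration, a minimal sketch of the risk (the new member name here
is hypothetical, not the one added later in this series):

	struct vm_unmapped_area_info info;	/* uninitialized stack memory */

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;
	info.align_offset = 0;
	/* a hypothetical info.new_member is never set, so vm_unmapped_area()
	 * would read whatever garbage happens to be on the stack */
	return vm_unmapped_area(&info);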

It would be possible to initialize the new member manually to 0 at each
call site.  This and a couple of other options were discussed.  Leaving
some struct vm_unmapped_area_info instances not zero initialized would
put those sites at risk of feeding garbage into vm_unmapped_area() if
the convention is to zero initialize the struct and a new field
addition misses a call site that initializes each field manually.  So
it is useful to do things the same way across the kernel.

The consensus (see links) was that, taking into account both code
cleanliness and minimizing the chance of introducing bugs, the best
general way to accomplish this was to do C99 static initialization.  As
in: struct vm_unmapped_area_info info = {};

With this method of initialization, the whole struct will be zero
initialized, and any statements setting fields to zero become unneeded.
The change should not leave any cleanup behind at the call sites.
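
As a simplified sketch of the before/after at a typical call site (the
real conversions are in the diff below):

	/* Before: every member assigned by hand */
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	/* After: zero initialize up front and drop the redundant zeroing */
	struct vm_unmapped_area_info info = {};

	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_offset = pgoff << PAGE_SHIFT;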

While iterating through the possible solutions, a few archs kindly acked
other variations that still zero initialized the struct.  These sites have
been modified in previous changes using the pattern acked by the
respective arch.

So, to reduce the chance of bugs via uninitialized fields, perform a
tree-wide change using the consensus for the best general way to do
this.  Use C99 static initialization to zero the struct and remove any
statements that simply set members to zero.

Link: https://lkml.kernel.org/r/20240326021656.202649-11-rick.p.edgecombe@intel.com
Link: https://lore.kernel.org/lkml/202402280912.33AEE7A9CF@keescook/#t
Link: https://lore.kernel.org/lkml/j7bfvig3gew3qruouxrh7z7ehjjafrgkbcmg6tcghhfh3rhmzi@wzlcoecgy5rs/
Link: https://lore.kernel.org/lkml/ec3e377a-c0a0-4dd3-9cb9-96517e54d17e@csgroup.eu/
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@kernel.org>
Cc: Borislav Petkov (AMD) <bp@alien8.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Deepak Gupta <debug@rivosinc.com>
Cc: Guo Ren <guoren@kernel.org>
Cc: Helge Deller <deller@gmx.de>
Cc: H. Peter Anvin (Intel) <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
15 files changed:
arch/alpha/kernel/osf_sys.c
arch/arc/mm/mmap.c
arch/arm/mm/mmap.c
arch/loongarch/mm/mmap.c
arch/mips/mm/mmap.c
arch/s390/mm/hugetlbpage.c
arch/s390/mm/mmap.c
arch/sh/mm/mmap.c
arch/sparc/kernel/sys_sparc_32.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/mm/hugetlbpage.c
arch/x86/kernel/sys_x86_64.c
arch/x86/mm/hugetlbpage.c
fs/hugetlbfs/inode.c
mm/mmap.c

index 5db88b6274396d6c379a8ac7940fa4a22e069404..e5f881bc82881caddbfba80c985595dcdc814b28 100644 (file)
@@ -1218,14 +1218,11 @@ static unsigned long
 arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
                         unsigned long limit)
 {
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = addr;
        info.high_limit = limit;
-       info.align_mask = 0;
-       info.align_offset = 0;
        return vm_unmapped_area(&info);
 }
 
index 3c1c7ae732925ca9b17fc5dd138db7cf5112e36a..69a91529715599f08eec09089d32914e25e13bff 100644 (file)
@@ -27,7 +27,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        /*
         * We enforce the MAP_FIXED case.
@@ -51,11 +51,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                        return addr;
        }
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
-       info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        return vm_unmapped_area(&info);
 }
index a0f8a0ca0788adccb4459ff0eec6e8dc8d095f29..d65d0e6ed10ab4beedcc240d82e90b730dfcfef9 100644 (file)
@@ -34,7 +34,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        struct vm_area_struct *vma;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        /*
         * We only need to do colour alignment if either the I or D
@@ -68,7 +68,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                        return addr;
        }
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
@@ -87,7 +86,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        unsigned long addr = addr0;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        /*
         * We only need to do colour alignment if either the I or D
index 89af7c12e8c08d4faab2919cf22034b5ab0f5a6b..8890309851351cddff798777982b53046e998160 100644 (file)
@@ -25,7 +25,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
        struct vm_area_struct *vma;
        unsigned long addr = addr0;
        int do_color_align;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;
@@ -83,7 +83,6 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
                 */
        }
 
-       info.flags = 0;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        return vm_unmapped_area(&info);
index 00fe90c6db3e8079323f06d4bf49626fa345014c..7e11d7b58761075ef837a13e8c50ba7f57422c8a 100644 (file)
@@ -34,7 +34,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
        struct vm_area_struct *vma;
        unsigned long addr = addr0;
        int do_color_align;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;
@@ -92,7 +92,6 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
                 */
        }
 
-       info.flags = 0;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
        return vm_unmapped_area(&info);
index 7d948e243f4b203c126a0274a8475de3af9e9e8b..e1e63dc1b23d6389738a68664caa2e36e0484ef8 100644 (file)
@@ -248,14 +248,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long pgoff, unsigned long flags)
 {
        struct hstate *h = hstate_file(file);
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = current->mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       info.align_offset = 0;
        return vm_unmapped_area(&info);
 }
 
@@ -264,7 +262,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long pgoff, unsigned long flags)
 {
        struct hstate *h = hstate_file(file);
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
        unsigned long addr;
 
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
@@ -272,7 +270,6 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
        info.low_limit = PAGE_SIZE;
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       info.align_offset = 0;
        addr = vm_unmapped_area(&info);
 
        /*
index 6b2e4436ad4a8a0619418a51f40b15a53f1fc0b6..2067569465897b364530fe589f6d1a52c9d37916 100644 (file)
@@ -86,7 +86,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        if (len > TASK_SIZE - mmap_min_addr)
                return -ENOMEM;
@@ -102,7 +102,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                        goto check_asce_limit;
        }
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
@@ -122,7 +121,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long ad
 {
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        /* requested length too big for entire address space */
        if (len > TASK_SIZE - mmap_min_addr)
index b82199878b45949a7c22bc6c7a0ee862e4ce019c..bee329d4149aa7e7a48a02448310ce38d4d59f33 100644 (file)
@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int do_colour_align;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
@@ -88,7 +88,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                        return addr;
        }
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
@@ -106,7 +105,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_colour_align;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
index 082a551897ed8f35636299dc7ed84906a74860ef..08a19727795ca4a6ae85943787554b48f9f2ae82 100644 (file)
@@ -41,7 +41,7 @@ SYSCALL_DEFINE0(getpagesize)
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
@@ -59,7 +59,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
        if (!addr)
                addr = TASK_UNMAPPED_BASE;
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = addr;
        info.high_limit = TASK_SIZE;
index 1dbf7211666ea33f5fd294d5dc444c75224d6c97..d9c3b34ca7447075cb735603ee3324f297ff9845 100644 (file)
@@ -93,7 +93,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
        struct vm_area_struct * vma;
        unsigned long task_size = TASK_SIZE;
        int do_color_align;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
@@ -126,7 +126,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
                        return addr;
        }
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = min(task_size, VA_EXCLUDE_START);
@@ -154,7 +153,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        unsigned long task_size = STACK_TOP32;
        unsigned long addr = addr0;
        int do_color_align;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        /* This should only ever run for 32-bit processes.  */
        BUG_ON(!test_thread_flag(TIF_32BIT));
index c23012e3a3537899f726d973f7b389e22e0f4f9e..cc91ca7a1e182cf6f6de2c96be13c0e7e248bed1 100644 (file)
@@ -31,17 +31,15 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
 {
        struct hstate *h = hstate_file(filp);
        unsigned long task_size = TASK_SIZE;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = min(task_size, VA_EXCLUDE_START);
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       info.align_offset = 0;
        addr = vm_unmapped_area(&info);
 
        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
@@ -63,7 +61,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        struct hstate *h = hstate_file(filp);
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        /* This should only ever run for 32-bit processes.  */
        BUG_ON(!test_thread_flag(TIF_32BIT));
@@ -73,7 +71,6 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       info.align_offset = 0;
        addr = vm_unmapped_area(&info);
 
        /*
index cb9fa1d5c66f73c96fa9f6a18d785ddfda75bc25..96b9d29aead0864adfd5c3847f43d34bf42b268d 100644 (file)
@@ -118,7 +118,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
        unsigned long begin, end;
 
        if (flags & MAP_FIXED)
@@ -137,11 +137,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                        return addr;
        }
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = begin;
        info.high_limit = end;
-       info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
@@ -158,7 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
@@ -203,7 +201,6 @@ get_unmapped_area:
        if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
                info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
 
-       info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
index 06ca9a60bac2a5c13f2302937150b91ab994af30..807a5859a3c4b3e64c57f9d3f15aa9cc9b034540 100644 (file)
@@ -25,9 +25,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long pgoff, unsigned long flags)
 {
        struct hstate *h = hstate_file(file);
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = get_mmap_base(1);
 
@@ -39,7 +38,6 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);
 
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       info.align_offset = 0;
        return vm_unmapped_area(&info);
 }
 
@@ -48,7 +46,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long pgoff, unsigned long flags)
 {
        struct hstate *h = hstate_file(file);
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
@@ -63,7 +61,6 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
 
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       info.align_offset = 0;
        addr = vm_unmapped_area(&info);
 
        /*
index 3dee18bf47ed35dd3c06a67dcc8111081f7b0bf3..2f4e88552d3f28e35f653bd500465b7c38c8fb4e 100644 (file)
@@ -176,14 +176,12 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        struct hstate *h = hstate_file(file);
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = current->mm->mmap_base;
        info.high_limit = arch_get_mmap_end(addr, len, flags);
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       info.align_offset = 0;
        return vm_unmapped_area(&info);
 }
 
@@ -192,14 +190,13 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        struct hstate *h = hstate_file(file);
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
 
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       info.align_offset = 0;
        addr = vm_unmapped_area(&info);
 
        /*
index 0ef1191e3be5b04d133cc7a4b09333b40f559d22..4ad386f3f63fa57f403a98485ee2000d30a0ba7b 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1704,7 +1704,7 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
        const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
 
        if (len > mmap_end - mmap_min_addr)
@@ -1722,12 +1722,9 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
                        return addr;
        }
 
-       info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = mmap_end;
-       info.align_mask = 0;
-       info.align_offset = 0;
        return vm_unmapped_area(&info);
 }
 
@@ -1752,7 +1749,7 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 {
        struct vm_area_struct *vma, *prev;
        struct mm_struct *mm = current->mm;
-       struct vm_unmapped_area_info info;
+       struct vm_unmapped_area_info info = {};
        const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
 
        /* requested length too big for entire address space */
@@ -1776,8 +1773,6 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
-       info.align_mask = 0;
-       info.align_offset = 0;
        addr = vm_unmapped_area(&info);
 
        /*