mm/mmap: refactor mlock_future_check()
author Lorenzo Stoakes <lstoakes@gmail.com>
Mon, 22 May 2023 08:24:12 +0000 (09:24 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 9 Jun 2023 23:25:38 +0000 (16:25 -0700)
In all but one instance, mlock_future_check() is treated as a boolean
function despite returning an error code.  In one instance, this error
code is ignored and replaced with -ENOMEM.

This is confusing, and the implied inversion (a non-zero, 'truthy'
return value meaning failure and zero meaning success) is not
warranted.  Convert the function to return a bool, lightly refactor it,
and return true if the check passes, false if not.
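
To illustrate the resulting calling convention, here is a minimal
userspace sketch; the names (lock_limit_ok(), sim_mm, SIM_PAGE_SHIFT,
the hard-coded limits) are invented for the example and are not part of
this patch.  The predicate only answers yes/no, and each caller maps a
false result to whichever error code suits its own context.

  #include <errno.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Illustrative stand-ins for the kernel's per-mm state and limits. */
  #define SIM_PAGE_SHIFT 12
  #define SIM_VM_LOCKED  0x1UL

  struct sim_mm {
          unsigned long locked_vm;      /* pages already locked */
          unsigned long memlock_limit;  /* RLIMIT_MEMLOCK analogue, in bytes */
          bool cap_ipc_lock;            /* CAP_IPC_LOCK analogue */
  };

  /* Mirrors the refactored check: true if locking 'bytes' more is allowed. */
  static bool lock_limit_ok(const struct sim_mm *mm, unsigned long flags,
                            unsigned long bytes)
  {
          unsigned long locked_pages, limit_pages;

          if (!(flags & SIM_VM_LOCKED) || mm->cap_ipc_lock)
                  return true;

          locked_pages = bytes >> SIM_PAGE_SHIFT;
          locked_pages += mm->locked_vm;

          limit_pages = mm->memlock_limit >> SIM_PAGE_SHIFT;

          return locked_pages <= limit_pages;
  }

  /* Each caller picks the error code that fits its own context. */
  static int sim_mmap_path(const struct sim_mm *mm, unsigned long bytes)
  {
          return lock_limit_ok(mm, SIM_VM_LOCKED, bytes) ? 0 : -EAGAIN;
  }

  static int sim_stack_growth_path(const struct sim_mm *mm, unsigned long bytes)
  {
          return lock_limit_ok(mm, SIM_VM_LOCKED, bytes) ? 0 : -ENOMEM;
  }

  int main(void)
  {
          struct sim_mm mm = {
                  .locked_vm = 8,
                  .memlock_limit = 64UL << SIM_PAGE_SHIFT,
          };

          /* 8 + 32 pages <= 64-page limit -> 0 */
          printf("mmap path:  %d\n", sim_mmap_path(&mm, 32UL << SIM_PAGE_SHIFT));
          /* 8 + 128 pages > 64-page limit -> -ENOMEM (-12) */
          printf("stack path: %d\n",
                 sim_stack_growth_path(&mm, 128UL << SIM_PAGE_SHIFT));
          return 0;
  }

In the patch itself, do_mmap(), check_brk_limits(), vma_to_resize() and
secretmem_mmap() translate a failed check to -EAGAIN, while
acct_stack_growth() keeps returning -ENOMEM, so no error code is
silently rewritten any more.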

Link: https://lkml.kernel.org/r/20230522082412.56685-1-lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/internal.h
mm/mmap.c
mm/mremap.c
mm/secretmem.c

index bb654227959931ffb3cbc59b513431e43fb9ad05..66dd214b302a61dee6332d8778a043e2c214fdec 100644 (file)
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -576,8 +576,8 @@ extern long populate_vma_page_range(struct vm_area_struct *vma,
 extern long faultin_vma_page_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   bool write, int *locked);
-extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
-                             unsigned long len);
+extern bool mlock_future_check(struct mm_struct *mm, unsigned long flags,
+                              unsigned long bytes);
 /*
  * mlock_vma_folio() and munlock_vma_folio():
  * should be called with vma's mmap_lock held for read or write,
index 44be7fdfaac9183e794c56d5e9d5f606afc8400c..28d2c489a7e5f848f7a47165b72118deff6a4f5e 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -182,7 +182,8 @@ static int check_brk_limits(unsigned long addr, unsigned long len)
        if (IS_ERR_VALUE(mapped_addr))
                return mapped_addr;
 
-       return mlock_future_check(current->mm, current->mm->def_flags, len);
+       return mlock_future_check(current->mm, current->mm->def_flags, len)
+               ? 0 : -EAGAIN;
 }
 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
                unsigned long addr, unsigned long request, unsigned long flags);
@@ -1145,21 +1146,21 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
        return hint;
 }
 
-int mlock_future_check(struct mm_struct *mm, unsigned long flags,
-                      unsigned long len)
+bool mlock_future_check(struct mm_struct *mm, unsigned long flags,
+                       unsigned long bytes)
 {
-       unsigned long locked, lock_limit;
+       unsigned long locked_pages, limit_pages;
 
-       /*  mlock MCL_FUTURE? */
-       if (flags & VM_LOCKED) {
-               locked = len >> PAGE_SHIFT;
-               locked += mm->locked_vm;
-               lock_limit = rlimit(RLIMIT_MEMLOCK);
-               lock_limit >>= PAGE_SHIFT;
-               if (locked > lock_limit && !capable(CAP_IPC_LOCK))
-                       return -EAGAIN;
-       }
-       return 0;
+       if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
+               return true;
+
+       locked_pages = bytes >> PAGE_SHIFT;
+       locked_pages += mm->locked_vm;
+
+       limit_pages = rlimit(RLIMIT_MEMLOCK);
+       limit_pages >>= PAGE_SHIFT;
+
+       return locked_pages <= limit_pages;
 }
 
 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
@@ -1271,7 +1272,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
                if (!can_do_mlock())
                        return -EPERM;
 
-       if (mlock_future_check(mm, vm_flags, len))
+       if (!mlock_future_check(mm, vm_flags, len))
                return -EAGAIN;
 
        if (file) {
@@ -1889,7 +1890,7 @@ static int acct_stack_growth(struct vm_area_struct *vma,
                return -ENOMEM;
 
        /* mlock limit tests */
-       if (mlock_future_check(mm, vma->vm_flags, grow << PAGE_SHIFT))
+       if (!mlock_future_check(mm, vma->vm_flags, grow << PAGE_SHIFT))
                return -ENOMEM;
 
        /* Check to ensure the stack will not grow into a hugetlb-only region */
index b11ce6c920996d99f21145cd7fd4215e0a3c5beb..bcfcb8df587505a7cd6c596f3ebc5376c33e01ca 100644 (file)
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -775,7 +775,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
        if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
                return ERR_PTR(-EFAULT);
 
-       if (mlock_future_check(mm, vma->vm_flags, new_len - old_len))
+       if (!mlock_future_check(mm, vma->vm_flags, new_len - old_len))
                return ERR_PTR(-EAGAIN);
 
        if (!may_expand_vm(mm, vma->vm_flags,
index 974b32ba8b9d4f19e15c080b497c358c3428d98d..58d2af12df4fa97e11c3b4abd722c8c157baadcd 100644 (file)
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -125,7 +125,7 @@ static int secretmem_mmap(struct file *file, struct vm_area_struct *vma)
        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;
 
-       if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
+       if (!mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len))
                return -EAGAIN;
 
        vm_flags_set(vma, VM_LOCKED | VM_DONTDUMP);