mm/mmap: move vma_prepare before vma_adjust_trans_huge
Author: Suren Baghdasaryan <surenb@google.com>
Mon, 27 Feb 2023 17:36:13 +0000 (09:36 -0800)
Committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 6 Apr 2023 03:02:58 +0000 (20:02 -0700)
vma_prepare() acquires all locks required before VMA modifications.  Move
vma_prepare() before vma_adjust_trans_huge() so that VMA is locked before
any modification.

Link: https://lkml.kernel.org/r/20230227173632.3292573-15-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mmap.c

index 6c326002184d8ca13ec0f5d426fd185d9f32956c..e8f019eecd0f128d53ef4e1b0acb4291ba859304 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -683,12 +683,12 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
        if (vma_iter_prealloc(vmi))
                goto nomem;
 
+       vma_prepare(&vp);
        vma_adjust_trans_huge(vma, start, end, 0);
        /* VMA iterator points to previous, so set to start if necessary */
        if (vma_iter_addr(vmi) != start)
                vma_iter_set(vmi, start);
 
-       vma_prepare(&vp);
        vma->vm_start = start;
        vma->vm_end = end;
        vma->vm_pgoff = pgoff;
@@ -723,8 +723,8 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
                return -ENOMEM;
 
        init_vma_prep(&vp, vma);
-       vma_adjust_trans_huge(vma, start, end, 0);
        vma_prepare(&vp);
+       vma_adjust_trans_huge(vma, start, end, 0);
 
        if (vma->vm_start < start)
                vma_iter_clear(vmi, vma->vm_start, start);
@@ -1010,12 +1010,12 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
        if (vma_iter_prealloc(vmi))
                return NULL;
 
-       vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
        init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
        VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
                   vp.anon_vma != adjust->anon_vma);
 
        vma_prepare(&vp);
+       vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
        if (vma_start < vma->vm_start || vma_end > vma->vm_end)
                vma_expanded = true;
 
@@ -2214,10 +2214,10 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
        if (new->vm_ops && new->vm_ops->open)
                new->vm_ops->open(new);
 
-       vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
        init_vma_prep(&vp, vma);
        vp.insert = new;
        vma_prepare(&vp);
+       vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
 
        if (new_below) {
                vma->vm_start = addr;
@@ -2920,9 +2920,9 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
                if (vma_iter_prealloc(vmi))
                        goto unacct_fail;
 
-               vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
                init_vma_prep(&vp, vma);
                vma_prepare(&vp);
+               vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
                vma->vm_end = addr + len;
                vm_flags_set(vma, VM_SOFTDIRTY);
                vma_iter_store(vmi, vma);