if (mm) {
                mmap_read_lock(mm);
-               vma = alloc->vma;
+               vma = vma_lookup(mm, alloc->vma_addr);
        }
 
        if (!vma && need_mm) {
 static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
                struct vm_area_struct *vma)
 {
-       if (vma)
+       unsigned long vm_start = 0;
+
+       if (vma) {
+               vm_start = vma->vm_start;
                alloc->vma_vm_mm = vma->vm_mm;
-       /*
-        * If we see alloc->vma is not NULL, buffer data structures set up
-        * completely. Look at smp_rmb side binder_alloc_get_vma.
-        * We also want to guarantee new alloc->vma_vm_mm is always visible
-        * if alloc->vma is set.
-        */
-       smp_wmb();
-       alloc->vma = vma;
+       }
+
+       /*
+        * NOTE(review): the old smp_wmb()/smp_rmb() publication pairing is
+        * dropped here on the assumption that the mmap write lock (asserted
+        * below) now orders the stores to vma_vm_mm and vma_addr against
+        * readers -- confirm every reader of vma_addr holds at least the
+        * mmap read lock of vma_vm_mm.
+        *
+        * NOTE(review): when vma is NULL (teardown path) this asserts on
+        * the *previously* cached vma_vm_mm; presumably that mm is still
+        * valid and write-locked at that point -- verify against callers.
+        */
+       mmap_assert_write_locked(alloc->vma_vm_mm);
+       alloc->vma_addr = vm_start;
 }
 
 static inline struct vm_area_struct *binder_alloc_get_vma(
 {
        struct vm_area_struct *vma = NULL;
 
-       if (alloc->vma) {
-               /* Look at description in binder_alloc_set_vma */
-               smp_rmb();
-               vma = alloc->vma;
-       }
+       /*
+        * NOTE(review): replacing the cached pointer with a fresh
+        * vma_lookup() changes the contract: the caller must now hold the
+        * mmap lock of alloc->vma_vm_mm across this lookup AND any use of
+        * the returned vma, otherwise the vma can be freed underneath us
+        * in a race with munmap() (use-after-free). Confirm every caller
+        * of this helper takes the mmap lock; the barrier-based scheme it
+        * replaces did not require it.
+        */
+       if (alloc->vma_addr)
+               vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);
+
        return vma;
 }
 
 
        buffers = 0;
        mutex_lock(&alloc->mutex);
-       BUG_ON(alloc->vma);
+       BUG_ON(alloc->vma_addr &&
+              vma_lookup(alloc->vma_vm_mm, alloc->vma_addr));
 
        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
 
        if (!binder_selftest_run)
                return;
        mutex_lock(&binder_selftest_lock);
-       if (!binder_selftest_run || !alloc->vma)
+       if (!binder_selftest_run || !alloc->vma_addr)
                goto done;
        pr_info("STARTED\n");
        binder_selftest_alloc_offset(alloc, end_offset, 0);