        struct vm_area_struct *vma = pvmw->vma;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address = pvmw->address;
-       unsigned long mmun_start = address & HPAGE_PMD_MASK;
+       unsigned long haddr = address & HPAGE_PMD_MASK;
        pmd_t pmde;
        swp_entry_t entry;
 
                if (!is_readable_migration_entry(entry))
                        rmap_flags |= RMAP_EXCLUSIVE;
 
-               page_add_anon_rmap(new, vma, mmun_start, rmap_flags);
+               page_add_anon_rmap(new, vma, haddr, rmap_flags);
        } else {
                page_add_file_rmap(new, vma, true);
        }
        VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
-       set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
+       set_pmd_at(mm, haddr, pvmw->pmd, pmde);
 
        /* No need to invalidate - it was non-present before */
        update_mmu_cache_pmd(vma, address, pvmw->pmd);