{
        pgd_t *pgd;
        struct mm_struct *mm = vma->vm_mm;
+       struct page *page;
 
-       ctx->page_mask = 0;
-
-       /*
-        * Call hugetlb_follow_page_mask for hugetlb vmas as it will use
-        * special hugetlb page table walking code.  This eliminates the
-        * need to check for hugetlb entries in the general walking code.
-        */
-       if (is_vm_hugetlb_page(vma))
-               return hugetlb_follow_page_mask(vma, address, flags,
-                                               &ctx->page_mask);
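+       /*
+        * For hugetlb vmas, vma_pgtable_walk_begin() takes the hugetlb vma
+        * lock in read mode, keeping the generic walk below stable against
+        * concurrent PMD unsharing.
+        */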
+       vma_pgtable_walk_begin(vma);
 
+       ctx->page_mask = 0;
        pgd = pgd_offset(mm, address);
 
        if (unlikely(is_hugepd(__hugepd(pgd_val(*pgd)))))
                page = follow_hugepd(vma, __hugepd(pgd_val(*pgd)),
                                     address, PGDIR_SHIFT, flags, ctx);
        else
                page = follow_p4d_mask(vma, address, pgd, flags, ctx);
 
+       vma_pgtable_walk_end(vma);
+
        return page;
 }
 
 
 }
 #endif /* CONFIG_USERFAULTFD */
 
-struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
-                                     unsigned long address, unsigned int flags,
-                                     unsigned int *page_mask)
-{
-       struct hstate *h = hstate_vma(vma);
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long haddr = address & huge_page_mask(h);
-       struct page *page = NULL;
-       spinlock_t *ptl;
-       pte_t *pte, entry;
-       int ret;
-
-       hugetlb_vma_lock_read(vma);
-       pte = hugetlb_walk(vma, haddr, huge_page_size(h));
-       if (!pte)
-               goto out_unlock;
-
-       ptl = huge_pte_lock(h, mm, pte);
-       entry = huge_ptep_get(pte);
-       if (pte_present(entry)) {
-               page = pte_page(entry);
-
-               if (!huge_pte_write(entry)) {
-                       if (flags & FOLL_WRITE) {
-                               page = NULL;
-                               goto out;
-                       }
-
-                       if (gup_must_unshare(vma, flags, page)) {
-                               /* Tell the caller to do unsharing */
-                               page = ERR_PTR(-EMLINK);
-                               goto out;
-                       }
-               }
-
-               page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT));
-
-               /*
-                * Note that page may be a sub-page, and with vmemmap
-                * optimizations the page struct may be read only.
-                * try_grab_page() will increase the ref count on the
-                * head page, so this will be OK.
-                *
-                * try_grab_page() should always be able to get the page here,
-                * because we hold the ptl lock and have verified pte_present().
-                */
-               ret = try_grab_page(page, flags);
-
-               if (WARN_ON_ONCE(ret)) {
-                       page = ERR_PTR(ret);
-                       goto out;
-               }
-
-               *page_mask = (1U << huge_page_order(h)) - 1;
-       }
-out:
-       spin_unlock(ptl);
-out_unlock:
-       hugetlb_vma_unlock_read(vma);
-
-       /*
-        * Fixup retval for dump requests: if pagecache doesn't exist,
-        * don't try to allocate a new page but just skip it.
-        */
-       if (!page && (flags & FOLL_DUMP) &&
-           !hugetlbfs_pagecache_present(h, vma, address))
-               page = ERR_PTR(-EFAULT);
-
-       return page;
-}
-
 long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end,
                pgprot_t newprot, unsigned long cp_flags)