for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
                addr = start + i * PMD_SIZE;
                domain = get_domain_name(pmd);
-               if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
+               if (pmd_none(*pmd) || pmd_leaf(*pmd) || !pmd_present(*pmd))
                        note_page(st, addr, 4, pmd_val(*pmd), domain);
                else
                        walk_pte(st, pmd, addr, domain);
 
-               if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
+               if (SECTION_SIZE < PMD_SIZE && pmd_leaf(pmd[1])) {
                        addr += SECTION_SIZE;
                        pmd++;
                        domain = get_domain_name(pmd);
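
/*
 * Illustrative sketch, not part of the patch above: the common pattern
 * every converted walker follows.  pmd_leaf() is true when the PMD
 * entry itself maps a block/huge page, so there is no PTE table
 * underneath to descend into.  note_leaf() and walk_pte_level() are
 * hypothetical stand-ins for the per-caller action.
 */
static void walk_pmd_sketch(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || !pmd_present(*pmd))
			continue;			/* nothing mapped here */
		if (pmd_leaf(*pmd))
			note_leaf(addr, pmd_val(*pmd));	/* hypothetical helper */
		else
			walk_pte_level(pmd, addr, next);/* hypothetical helper */
	} while (pmd++, addr = next, addr != end);
}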
 
 
        WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
        assert_spin_locked(pmd_lockptr(mm, pmdp));
-       WARN_ON(!(pmd_large(pmd)));
+       WARN_ON(!(pmd_leaf(pmd)));
 #endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
 
 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
                                unsigned long addr, unsigned long next)
 {
-       int large = pmd_large(*pmdp);
+       int large = pmd_leaf(*pmdp);
 
        if (large)
                vmemmap_verify(pmdp_ptep(pmdp), node, addr, next);
 
                 * enabled so these checks can't be used.
                 */
                if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
-                       VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
+                       VM_WARN_ON(!(pmd_leaf(pmd) || pmd_huge(pmd)));
                return pte_page(pmd_pte(pmd));
        }
        return virt_to_page(pmd_page_vaddr(pmd));
 
                        }
                        pte = boot_pte_alloc();
                        pmd_populate(&init_mm, pmd, pte);
-               } else if (pmd_large(*pmd)) {
+               } else if (pmd_leaf(*pmd)) {
                        continue;
                }
                pgtable_pte_populate(pmd, addr, next, mode);
 
 
 static inline int pmd_bad(pmd_t pmd)
 {
-       if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
+       if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_leaf(pmd))
                return 1;
        return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
 }
 
 static inline int pmd_protnone(pmd_t pmd)
 {
-       /* pmd_large(pmd) implies pmd_present(pmd) */
-       return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
+       /* pmd_leaf(pmd) implies pmd_present(pmd) */
+       return pmd_leaf(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
 }
 #endif
 
        unsigned long origin_mask;
 
        origin_mask = _SEGMENT_ENTRY_ORIGIN;
-       if (pmd_large(pmd))
+       if (pmd_leaf(pmd))
                origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
        return (unsigned long)__va(pmd_val(pmd) & origin_mask);
 }
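
/*
 * Context sketch, not part of the patch: on s390 a segment-table (PMD)
 * entry is a leaf when its large-page bit is set; only then does the
 * origin field hold a 1 MB frame instead of a page-table origin, which
 * is why the origin mask above differs for the leaf case.  Approximate
 * form of the predicate, assuming the usual _SEGMENT_ENTRY_LARGE bit:
 */
static inline int s390_pmd_leaf_sketch(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}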
 
        pmd = pmd_offset(pud, vmaddr);
        VM_BUG_ON(pmd_none(*pmd));
        /* Are we allowed to use huge pages? */
-       if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
+       if (pmd_leaf(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
                return -EFAULT;
        /* Link gmap segment table entry location to page table. */
        rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
                rc = radix_tree_insert(&gmap->host_to_guest,
                                       vmaddr >> PMD_SHIFT, table);
                if (!rc) {
-                       if (pmd_large(*pmd)) {
+                       if (pmd_leaf(*pmd)) {
                                *table = (pmd_val(*pmd) &
                                          _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
                                        | _SEGMENT_ENTRY_GMAP_UC;
        }
 
        /* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
-       if (!pmd_large(*pmdp))
+       if (!pmd_leaf(*pmdp))
                spin_unlock(&gmap->guest_table_lock);
        return pmdp;
 }
  */
 static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
 {
-       if (pmd_large(*pmdp))
+       if (pmd_leaf(*pmdp))
                spin_unlock(&gmap->guest_table_lock);
 }
 
                rc = -EAGAIN;
                pmdp = gmap_pmd_op_walk(gmap, gaddr);
                if (pmdp) {
-                       if (!pmd_large(*pmdp)) {
+                       if (!pmd_leaf(*pmdp)) {
                                rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
                                                      bits);
                                if (!rc) {
        if (!pmdp)
                return;
 
-       if (pmd_large(*pmdp)) {
+       if (pmd_leaf(*pmdp)) {
                if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
                        bitmap_fill(bitmap, _PAGE_ENTRIES);
        } else {
 
 
 int pmd_huge(pmd_t pmd)
 {
-       return pmd_large(pmd);
+       return pmd_leaf(pmd);
 }
 
 int pud_huge(pud_t pud)
 
                if (pmd_none(*pmdp))
                        return -EINVAL;
                next = pmd_addr_end(addr, end);
-               if (pmd_large(*pmdp)) {
+               if (pmd_leaf(*pmdp)) {
                        need_split  = !!(flags & SET_MEMORY_4K);
                        need_split |= !!(addr & ~PMD_MASK);
                        need_split |= !!(addr + PMD_SIZE > next);
 
                return key ? -EFAULT : 0;
        }
 
-       if (pmd_large(*pmdp)) {
+       if (pmd_leaf(*pmdp)) {
                paddr = pmd_val(*pmdp) & HPAGE_MASK;
                paddr |= addr & ~HPAGE_MASK;
                /*
                return 0;
        }
 
-       if (pmd_large(*pmdp)) {
+       if (pmd_leaf(*pmdp)) {
                paddr = pmd_val(*pmdp) & HPAGE_MASK;
                paddr |= addr & ~HPAGE_MASK;
                cc = page_reset_referenced(paddr);
                return 0;
        }
 
-       if (pmd_large(*pmdp)) {
+       if (pmd_leaf(*pmdp)) {
                paddr = pmd_val(*pmdp) & HPAGE_MASK;
                paddr |= addr & ~HPAGE_MASK;
                *key = page_get_storage_key(paddr);
 
                if (!add) {
                        if (pmd_none(*pmd))
                                continue;
-                       if (pmd_large(*pmd)) {
+                       if (pmd_leaf(*pmd)) {
                                if (IS_ALIGNED(addr, PMD_SIZE) &&
                                    IS_ALIGNED(next, PMD_SIZE)) {
                                        if (!direct)
                        if (!pte)
                                goto out;
                        pmd_populate(&init_mm, pmd, pte);
-               } else if (pmd_large(*pmd)) {
+               } else if (pmd_leaf(*pmd)) {
                        if (!direct)
                                vmemmap_use_sub_pmd(addr, next);
                        continue;
                if (!pte)
                        goto out;
                pmd_populate(&init_mm, pmd, pte);
-       } else if (WARN_ON_ONCE(pmd_large(*pmd))) {
+       } else if (WARN_ON_ONCE(pmd_leaf(*pmd))) {
                goto out;
        }
        ptep = pte_offset_kernel(pmd, addr);
 
        if (pmd_none(*pmd))
                return false;
 
-       if (pmd_large(*pmd))
+       if (pmd_leaf(*pmd))
                return pfn_valid(pmd_pfn(*pmd));
 
        pte = pte_offset_kernel(pmd, addr);
        struct mm_struct *mm;
        pmd_t entry = *pmd;
 
-       if (!pmd_large(entry) || !pmd_young(entry))
+       if (!pmd_leaf(entry) || !pmd_young(entry))
                return;
 
        pte = pmd_val(entry);
 
        pudp = pud_offset(p4dp, address);
        pmdp = pmd_offset(pudp, address);
 
-       if (pmd_large(*pmdp))
+       if (pmd_leaf(*pmdp))
                ptep = split_large_pmd(info, pmdp, address);
        else
                ptep = pte_offset_kernel(pmdp, address);
 
        if (pmd_none(pmd) || !pmd_present(pmd))
                goto out;
 
-       if (pmd_large(pmd))
+       if (pmd_leaf(pmd))
                level = PG_LEVEL_2M;
 
 out:
 
        if (!pmd_k)
                return -1;
 
-       if (pmd_large(*pmd_k))
+       if (pmd_leaf(*pmd_k))
                return 0;
 
        pte_k = pte_offset_kernel(pmd_k, address);
         * And let's rather not kmap-atomic the pte, just in case
         * it's allocated already:
         */
-       if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
+       if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_leaf(*pmd))
                goto out;
 
        pte = pte_offset_kernel(pmd, address);
                goto bad;
 
        pr_cont("PMD %lx ", pmd_val(*pmd));
-       if (!pmd_present(*pmd) || pmd_large(*pmd))
+       if (!pmd_present(*pmd) || pmd_leaf(*pmd))
                goto out;
 
        pte = pte_offset_kernel(pmd, address);
        if (!pmd_present(*pmd))
                return 0;
 
-       if (pmd_large(*pmd))
+       if (pmd_leaf(*pmd))
                return spurious_kernel_fault_check(error_code, (pte_t *) pmd);
 
        pte = pte_offset_kernel(pmd, address);
 
                        break;
 
                /* should not be large page here */
-               if (pmd_large(*pmd)) {
+               if (pmd_leaf(*pmd)) {
                        pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
                                pfn, pmd, __pa(pmd));
                        BUG_ON(1);
 
                }
 
                if (!pmd_none(*pmd)) {
-                       if (!pmd_large(*pmd)) {
+                       if (!pmd_leaf(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                pte = (pte_t *)pmd_page_vaddr(*pmd);
                                paddr_last = phys_pte_init(pte, paddr,
                if (!pmd_present(*pmd))
                        continue;
 
-               if (pmd_large(*pmd)) {
+               if (pmd_leaf(*pmd)) {
                        if (IS_ALIGNED(addr, PMD_SIZE) &&
                            IS_ALIGNED(next, PMD_SIZE)) {
                                if (!direct)
 int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
                                unsigned long addr, unsigned long next)
 {
-       int large = pmd_large(*pmd);
+       int large = pmd_leaf(*pmd);
 
-       if (pmd_large(*pmd)) {
+       if (pmd_leaf(*pmd)) {
                vmemmap_verify((pte_t *)pmd, node, addr, next);
                vmemmap_use_sub_pmd(addr, next);
        }
 
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
-               if (!pmd_large(*pmd))
+               if (!pmd_leaf(*pmd))
                        kasan_populate_pmd(pmd, addr, next, nid);
        } while (pmd++, addr = next, addr != end);
 }
 
                return;
 
        pmd = pmd_offset(pud, ppd->vaddr);
-       if (pmd_large(*pmd))
+       if (pmd_leaf(*pmd))
                return;
 
        set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
                set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
        }
 
-       if (pmd_large(*pmd))
+       if (pmd_leaf(*pmd))
                return;
 
        pte = pte_offset_kernel(pmd, ppd->vaddr);
 
                return NULL;
 
        *level = PG_LEVEL_2M;
-       if (pmd_large(*pmd) || !pmd_present(*pmd))
+       if (pmd_leaf(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;
 
        *level = PG_LEVEL_4K;
         * Try to unmap in 2M chunks.
         */
        while (end - start >= PMD_SIZE) {
-               if (pmd_large(*pmd))
+               if (pmd_leaf(*pmd))
                        pmd_clear(pmd);
                else
                        __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
 
  */
 int pmd_clear_huge(pmd_t *pmd)
 {
-       if (pmd_large(*pmd)) {
+       if (pmd_leaf(*pmd)) {
                pmd_clear(pmd);
                return 1;
        }
 
                return NULL;
 
        /* We can't do anything sensible if we hit a large mapping. */
-       if (pmd_large(*pmd)) {
+       if (pmd_leaf(*pmd)) {
                WARN_ON(1);
                return NULL;
        }
                        continue;
                }
 
-               if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
+               if (pmd_leaf(*pmd) || level == PTI_CLONE_PMD) {
                        target_pmd = pti_user_pagetable_walk_pmd(addr);
                        if (WARN_ON(!target_pmd))
                                return;
 
                goto out;
        }
        pmd = pmd_offset(pud, relocated_restore_code);
-       if (pmd_large(*pmd)) {
+       if (pmd_leaf(*pmd)) {
                set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
                goto out;
        }
 
        pte_t *pte_tbl;
        int i;
 
-       if (pmd_large(*pmd)) {
+       if (pmd_leaf(*pmd)) {
                pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
                xen_free_ro_pages(pa, PMD_SIZE);
                return;
        if (!pmd_present(pmd))
                return 0;
        pa = pmd_val(pmd) & PTE_PFN_MASK;
-       if (pmd_large(pmd))
+       if (pmd_leaf(pmd))
                return pa + (vaddr & ~PMD_MASK);
 
        pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
 
        if (unlikely(pmd_none(*pmdp)))
                goto err;
 #ifdef CONFIG_X86_64
-       if (unlikely(pmd_large(*pmdp)))
+       if (unlikely(pmd_leaf(*pmdp)))
                pte = ptep_get((pte_t *)pmdp);
        else
 #endif
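
/*
 * Closing sketch, not part of the patch: on x86 the leaf test reduces
 * to the page-size (PSE) bit in the entry, which is also why the code
 * above can hand a leaf pmdp to ptep_get() -- a 2M leaf entry is laid
 * out closely enough to a PTE for the PTE helpers to read it.
 * Approximate form, assuming the usual _PAGE_PSE flag:
 */
static inline bool x86_pmd_leaf_sketch(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_PSE;
}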