config HAVE_ARCH_TRANSPARENT_HUGEPAGE
        bool
 
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+       bool
+
 config HAVE_ARCH_HUGE_VMAP
        bool
 
 
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
        select HAVE_ARCH_VMAP_STACK             if X86_64
        select HAVE_ARCH_WITHIN_STACK_FRAMES
        select HAVE_CC_STACKPROTECTOR
 
                            native_pmd_val(pmd));
 }
 
+static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+                             pud_t *pudp, pud_t pud)
+{
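+       /*
+        * With a 64-bit pudval_t on a 32-bit kernel, mm + addr + pudp + pud
+        * add up to five argument words; the PVOP_VCALL helpers only handle
+        * up to four, so call the op directly through pv_mmu_ops instead.
+        */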
+       if (sizeof(pudval_t) > sizeof(long))
+               /* 5 arg words */
+               pv_mmu_ops.set_pud_at(mm, addr, pudp, pud);
+       else
+               PVOP_VCALL4(pv_mmu_ops.set_pud_at, mm, addr, pudp,
+                           native_pud_val(pud));
+}
+
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
        pmdval_t val = native_pmd_val(pmd);
 
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
                           pmd_t *pmdp, pmd_t pmdval);
+       void (*set_pud_at)(struct mm_struct *mm, unsigned long addr,
+                          pud_t *pudp, pud_t pudval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep);
 
 
        *pmdp = pmd;
 }
 
+static inline void native_set_pud(pud_t *pudp, pud_t pud)
+{
+}
+
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
        native_set_pte(ptep, pte);
        native_set_pmd(pmdp, __pmd(0));
 }
 
+static inline void native_pud_clear(pud_t *pudp)
+{
+}
+
 static inline void native_pte_clear(struct mm_struct *mm,
                                    unsigned long addr, pte_t *xp)
 {
 #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_SMP
+static inline pud_t native_pudp_get_and_clear(pud_t *xp)
+{
+       return __pud(xchg((pudval_t *)xp, 0));
+}
+#else
+#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
+#endif
+
 /* Bit manipulation helper on pte/pgoff entry */
 static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
                                      unsigned long mask, unsigned int leftshift)
 
        *(tmp + 1) = 0;
 }
 
+#ifndef CONFIG_SMP
+static inline void native_pud_clear(pud_t *pudp)
+{
+}
+#endif
+
 static inline void pud_clear(pud_t *pudp)
 {
        set_pud(pudp, __pud(0));
 #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_SMP
+union split_pud {
+       struct {
+               u32 pud_low;
+               u32 pud_high;
+       };
+       pud_t pud;
+};
+
+static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
+{
+       union split_pud res, *orig = (union split_pud *)pudp;
+
+       /*
+        * xchg acts as a barrier before the high half is cleared: zeroing
+        * the low word (which carries the Present bit) first ensures the
+        * hardware never sees a half-cleared, present entry.
+        */
+       res.pud_low = xchg(&orig->pud_low, 0);
+       res.pud_high = orig->pud_high;
+       orig->pud_high = 0;
+
+       return res.pud;
+}
+#else
+#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
+#endif
+
 /* Encode and de-code a swap entry */
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
 #define __swp_type(x)                  (((x).val) & 0x1f)
 
 #define set_pte(ptep, pte)             native_set_pte(ptep, pte)
 #define set_pte_at(mm, addr, ptep, pte)        native_set_pte_at(mm, addr, ptep, pte)
 #define set_pmd_at(mm, addr, pmdp, pmd)        native_set_pmd_at(mm, addr, pmdp, pmd)
+#define set_pud_at(mm, addr, pudp, pud)        native_set_pud_at(mm, addr, pudp, pud)
 
 #define set_pte_atomic(ptep, pte)                                      \
        native_set_pte_atomic(ptep, pte)
        return pmd_flags(pmd) & _PAGE_ACCESSED;
 }
 
+static inline int pud_dirty(pud_t pud)
+{
+       return pud_flags(pud) & _PAGE_DIRTY;
+}
+
+static inline int pud_young(pud_t pud)
+{
+       return pud_flags(pud) & _PAGE_ACCESSED;
+}
+
 static inline int pte_write(pte_t pte)
 {
        return pte_flags(pte) & _PAGE_RW;
        return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
 }
 
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
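+/*
+ * As with pmd_trans_huge() above: a devmap pud also has _PAGE_PSE set (see
+ * insert_pfn_pud()), but it is intentionally not reported here; callers use
+ * pud_devmap() for those.
+ */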
+static inline int pud_trans_huge(pud_t pud)
+{
+       return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
+}
+#endif
+
 #define has_transparent_hugepage has_transparent_hugepage
 static inline int has_transparent_hugepage(void)
 {
 {
        return !!(pmd_val(pmd) & _PAGE_DEVMAP);
 }
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline int pud_devmap(pud_t pud)
+{
+       return !!(pud_val(pud) & _PAGE_DEVMAP);
+}
+#else
+static inline int pud_devmap(pud_t pud)
+{
+       return 0;
+}
+#endif
 #endif
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
        return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
 }
 
+static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
+{
+       pudval_t v = native_pud_val(pud);
+
+       return __pud(v | set);
+}
+
+static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
+{
+       pudval_t v = native_pud_val(pud);
+
+       return __pud(v & ~clear);
+}
+
+static inline pud_t pud_mkold(pud_t pud)
+{
+       return pud_clear_flags(pud, _PAGE_ACCESSED);
+}
+
+static inline pud_t pud_mkclean(pud_t pud)
+{
+       return pud_clear_flags(pud, _PAGE_DIRTY);
+}
+
+static inline pud_t pud_wrprotect(pud_t pud)
+{
+       return pud_clear_flags(pud, _PAGE_RW);
+}
+
+static inline pud_t pud_mkdirty(pud_t pud)
+{
+       return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+}
+
+static inline pud_t pud_mkdevmap(pud_t pud)
+{
+       return pud_set_flags(pud, _PAGE_DEVMAP);
+}
+
+static inline pud_t pud_mkhuge(pud_t pud)
+{
+       return pud_set_flags(pud, _PAGE_PSE);
+}
+
+static inline pud_t pud_mkyoung(pud_t pud)
+{
+       return pud_set_flags(pud, _PAGE_ACCESSED);
+}
+
+static inline pud_t pud_mkwrite(pud_t pud)
+{
+       return pud_set_flags(pud, _PAGE_RW);
+}
+
+static inline pud_t pud_mknotpresent(pud_t pud)
+{
+       return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
+}
+
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline int pte_soft_dirty(pte_t pte)
 {
        return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
 }
 
+static inline int pud_soft_dirty(pud_t pud)
+{
+       return pud_flags(pud) & _PAGE_SOFT_DIRTY;
+}
+
 static inline pte_t pte_mksoft_dirty(pte_t pte)
 {
        return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
        return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
 }
 
+static inline pud_t pud_mksoft_dirty(pud_t pud)
+{
+       return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
+}
+
 static inline pte_t pte_clear_soft_dirty(pte_t pte)
 {
        return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
        return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
 }
 
+static inline pud_t pud_clear_soft_dirty(pud_t pud)
+{
+       return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
+}
+
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
 /*
                     massage_pgprot(pgprot));
 }
 
+static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
+{
+       return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
+                    massage_pgprot(pgprot));
+}
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        pteval_t val = pte_val(pte);
        return res;
 }
 
+static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
+{
+       pud_t res = *pudp;
+
+       native_pud_clear(pudp);
+       return res;
+}
+
 static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep , pte_t pte)
 {
        native_set_pmd(pmdp, pmd);
 }
 
+static inline void native_set_pud_at(struct mm_struct *mm, unsigned long addr,
+                                    pud_t *pudp, pud_t pud)
+{
+       native_set_pud(pudp, pud);
+}
+
 #ifndef CONFIG_PARAVIRT
 /*
  * Rules for using pte_update - it must be called after any PTE update which
 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);
+extern int pudp_set_access_flags(struct vm_area_struct *vma,
+                                unsigned long address, pud_t *pudp,
+                                pud_t entry, int dirty);
 
 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
 extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pmd_t *pmdp);
+extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
+                                    unsigned long addr, pud_t *pudp);
 
 #define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
 extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
        return native_pmdp_get_and_clear(pmdp);
 }
 
+#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
+static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
+                                       unsigned long addr, pud_t *pudp)
+{
+       return native_pudp_get_and_clear(pudp);
+}
+
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
 static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
                unsigned long addr, pmd_t *pmd)
 {
 }
+static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
+               unsigned long addr, pud_t *pud)
+{
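+       /* No-op, like update_mmu_cache_pmd() above. */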
+}
 
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
 
        native_set_pud(pud, native_make_pud(0));
 }
 
+static inline pud_t native_pudp_get_and_clear(pud_t *xp)
+{
+#ifdef CONFIG_SMP
+       return native_make_pud(xchg(&xp->pud, 0));
+#else
+       /*
+        * native_local_pudp_get_and_clear, but duplicated because of
+        * cyclic dependency
+        */
+       pud_t ret = *xp;
+
+       native_pud_clear(xp);
+       return ret;
+#endif
+}
+
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
        *pgdp = pgd;
 
        .pmd_clear = native_pmd_clear,
 #endif
        .set_pud = native_set_pud,
+       .set_pud_at = native_set_pud_at,
 
        .pmd_val = PTE_IDENT,
        .make_pmd = PTE_IDENT,
 
 
        return changed;
 }
+
+int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+                         pud_t *pudp, pud_t entry, int dirty)
+{
+       int changed = !pud_same(*pudp, entry);
+
+       VM_BUG_ON(address & ~HPAGE_PUD_MASK);
+
+       if (changed && dirty) {
+               *pudp = entry;
+               /*
+                * We had a write-protection fault here and changed the pud
+                * to be more permissive. No need to flush the TLB for that,
+                * #PF is architecturally guaranteed to do that and in the
+                * worst-case we'll generate a spurious fault.
+                */
+       }
+
+       return changed;
+}
 #endif
 
 int ptep_test_and_clear_young(struct vm_area_struct *vma,
 
        return ret;
 }
+int pudp_test_and_clear_young(struct vm_area_struct *vma,
+                             unsigned long addr, pud_t *pudp)
+{
+       int ret = 0;
+
+       if (pud_young(*pudp))
+               ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
+                                        (unsigned long *)pudp);
+
+       return ret;
+}
 #endif
 
 int ptep_clear_flush_young(struct vm_area_struct *vma,
 
 extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);
+extern int pudp_set_access_flags(struct vm_area_struct *vma,
+                                unsigned long address, pud_t *pudp,
+                                pud_t entry, int dirty);
 #else
 static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pmd_t *pmdp,
        BUILD_BUG();
        return 0;
 }
+static inline int pudp_set_access_flags(struct vm_area_struct *vma,
+                                       unsigned long address, pud_t *pudp,
+                                       pud_t entry, int dirty)
+{
+       BUILD_BUG();
+       return 0;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 }
 #endif
 
-#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long address,
                                            pmd_t *pmdp)
        pmd_clear(pmdp);
        return pmd;
 }
+#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
+static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
+                                           unsigned long address,
+                                           pud_t *pudp)
+{
+       pud_t pud = *pudp;
+
+       pud_clear(pudp);
+       return pud;
+}
+#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-#endif
 
-#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
 static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long address, pmd_t *pmdp,
                                            int full)
 {
        return pmdp_huge_get_and_clear(mm, address, pmdp);
 }
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
+#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
+static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
+                                           unsigned long address, pud_t *pudp,
+                                           int full)
+{
+       return pudp_huge_get_and_clear(mm, address, pudp);
+}
+#endif
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long address, pte_t *ptep,
 extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
                              unsigned long address,
                              pmd_t *pmdp);
+extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
+                             unsigned long address,
+                             pud_t *pudp);
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
+#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+                                     unsigned long address, pud_t *pudp)
+{
+       pud_t old_pud = *pudp;
+
+       set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
+}
+#else
+static inline void pudp_set_wrprotect(struct mm_struct *mm,
+                                     unsigned long address, pud_t *pudp)
+{
+       BUILD_BUG();
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+#endif
 
 #ifndef pmdp_collapse_flush
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 {
        return pmd_val(pmd_a) == pmd_val(pmd_b);
 }
+
+static inline int pud_same(pud_t pud_a, pud_t pud_b)
+{
+       return pud_val(pud_a) == pud_val(pud_b);
+}
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 {
        BUILD_BUG();
        return 0;
 }
+
+static inline int pud_same(pud_t pud_a, pud_t pud_b)
+{
+       BUILD_BUG();
+       return 0;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #endif /* __HAVE_ARCH_PMD_WRITE */
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
+       (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+        !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+static inline int pud_trans_huge(pud_t pud)
+{
+       return 0;
+}
+#endif
+
 #ifndef pmd_read_atomic
 static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
 {
  * e.g. see arch/arc: flush_pmd_tlb_range
  */
 #define flush_pmd_tlb_range(vma, addr, end)    flush_tlb_range(vma, addr, end)
+#define flush_pud_tlb_range(vma, addr, end)    flush_tlb_range(vma, addr, end)
 #else
 #define flush_pmd_tlb_range(vma, addr, end)    BUILD_BUG()
+#define flush_pud_tlb_range(vma, addr, end)    BUILD_BUG()
 #endif
 #endif
 
 
                __tlb_remove_pmd_tlb_entry(tlb, pmdp, address);         \
        } while (0)
 
+/**
+ * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
+ * invalidation. This is a nop so far, because only x86 needs it.
+ */
+#ifndef __tlb_remove_pud_tlb_entry
+#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
+#endif
+
+#define tlb_remove_pud_tlb_entry(tlb, pudp, address)                   \
+       do {                                                            \
+               __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);       \
+               __tlb_remove_pud_tlb_entry(tlb, pudp, address);         \
+       } while (0)
+
 /*
  * For things like page tables caches (ie caching addresses "inside" the
  * page tables, like x86 does), for legacy reasons, flushing an
 
                         pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                         struct vm_area_struct *vma);
 extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
+extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+                        pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+                        struct vm_area_struct *vma);
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
+#else
+static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
+{
+}
+#endif
+
 extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
 extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          unsigned long addr,
 extern int zap_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr);
+extern int zap_huge_pud(struct mmu_gather *tlb,
+                       struct vm_area_struct *vma,
+                       pud_t *pud, unsigned long addr);
 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, unsigned long end,
                        unsigned char *vec);
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                        unsigned long addr, pgprot_t newprot,
                        int prot_numa);
-int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
-                       pfn_t pfn, bool write);
+int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+                       pmd_t *pmd, pfn_t pfn, bool write);
+int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+                       pud_t *pud, pfn_t pfn, bool write);
 enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
-               pmd_t *pmd, int flags);
-
 #define HPAGE_PMD_SHIFT PMD_SHIFT
 #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
 #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
 
+#define HPAGE_PUD_SHIFT PUD_SHIFT
+#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
+#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
+
 extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
 
 #define transparent_hugepage_enabled(__vma)                            \
 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze, struct page *page);
 
+void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+               unsigned long address);
+
+#define split_huge_pud(__vma, __pud, __address)                                \
+       do {                                                            \
+               pud_t *____pud = (__pud);                               \
+               if (pud_trans_huge(*____pud)                            \
+                                       || pud_devmap(*____pud))        \
+                       __split_huge_pud(__vma, __pud, __address);      \
+       }  while (0)
+
 extern int hugepage_madvise(struct vm_area_struct *vma,
                            unsigned long *vm_flags, int advice);
 extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                    long adjust_next);
 extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma);
+extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
+               struct vm_area_struct *vma);
 /* mmap_sem must be held on entry */
 static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
        else
                return NULL;
 }
+static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
+               struct vm_area_struct *vma)
+{
+       VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
+       if (pud_trans_huge(*pud) || pud_devmap(*pud))
+               return __pud_trans_huge_lock(pud, vma);
+       else
+               return NULL;
+}
 static inline int hpage_nr_pages(struct page *page)
 {
        if (unlikely(PageTransHuge(page)))
        return 1;
 }
 
+struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
+               pmd_t *pmd, int flags);
+struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
+               pud_t *pud, int flags);
+
 extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
 
 extern struct page *huge_zero_page;
        return is_huge_zero_page(pmd_page(pmd));
 }
 
+static inline bool is_huge_zero_pud(pud_t pud)
+{
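+       /* No huge zero pud page is implemented yet; see copy_huge_pud(). */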
+       return false;
+}
+
 struct page *mm_get_huge_zero_page(struct mm_struct *mm);
 void mm_put_huge_zero_page(struct mm_struct *mm);
 
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
 
+#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
+#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
+#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
+
 #define hpage_nr_pages(x) 1
 
 #define transparent_hugepage_enabled(__vma) 0
 static inline void split_huge_pmd_address(struct vm_area_struct *vma,
                unsigned long address, bool freeze, struct page *page) {}
 
+#define split_huge_pud(__vma, __pud, __address)        \
+       do { } while (0)
+
 static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
 {
 {
        return NULL;
 }
+static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
+               struct vm_area_struct *vma)
+{
+       return NULL;
+}
 
 static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
 {
        return false;
 }
 
+static inline bool is_huge_zero_pud(pud_t pud)
+{
+       return false;
+}
+
 static inline void mm_put_huge_zero_page(struct mm_struct *mm)
 {
        return;
 {
        return NULL;
 }
+
+static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
+               unsigned long addr, pud_t *pud, int flags)
+{
+       return NULL;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif /* _LINUX_HUGE_MM_H */
 
 {
        return 0;
 }
+static inline int pud_devmap(pud_t pud)
+{
+       return 0;
+}
 #endif
 
 /*
 
 /**
  * mm_walk - callbacks for walk_page_range
+ * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry.
+ *            This handler should only handle pud_trans_huge() puds;
+ *            the pmd_entry or pte_entry callbacks will be used for
+ *            regular PUDs.
  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
  *            this handler is required to be able to handle
  *            pmd_trans_huge() pmds.  They may simply choose to
  * (see the comment on walk_page_range() for more details)
  */
 struct mm_walk {
+       int (*pud_entry)(pud_t *pud, unsigned long addr,
+                        unsigned long next, struct mm_walk *walk);
        int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
                         unsigned long next, struct mm_walk *walk);
        int (*pte_entry)(pte_t *pte, unsigned long addr,
        return ptl;
 }
 
-extern void __init pagecache_init(void);
+/*
+ * No scalability reason to split PUD locks yet, but follow the same pattern
+ * as the PMD locks to make it easier if we decide to.  The VM should not be
+ * considered ready to switch to split PUD locks yet; there may be places
+ * which need to be converted from page_table_lock.
+ */
+static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
+{
+       return &mm->page_table_lock;
+}
+
+static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
+{
+       spinlock_t *ptl = pud_lockptr(mm, pud);
+
+       spin_lock(ptl);
+       return ptl;
+}
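+
+/*
+ * Usage mirrors pmd_lock(); for example, insert_pfn_pud() and copy_huge_pud()
+ * do:
+ *
+ *     ptl = pud_lock(mm, pud);
+ *     ... inspect or update *pud ...
+ *     spin_unlock(ptl);
+ */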
 
+extern void __init pagecache_init(void);
 extern void free_area_init(unsigned long * zones_size);
 extern void free_area_init_node(int nid, unsigned long * zones_size,
                unsigned long zone_start_pfn, unsigned long *zholes_size);
 
        ___pmd;                                                         \
 })
 
+#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)            \
+({                                                                     \
+       unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;              \
+       struct mm_struct *___mm = (__vma)->vm_mm;                       \
+       pud_t ___pud;                                                   \
+                                                                       \
+       ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);          \
+       mmu_notifier_invalidate_range(___mm, ___haddr,                  \
+                                     ___haddr + HPAGE_PUD_SIZE);       \
+                                                                       \
+       ___pud;                                                         \
+})
+
 #define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd)           \
 ({                                                                     \
        unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;              \
 #define pmdp_clear_young_notify pmdp_test_and_clear_young
 #define        ptep_clear_flush_notify ptep_clear_flush
 #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
+#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
 #define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 
 {
        return pfn_pmd(pfn_t_to_pfn(pfn), pgprot);
 }
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
+{
+       return pfn_pud(pfn_t_to_pfn(pfn), pgprot);
+}
+#endif
 #endif
 
 #ifdef __HAVE_ARCH_PTE_DEVMAP
 }
 pte_t pte_mkdevmap(pte_t pte);
 pmd_t pmd_mkdevmap(pmd_t pmd);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+       defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+pud_t pud_mkdevmap(pud_t pud);
 #endif
+#endif /* __HAVE_ARCH_PTE_DEVMAP */
+
 #endif /* _LINUX_PFN_T_H_ */
 
                        return page;
                return no_page_table(vma, flags);
        }
+       if (pud_devmap(*pud)) {
+               ptl = pud_lock(mm, pud);
+               page = follow_devmap_pud(vma, address, pud, flags);
+               spin_unlock(ptl);
+               if (page)
+                       return page;
+       }
        if (unlikely(pud_bad(*pud)))
                return no_page_table(vma, flags);
 
 
 }
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
 
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
+{
+       if (likely(vma->vm_flags & VM_WRITE))
+               pud = pud_mkwrite(pud);
+       return pud;
+}
+
+static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+               pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       pud_t entry;
+       spinlock_t *ptl;
+
+       ptl = pud_lock(mm, pud);
+       entry = pud_mkhuge(pfn_t_pud(pfn, prot));
+       if (pfn_t_devmap(pfn))
+               entry = pud_mkdevmap(entry);
+       if (write) {
+               entry = pud_mkyoung(pud_mkdirty(entry));
+               entry = maybe_pud_mkwrite(entry, vma);
+       }
+       set_pud_at(mm, addr, pud, entry);
+       update_mmu_cache_pud(vma, addr, pud);
+       spin_unlock(ptl);
+}
+
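+/*
+ * The expected caller is a ->huge_fault() handler for device memory (e.g.
+ * filesystem DAX). A minimal sketch, assuming the handler has already
+ * resolved a PUD-aligned, devmap-backed pfn:
+ *
+ *     return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
+ *                               vmf->flags & FAULT_FLAG_WRITE);
+ */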
+int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+                       pud_t *pud, pfn_t pfn, bool write)
+{
+       pgprot_t pgprot = vma->vm_page_prot;
+       /*
+        * If we had pud_special, we could avoid all these restrictions,
+        * but we need to be consistent with PTEs and architectures that
+        * can't support a 'special' bit.
+        */
+       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+       BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+                                               (VM_PFNMAP|VM_MIXEDMAP));
+       BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+       BUG_ON(!pfn_t_devmap(pfn));
+
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return VM_FAULT_SIGBUS;
+
+       track_pfn_insert(vma, &pgprot, pfn);
+
+       insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
+       return VM_FAULT_NOPAGE;
+}
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd)
 {
        return ret;
 }
 
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
+               pud_t *pud)
+{
+       pud_t _pud;
+
+       /*
+        * We should set the dirty bit only for FOLL_WRITE, but for now
+        * the dirty bit in the pud is meaningless.  If the dirty bit
+        * ever becomes meaningful and is set only for FOLL_WRITE, an
+        * atomic set_bit will be required on the pud to set the young
+        * bit, instead of the current set_pud_at.
+        */
+       _pud = pud_mkyoung(pud_mkdirty(*pud));
+       if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
+                               pud, _pud,  1))
+               update_mmu_cache_pud(vma, addr, pud);
+}
+
+struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
+               pud_t *pud, int flags)
+{
+       unsigned long pfn = pud_pfn(*pud);
+       struct mm_struct *mm = vma->vm_mm;
+       struct dev_pagemap *pgmap;
+       struct page *page;
+
+       assert_spin_locked(pud_lockptr(mm, pud));
+
+       if (flags & FOLL_WRITE && !pud_write(*pud))
+               return NULL;
+
+       if (pud_present(*pud) && pud_devmap(*pud))
+               /* pass */;
+       else
+               return NULL;
+
+       if (flags & FOLL_TOUCH)
+               touch_pud(vma, addr, pud);
+
+       /*
+        * device mapped pages can only be returned if the
+        * caller will manage the page reference count.
+        */
+       if (!(flags & FOLL_GET))
+               return ERR_PTR(-EEXIST);
+
+       pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
+       pgmap = get_dev_pagemap(pfn, NULL);
+       if (!pgmap)
+               return ERR_PTR(-EFAULT);
+       page = pfn_to_page(pfn);
+       get_page(page);
+       put_dev_pagemap(pgmap);
+
+       return page;
+}
+
+int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+                 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+                 struct vm_area_struct *vma)
+{
+       spinlock_t *dst_ptl, *src_ptl;
+       pud_t pud;
+       int ret;
+
+       dst_ptl = pud_lock(dst_mm, dst_pud);
+       src_ptl = pud_lockptr(src_mm, src_pud);
+       spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
+
+       ret = -EAGAIN;
+       pud = *src_pud;
+       if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
+               goto out_unlock;
+
+       /*
+        * While the page table lock is held, the huge zero pud cannot be
+        * under splitting, since we don't split the page itself, only the
+        * pud into a page table.
+        */
+       if (is_huge_zero_pud(pud)) {
+               /* No huge zero pud yet */
+       }
+
+       pudp_set_wrprotect(src_mm, addr, src_pud);
+       pud = pud_mkold(pud_wrprotect(pud));
+       set_pud_at(dst_mm, addr, dst_pud, pud);
+
+       ret = 0;
+out_unlock:
+       spin_unlock(src_ptl);
+       spin_unlock(dst_ptl);
+       return ret;
+}
+
+void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
+{
+       pud_t entry;
+       unsigned long haddr;
+       bool write = vmf->flags & FAULT_FLAG_WRITE;
+
+       vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
+       if (unlikely(!pud_same(*vmf->pud, orig_pud)))
+               goto unlock;
+
+       entry = pud_mkyoung(orig_pud);
+       if (write)
+               entry = pud_mkdirty(entry);
+       haddr = vmf->address & HPAGE_PUD_MASK;
+       if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
+               update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
+
+unlock:
+       spin_unlock(vmf->ptl);
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
 void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
 {
        pmd_t entry;
        return NULL;
 }
 
+/*
+ * Returns the page table lock pointer if a given pud maps a thp, NULL
+ * otherwise.
+ *
+ * Note that on success this routine returns without unlocking the page
+ * table lock, so callers must unlock it.
+ */
+spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
+{
+       spinlock_t *ptl;
+
+       ptl = pud_lock(vma->vm_mm, pud);
+       if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
+               return ptl;
+       spin_unlock(ptl);
+       return NULL;
+}
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+                pud_t *pud, unsigned long addr)
+{
+       pud_t orig_pud;
+       spinlock_t *ptl;
+
+       ptl = __pud_trans_huge_lock(pud, vma);
+       if (!ptl)
+               return 0;
+       /*
+        * For architectures like ppc64 we look at the deposited pgtable
+        * when calling pudp_huge_get_and_clear. So do the
+        * pgtable_trans_huge_withdraw after finishing the pudp related
+        * operations.
+        */
+       orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud,
+                       tlb->fullmm);
+       tlb_remove_pud_tlb_entry(tlb, pud, addr);
+       if (vma_is_dax(vma)) {
+               spin_unlock(ptl);
+               /* No zero page support yet */
+       } else {
+               /* No support for anonymous PUD pages yet */
+               BUG();
+       }
+       return 1;
+}
+
+static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
+               unsigned long haddr)
+{
+       VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
+       VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
+       VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
+       VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
+
+       count_vm_event(THP_SPLIT_PMD);
+
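+       /*
+        * There is no deposited page table to withdraw and nothing to
+        * repopulate: only devmap puds exist so far, so splitting simply
+        * clears the pud and the range can fault back in at a smaller size.
+        */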
+       pudp_huge_clear_flush_notify(vma, haddr, pud);
+}
+
+void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+               unsigned long address)
+{
+       spinlock_t *ptl;
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long haddr = address & HPAGE_PUD_MASK;
+
+       mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE);
+       ptl = pud_lock(mm, pud);
+       if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
+               goto out;
+       __split_huge_pud_locked(vma, pud, haddr);
+
+out:
+       spin_unlock(ptl);
+       mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PUD_SIZE);
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
                unsigned long haddr, pmd_t *pmd)
 {
 
                next = pmd_addr_end(addr, end);
                if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) {
                        int err;
-                       VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
+                       VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
                        err = copy_huge_pmd(dst_mm, src_mm,
                                            dst_pmd, src_pmd, addr, vma);
                        if (err == -ENOMEM)
        src_pud = pud_offset(src_pgd, addr);
        do {
                next = pud_addr_end(addr, end);
+               if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
+                       int err;
+
+                       VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
+                       err = copy_huge_pud(dst_mm, src_mm,
+                                           dst_pud, src_pud, addr, vma);
+                       if (err == -ENOMEM)
+                               return -ENOMEM;
+                       if (!err)
+                               continue;
+                       /* fall through */
+               }
                if (pud_none_or_clear_bad(src_pud))
                        continue;
                if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
+               if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
+                       if (next - addr != HPAGE_PUD_SIZE) {
+                               VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
+                               split_huge_pud(vma, pud, addr);
+                       } else if (zap_huge_pud(tlb, vma, pud, addr))
+                               goto next;
+                       /* fall through */
+               }
                if (pud_none_or_clear_bad(pud))
                        continue;
                next = zap_pmd_range(tlb, vma, pud, addr, next, details);
+next:
+               cond_resched();
        } while (pud++, addr = next, addr != end);
 
        return addr;
        return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
 }
 
+static int create_huge_pud(struct vm_fault *vmf)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       /* No support for anonymous transparent PUD pages yet */
+       if (vma_is_anonymous(vmf->vma))
+               return VM_FAULT_FALLBACK;
+       if (vmf->vma->vm_ops->huge_fault)
+               return vmf->vma->vm_ops->huge_fault(vmf);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+       return VM_FAULT_FALLBACK;
+}
+
+static int wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       /* No support for anonymous transparent PUD pages yet */
+       if (vma_is_anonymous(vmf->vma))
+               return VM_FAULT_FALLBACK;
+       if (vmf->vma->vm_ops->huge_fault)
+               return vmf->vma->vm_ops->huge_fault(vmf);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+       return VM_FAULT_FALLBACK;
+}
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
        };
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
-       pud_t *pud;
        int ret;
 
        pgd = pgd_offset(mm, address);
-       pud = pud_alloc(mm, pgd, address);
-       if (!pud)
+
+       vmf.pud = pud_alloc(mm, pgd, address);
+       if (!vmf.pud)
                return VM_FAULT_OOM;
-       vmf.pmd = pmd_alloc(mm, pud, address);
+       if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
+               vmf.flags |= FAULT_FLAG_SIZE_PUD;
+               ret = create_huge_pud(&vmf);
+               if (!(ret & VM_FAULT_FALLBACK))
+                       return ret;
+       } else {
+               pud_t orig_pud = *vmf.pud;
+
+               barrier();
+               if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
+                       unsigned int dirty = flags & FAULT_FLAG_WRITE;
+
+                       vmf.flags |= FAULT_FLAG_SIZE_PUD;
+
+                       /* NUMA case for anonymous PUDs would go here */
+
+                       if (dirty && !pud_write(orig_pud)) {
+                               ret = wp_huge_pud(&vmf, orig_pud);
+                               if (!(ret & VM_FAULT_FALLBACK))
+                                       return ret;
+                       } else {
+                               huge_pud_set_accessed(&vmf, orig_pud);
+                               return 0;
+                       }
+               }
+       }
+
+       vmf.pmd = pmd_alloc(mm, vmf.pud, address);
        if (!vmf.pmd)
                return VM_FAULT_OOM;
        if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
  */
 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
+       spinlock_t *ptl;
        pmd_t *new = pmd_alloc_one(mm, address);
        if (!new)
                return -ENOMEM;
 
        smp_wmb(); /* See comment in __pte_alloc */
 
-       spin_lock(&mm->page_table_lock);
+       ptl = pud_lock(mm, pud);
 #ifndef __ARCH_HAS_4LEVEL_HACK
        if (!pud_present(*pud)) {
                mm_inc_nr_pmds(mm);
        } else /* Another has populated it */
                pmd_free(mm, new);
 #endif /* __ARCH_HAS_4LEVEL_HACK */
-       spin_unlock(&mm->page_table_lock);
+       spin_unlock(ptl);
        return 0;
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
 
        pud = pud_offset(pgd, addr);
        do {
+ again:
                next = pud_addr_end(addr, end);
-               if (pud_none_or_clear_bad(pud)) {
+               if (pud_none(*pud) || !walk->vma) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
+
+               if (walk->pud_entry) {
+                       spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
+
+                       if (ptl) {
+                               err = walk->pud_entry(pud, addr, next, walk);
+                               spin_unlock(ptl);
+                               if (err)
+                                       break;
+                               continue;
+                       }
+               }
+
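+               /*
+                * split_huge_pud() just clears a huge/devmap pud (see
+                * __split_huge_pud_locked()), so re-check it; an emptied
+                * pud is then handled by the pud_none()/pte_hole path above.
+                */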
+               split_huge_pud(walk->vma, pud, addr);
+               if (pud_none(*pud))
+                       goto again;
+
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
 
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
 }
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
+                           pud_t *pudp)
+{
+       pud_t pud;
+
+       VM_BUG_ON(address & ~HPAGE_PUD_MASK);
+       VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
+       pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
+       flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
+       return pud;
+}
+#endif
 #endif
 
 #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT