{
        if (level == PT_PAGE_TABLE_LEVEL)
                return 1;
-       if (level == PT_DIRECTORY_LEVEL && is_large_pte(pte))
+       if (is_large_pte(pte))
                return 1;
        return 0;
 }
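
The level qualifier on the is_large_pte() test is gone: any spte with the
PS bit set is a final mapping now, whether it sits at the 2M or the 1G
level. A minimal userspace sketch of the check, assuming x86 semantics
where bit 7 (PS) marks a large-page entry; the definitions below are
simplified stand-ins rather than copies of KVM's:

#include <stdint.h>
#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL 1
#define PT_PAGE_SIZE_MASK   (1ULL << 7)         /* PS bit */

static int is_large_pte(uint64_t pte)
{
        return !!(pte & PT_PAGE_SIZE_MASK);
}

static int is_last_level(uint64_t pte, int level)
{
        if (level == PT_PAGE_TABLE_LEVEL)
                return 1;                       /* 4K leaf */
        if (is_large_pte(pte))
                return 1;                       /* 2M or 1G leaf */
        return 0;                               /* points to a lower table */
}

int main(void)
{
        printf("%d %d %d\n",
               is_last_level(0, 1),                    /* 1: 4K leaf */
               is_last_level(PT_PAGE_SIZE_MASK, 3),    /* 1: 1G page */
               is_last_level(0, 2));                   /* 0: table pointer */
        return 0;
}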
 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp))
 {
-       int i;
+       int i, j;
        int retval = 0;
 
        /*
                end = start + (memslot->npages << PAGE_SHIFT);
                if (hva >= start && hva < end) {
                        gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
-                       int idx = gfn_offset /
-                                 KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL);
+
                        retval |= handler(kvm, &memslot->rmap[gfn_offset]);
-                       retval |= handler(kvm,
-                                       &memslot->lpage_info[0][idx].rmap_pde);
+
+                       for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
+                               int idx = gfn_offset;
+                               idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
+                               retval |= handler(kvm,
+                                       &memslot->lpage_info[j][idx].rmap_pde);
+                       }
                }
        }
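
The single 2M rmap lookup is replaced by a loop over every large page
size the host supports. The index math divides the slot-relative page
offset by the number of 4K pages a huge page covers at each size; a
standalone sketch of that computation, with definitions assumed to
mirror KVM's x86 layout (4K base pages, 9 translation bits per level,
KVM_NR_PAGE_SIZES == 3):

#include <stdio.h>

#define PT_DIRECTORY_LEVEL      2
#define KVM_NR_PAGE_SIZES       3       /* 4K, 2M, 1G */
#define KVM_PAGES_PER_HPAGE(x)  (1UL << (((x) - 1) * 9))

int main(void)
{
        unsigned long gfn_offset = 0x40123;     /* page index into the slot */
        int j;

        for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
                unsigned long idx =
                        gfn_offset / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);

                /* j == 0 indexes the 2M array, j == 1 the 1G array */
                printf("lpage_info[%d][%lu]\n", j, idx);
        }
        return 0;
}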
 
 
 #define RMAP_RECYCLE_THRESHOLD 1000
 
-static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage)
+static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
        unsigned long *rmapp;
+       struct kvm_mmu_page *sp;
+
+       sp = page_header(__pa(spte));
 
        gfn = unalias_gfn(vcpu->kvm, gfn);
-       rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
+       rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 
        kvm_unmap_rmapp(vcpu->kvm, rmapp);
        kvm_flush_remote_tlbs(vcpu->kvm);
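
rmap_recycle() no longer needs a largepage flag from its caller: the
level is recovered from the shadow page that owns the spte, via
page_header(__pa(spte)) and sp->role.level. A rough userspace analogue
of that recovery, assuming a header stored at the start of each
page-aligned table (KVM actually reaches its kvm_mmu_page through the
containing struct page, but the pointer-masking idea is the same):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct mmu_page_hdr {
        int level;                      /* stands in for role.level */
};

/* Mask an entry pointer down to its page to find the owning header. */
static struct mmu_page_hdr *page_header_of(void *spte)
{
        return (struct mmu_page_hdr *)((uintptr_t)spte & ~(PAGE_SIZE - 1));
}

int main(void)
{
        static uint64_t page[PAGE_SIZE / sizeof(uint64_t)]
                        __attribute__((aligned(4096)));
        struct mmu_page_hdr *hdr = (struct mmu_page_hdr *)page;

        hdr->level = 2;                 /* a 2M-level shadow page */
        printf("level %d\n", page_header_of(&page[37])->level);
        return 0;
}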
 
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                    unsigned pte_access, int user_fault,
-                   int write_fault, int dirty, int largepage,
+                   int write_fault, int dirty, int level,
                    gfn_t gfn, pfn_t pfn, bool speculative,
                    bool can_unsync)
 {
                spte |= shadow_nx_mask;
        if (pte_access & ACC_USER_MASK)
                spte |= shadow_user_mask;
-       if (largepage)
+       if (level > PT_PAGE_TABLE_LEVEL)
                spte |= PT_PAGE_SIZE_MASK;
        if (tdp_enabled)
                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
        if ((pte_access & ACC_WRITE_MASK)
            || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
 
-               if (largepage && has_wrprotected_page(vcpu->kvm, gfn, 1)) {
+               if (level > PT_PAGE_TABLE_LEVEL &&
+                   has_wrprotected_page(vcpu->kvm, gfn, level)) {
                        ret = 1;
                        spte = shadow_trap_nonpresent_pte;
                        goto set_pte;
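
With the level passed through, write permission on a large spte is now
vetoed when the range is write-protected at the actual mapping level,
not only at 2M. A toy sketch of that gate, with one counter per
large-page size standing in for KVM's per-slot lpage_info bookkeeping
(simplified, not KVM's data layout):

#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL 1
#define PT_MAX_LEVEL        3

static int write_count[PT_MAX_LEVEL - 1];

static int has_wrprotected_page(int level)
{
        return write_count[level - 2] > 0;
}

int main(void)
{
        int level;

        /* Shadowing a guest page bumps the counter at every large size. */
        write_count[0]++;
        write_count[1]++;

        for (level = PT_PAGE_TABLE_LEVEL + 1; level <= PT_MAX_LEVEL; ++level)
                printf("level %d: %s\n", level,
                       has_wrprotected_page(level) ? "fall back to 4K"
                                                   : "large mapping ok");
        return 0;
}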
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                         unsigned pt_access, unsigned pte_access,
                         int user_fault, int write_fault, int dirty,
-                        int *ptwrite, int largepage, gfn_t gfn,
+                        int *ptwrite, int level, gfn_t gfn,
                         pfn_t pfn, bool speculative)
 {
        int was_rmapped = 0;
                 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
                 * the parent of the now unreachable PTE.
                 */
-               if (largepage && !is_large_pte(*sptep)) {
+               if (level > PT_PAGE_TABLE_LEVEL &&
+                   !is_large_pte(*sptep)) {
                        struct kvm_mmu_page *child;
                        u64 pte = *sptep;
 
                } else
                        was_rmapped = 1;
        }
+
        if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
-                     dirty, largepage, gfn, pfn, speculative, true)) {
+                     dirty, level, gfn, pfn, speculative, true)) {
                if (write_fault)
                        *ptwrite = 1;
                kvm_x86_ops->tlb_flush(vcpu);
                if (!is_rmap_spte(*sptep))
                        kvm_release_pfn_clean(pfn);
                if (rmap_count > RMAP_RECYCLE_THRESHOLD)
-                       rmap_recycle(vcpu, gfn, largepage);
+                       rmap_recycle(vcpu, sptep, gfn);
        } else {
                if (was_writeble)
                        kvm_release_pfn_dirty(pfn);
 }
 
 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-                       int largepage, gfn_t gfn, pfn_t pfn)
+                       int level, gfn_t gfn, pfn_t pfn)
 {
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;
        gfn_t pseudo_gfn;
 
        for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
-               if (iterator.level == PT_PAGE_TABLE_LEVEL
-                   || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
+               if (iterator.level == level) {
                        mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
                                     0, write, 1, &pt_write,
-                                    largepage, gfn, pfn, false);
+                                    level, gfn, pfn, false);
                        ++vcpu->stat.pf_fixed;
                        break;
                }
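
Instead of special-casing 4K against 2M, __direct_map() now simply
descends until iterator.level equals the requested level and installs
the leaf there. A standalone sketch of such a walk, with the x86-style
index extraction (9 bits per level) assumed:

#include <stdint.h>
#include <stdio.h>

static unsigned level_index(uint64_t gfn, int level)
{
        return (gfn >> ((level - 1) * 9)) & 0x1ff;
}

int main(void)
{
        uint64_t gfn = 0x12345;
        int target_level = 2;           /* install a 2M mapping */
        int level;

        for (level = 4; level >= 1; --level) {
                if (level == target_level) {
                        printf("leaf at level %d, index %u\n",
                               level, level_index(gfn, level));
                        break;
                }
                printf("descend through level %d, index %u\n",
                       level, level_index(gfn, level));
        }
        return 0;
}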
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 {
        int r;
-       int largepage = 0;
+       int level;
        pfn_t pfn;
        unsigned long mmu_seq;
 
-       if (mapping_level(vcpu, gfn) == PT_DIRECTORY_LEVEL) {
-               gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
-               largepage = 1;
-       }
+       level = mapping_level(vcpu, gfn);
+
+       /*
+        * This path builds a PAE pagetable, so we can map 2MB pages at
+        * most. Therefore cap the mapping level at PT_DIRECTORY_LEVEL.
+        */
+       if (level > PT_DIRECTORY_LEVEL)
+               level = PT_DIRECTORY_LEVEL;
+
+       gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
 
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
        if (mmu_notifier_retry(vcpu, mmu_seq))
                goto out_unlock;
        kvm_mmu_free_some_pages(vcpu);
-       r = __direct_map(vcpu, v, write, largepage, gfn, pfn);
+       r = __direct_map(vcpu, v, write, level, gfn, pfn);
        spin_unlock(&vcpu->kvm->mmu_lock);
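
Both fault paths align the gfn to the base of the large frame before
mapping, so the spte covers the right range. A standalone sketch of the
mask, with KVM_PAGES_PER_HPAGE() assumed as on x86 (9 bits per level):

#include <stdint.h>
#include <stdio.h>

#define KVM_PAGES_PER_HPAGE(x)  (1ULL << (((x) - 1) * 9))

int main(void)
{
        uint64_t gfn = 0x12345;
        int level;

        for (level = 1; level <= 3; ++level)
                printf("level %d: base gfn 0x%llx\n", level,
                       (unsigned long long)
                       (gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1)));
        return 0;
}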
 
 
 {
        pfn_t pfn;
        int r;
-       int largepage = 0;
+       int level;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        unsigned long mmu_seq;
 
        if (r)
                return r;
 
-       if (mapping_level(vcpu, gfn) == PT_DIRECTORY_LEVEL) {
-               gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
-               largepage = 1;
-       }
+       level = mapping_level(vcpu, gfn);
+
+       gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
        pfn = gfn_to_pfn(vcpu->kvm, gfn);
                goto out_unlock;
        kvm_mmu_free_some_pages(vcpu);
        r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
-                        largepage, gfn, pfn);
+                        level, gfn, pfn);
        spin_unlock(&vcpu->kvm->mmu_lock);
 
        return r;
                                  const void *new)
 {
        if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
-               if (!vcpu->arch.update_pte.largepage ||
+               if (vcpu->arch.update_pte.level == PT_PAGE_TABLE_LEVEL ||
                    sp->role.glevels == PT32_ROOT_LEVEL) {
                        ++vcpu->kvm->stat.mmu_pde_zapped;
                        return;
        u64 gpte = 0;
        pfn_t pfn;
 
-       vcpu->arch.update_pte.largepage = 0;
+       vcpu->arch.update_pte.level = PT_PAGE_TABLE_LEVEL;
 
        if (bytes != 4 && bytes != 8)
                return;
        if (is_large_pte(gpte) &&
            (mapping_level(vcpu, gfn) == PT_DIRECTORY_LEVEL)) {
                gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
-               vcpu->arch.update_pte.largepage = 1;
+               vcpu->arch.update_pte.level = PT_DIRECTORY_LEVEL;
        }
        vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();