int direct,
                                             unsigned int access)
 {
-       bool direct_mmu = vcpu->arch.mmu->direct_map;
+       bool direct_mmu = vcpu->arch.mmu->root_role.direct;
        union kvm_mmu_page_role role;
        struct hlist_head *sp_list;
        unsigned quadrant;
 
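The hunks above and below all make the same substitution: every read of the standalone vcpu->arch.mmu->direct_map boolean becomes a read of the direct bit the MMU already tracks in its root page role, so the information lives in exactly one place. As a rough illustration of why the two are interchangeable, here is a simplified, self-contained model of the role union; the field layout is an assumption for illustration only, not the kernel's actual kvm_mmu_page_role definition:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for union kvm_mmu_page_role (layout illustrative only). */
union mmu_page_role {
        unsigned int word;
        struct {
                unsigned int level:4;  /* root paging level */
                unsigned int direct:1; /* maps guest physical addresses directly */
        };
};

struct mmu {
        union mmu_page_role root_role;
        /* bool direct_map;  <- the redundant copy this patch deletes */
};

int main(void)
{
        struct mmu mmu = { .root_role = { .direct = 1, .level = 4 } };

        /* Every former mmu->direct_map read becomes a root_role.direct read. */
        bool direct_mmu = mmu.root_role.direct;

        printf("direct_mmu = %d\n", direct_mmu);
        return 0;
}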
        if (iterator->level >= PT64_ROOT_4LEVEL &&
            vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
-           !vcpu->arch.mmu->direct_map)
+           !vcpu->arch.mmu->root_role.direct)
                iterator->level = PT32E_ROOT_LEVEL;
 
        if (iterator->level == PT32E_ROOT_LEVEL) {
        gpa_t gpa;
        int r;
 
-       if (vcpu->arch.mmu->direct_map)
+       if (vcpu->arch.mmu->root_role.direct)
                return 0;
 
        gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
         * equivalent level in the guest's NPT to shadow.  Allocate the tables
         * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
         */
-       if (mmu->direct_map || mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL ||
+       if (mmu->root_role.direct ||
+           mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL ||
            mmu->root_role.level < PT64_ROOT_4LEVEL)
                return 0;
 
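Logically the rewrapped condition above is unchanged: the special PAE roots are needed only when the MMU is indirect, the guest walks fewer than four paging levels, and the shadow tables use at least four. Restated as a standalone predicate (a sketch; the two constants mirror the kernel's values):

#include <stdbool.h>

#define PT32E_ROOT_LEVEL 3 /* 32-bit PAE paging */
#define PT64_ROOT_4LEVEL 4 /* 64-bit four-level paging */

/*
 * Sketch of the early-return guard in mmu_alloc_special_roots(): PAE
 * root tables are required only when 64-bit shadow tables mirror a
 * 32-bit guest page-table hierarchy.
 */
static bool needs_special_roots(bool direct, int guest_level, int shadow_level)
{
        return !direct &&
               guest_level < PT64_ROOT_4LEVEL &&
               shadow_level >= PT64_ROOT_4LEVEL;
}

int main(void)
{
        /* 32-bit L1 VMM shadowed by 64-bit KVM: special roots required. */
        return needs_special_roots(false, PT32E_ROOT_LEVEL, PT64_ROOT_4LEVEL) ? 0 : 1;
}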
        int i;
        struct kvm_mmu_page *sp;
 
-       if (vcpu->arch.mmu->direct_map)
+       if (vcpu->arch.mmu->root_role.direct)
                return;
 
        if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
 
        arch.token = alloc_apf_token(vcpu);
        arch.gfn = gfn;
-       arch.direct_map = vcpu->arch.mmu->direct_map;
+       arch.direct_map = vcpu->arch.mmu->root_role.direct;
        arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
 
        return kvm_setup_async_pf(vcpu, cr2_or_gpa,
        context->gva_to_gpa = nonpaging_gva_to_gpa;
        context->sync_page = nonpaging_sync_page;
        context->invlpg = NULL;
-       context->direct_map = true;
 }
 
 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
        context->gva_to_gpa = paging64_gva_to_gpa;
        context->sync_page = paging64_sync_page;
        context->invlpg = paging64_invlpg;
-       context->direct_map = false;
 }
 
 static void paging32_init_context(struct kvm_mmu *context)
        context->gva_to_gpa = paging32_gva_to_gpa;
        context->sync_page = paging32_sync_page;
        context->invlpg = paging32_invlpg;
-       context->direct_map = false;
 }
 
 static union kvm_cpu_role
        context->page_fault = kvm_tdp_page_fault;
        context->sync_page = nonpaging_sync_page;
        context->invlpg = NULL;
-       context->direct_map = true;
        context->get_guest_pgd = get_cr3;
        context->get_pdptr = kvm_pdptr_read;
        context->inject_page_fault = kvm_inject_page_fault;
                context->gva_to_gpa = ept_gva_to_gpa;
                context->sync_page = ept_sync_page;
                context->invlpg = ept_invlpg;
-               context->direct_map = false;
+
                update_permission_bitmask(context, true);
                context->pkru_mask = 0;
                reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
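With the boolean gone, none of the *_init_context() functions above assign direct-ness by hand; it is derived once when the root role is computed (TDP and nonpaging roots are direct, shadowed ones are not). A minimal sketch of that centralization, using the same toy union as above and a hypothetical helper name loosely modeled on the kernel's role-calculation functions:

#include <stdbool.h>

union mmu_page_role {
        unsigned int word;
        struct {
                unsigned int level:4;
                unsigned int direct:1;
        };
};

/* Hypothetical helper: decide direct-ness once, at role-computation
 * time, instead of re-assigning it in every *_init_context(). */
static union mmu_page_role calc_root_role(bool tdp_enabled, int level)
{
        union mmu_page_role role = { 0 };

        role.level = level;
        role.direct = tdp_enabled; /* simplification: TDP roots map GPAs directly */
        return role;
}

int main(void)
{
        return calc_root_role(true, 4).direct ? 0 : 1;
}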
 {
        int r;
 
-       r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
+       r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
        if (r)
                goto out;
        r = mmu_alloc_special_roots(vcpu);
        if (r)
                goto out;
-       if (vcpu->arch.mmu->direct_map)
+       if (vcpu->arch.mmu->root_role.direct)
                r = mmu_alloc_direct_roots(vcpu);
        else
                r = mmu_alloc_shadow_roots(vcpu);
                       void *insn, int insn_len)
 {
        int r, emulation_type = EMULTYPE_PF;
-       bool direct = vcpu->arch.mmu->direct_map;
+       bool direct = vcpu->arch.mmu->root_role.direct;
 
        if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
                return RET_PF_RETRY;
         * paging in both guests. If true, we simply unprotect the page
         * and resume the guest.
         */
-       if (vcpu->arch.mmu->direct_map &&
+       if (vcpu->arch.mmu->root_role.direct &&
            (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
                kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
                return 1;
 
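The full-mask comparison above is deliberate: the fault qualifies only when the present, write, and guest-page bits are all set at once, i.e. L1 wrote to a page it is using as an NPT page table for L2. A compilable sketch of that test; the PFERR_* bit positions are quoted from memory and should be checked against the kernel's mmu.h:

#include <stdbool.h>
#include <stdint.h>

/* PFERR_* bits as in arch/x86/kvm (positions quoted from memory). */
#define PFERR_PRESENT_MASK      (1ULL << 0)
#define PFERR_WRITE_MASK        (1ULL << 1)
#define PFERR_GUEST_PAGE_MASK   (1ULL << 33)
#define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \
                                 PFERR_WRITE_MASK | \
                                 PFERR_PRESENT_MASK)

/* True only when *all* three bits are set, not merely any of them. */
static bool is_nested_guest_page_write(uint64_t error_code)
{
        return (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE;
}

int main(void)
{
        /* An ordinary write fault (present|write) must not qualify. */
        return is_nested_guest_page_write(PFERR_PRESENT_MASK | PFERR_WRITE_MASK) ? 1 : 0;
}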
            WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
                return false;
 
-       if (!vcpu->arch.mmu->direct_map) {
+       if (!vcpu->arch.mmu->root_role.direct) {
                /*
                 * Write permission should be allowed since only
                 * write access needs to be emulated.
        kvm_release_pfn_clean(pfn);
 
        /* The instructions are well-emulated on direct mmu. */
-       if (vcpu->arch.mmu->direct_map) {
+       if (vcpu->arch.mmu->root_role.direct) {
                unsigned int indirect_shadow_pages;
 
                write_lock(&vcpu->kvm->mmu_lock);
        vcpu->arch.last_retry_eip = ctxt->eip;
        vcpu->arch.last_retry_addr = cr2_or_gpa;
 
-       if (!vcpu->arch.mmu->direct_map)
+       if (!vcpu->arch.mmu->root_role.direct)
                gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
 
        kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
                ctxt->exception.address = cr2_or_gpa;
 
                /* With shadow page tables, cr2 contains a GVA or nGPA. */
-               if (vcpu->arch.mmu->direct_map) {
+               if (vcpu->arch.mmu->root_role.direct) {
                        ctxt->gpa_available = true;
                        ctxt->gpa_val = cr2_or_gpa;
                }
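The comment above carries the reasoning: under shadow paging, cr2_or_gpa holds a guest-virtual or nested-guest-physical address, so it can be handed to the emulator as a ready-made GPA only when the MMU is direct. A small sketch of that decision, with a hypothetical slice of the emulation context:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical slice of the emulation context. */
struct emul_ctxt {
        bool gpa_available;
        uint64_t gpa_val;
};

/* Only a direct MMU guarantees the faulting address is a guest physical
 * address; with shadow paging it must be translated before use. */
static void set_fault_gpa(struct emul_ctxt *ctxt, bool direct, uint64_t cr2_or_gpa)
{
        if (direct) {
                ctxt->gpa_available = true;
                ctxt->gpa_val = cr2_or_gpa;
        }
}

int main(void)
{
        struct emul_ctxt ctxt = { 0 };

        set_fault_gpa(&ctxt, true, 0x1000);
        return ctxt.gpa_available ? 0 : 1;
}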
 {
        int r;
 
-       if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
+       if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
              work->wakeup_all)
                return;
 
        if (unlikely(r))
                return;
 
-       if (!vcpu->arch.mmu->direct_map &&
+       if (!vcpu->arch.mmu->root_role.direct &&
              work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
                return;
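Taken together with the kvm_arch_setup_async_pf() hunk earlier, these final checks implement a snapshot-and-compare: the paging mode recorded when the async fault was queued (and, for an indirect MMU, the guest CR3) must still match when the page becomes ready, otherwise the prefault is dropped as stale. A self-contained model of that validity test, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical snapshot taken when the async page fault is queued. */
struct async_pf_arch {
        bool direct_map; /* root_role.direct at queue time */
        uint64_t cr3;    /* guest page-table root at queue time */
};

/* Sketch of the guards in kvm_arch_async_page_ready(): ignore stale work
 * if the MMU mode changed, or if an indirect MMU now runs on different
 * guest page tables. */
static bool async_pf_still_valid(const struct async_pf_arch *work,
                                 bool cur_direct, uint64_t cur_cr3)
{
        if (work->direct_map != cur_direct)
                return false;
        if (!cur_direct && work->cr3 != cur_cr3)
                return false;
        return true;
}

int main(void)
{
        struct async_pf_arch work = { .direct_map = false, .cr3 = 0x1000 };

        /* CR3 changed under shadow paging: the prefault must be dropped. */
        return async_pf_still_valid(&work, false, 0x2000) ? 1 : 0;
}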