@@ ... @@
 {
        iterator->addr = addr;
        iterator->shadow_addr = root;
-       iterator->level = vcpu->arch.mmu->shadow_root_level;
+       iterator->level = vcpu->arch.mmu->root_role.level;
 
        if (iterator->level >= PT64_ROOT_4LEVEL &&
            vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
@@ ... @@
 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *mmu = vcpu->arch.mmu;
-       u8 shadow_root_level = mmu->shadow_root_level;
+       u8 shadow_root_level = mmu->root_role.level;
        hpa_t root;
        unsigned i;
        int r;
@@ ... @@
         */
        if (mmu->root_level >= PT64_ROOT_4LEVEL) {
                root = mmu_alloc_root(vcpu, root_gfn, 0,
-                                     mmu->shadow_root_level, false);
+                                     mmu->root_role.level, false);
                mmu->root.hpa = root;
                goto set_root_pgd;
        }
@@ ... @@
         * the shadow page table may be a PAE or a long mode page table.
         */
        pm_mask = PT_PRESENT_MASK | shadow_me_mask;
-       if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
+       if (mmu->root_role.level >= PT64_ROOT_4LEVEL) {
                pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
 
                if (WARN_ON_ONCE(!mmu->pml4_root)) {
@@ ... @@
                }
                mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
 
-               if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
+               if (mmu->root_role.level == PT64_ROOT_5LEVEL) {
                        if (WARN_ON_ONCE(!mmu->pml5_root)) {
                                r = -EIO;
                                goto out_unlock;
@@ ... @@
                mmu->pae_root[i] = root | pm_mask;
        }
 
-       if (mmu->shadow_root_level == PT64_ROOT_5LEVEL)
+       if (mmu->root_role.level == PT64_ROOT_5LEVEL)
                mmu->root.hpa = __pa(mmu->pml5_root);
-       else if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
+       else if (mmu->root_role.level == PT64_ROOT_4LEVEL)
                mmu->root.hpa = __pa(mmu->pml4_root);
        else
                mmu->root.hpa = __pa(mmu->pae_root);
@@ ... @@
 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu *mmu = vcpu->arch.mmu;
-       bool need_pml5 = mmu->shadow_root_level > PT64_ROOT_4LEVEL;
+       bool need_pml5 = mmu->root_role.level > PT64_ROOT_4LEVEL;
        u64 *pml5_root = NULL;
        u64 *pml4_root = NULL;
        u64 *pae_root;
@@ ... @@
         * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
         */
        if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
-           mmu->shadow_root_level < PT64_ROOT_4LEVEL)
+           mmu->root_role.level < PT64_ROOT_4LEVEL)
                return 0;
 
        /*
@@ ... @@
        struct rsvd_bits_validate *shadow_zero_check;
        int i;
 
-       WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);
+       WARN_ON_ONCE(context->root_role.level < PT32E_ROOT_LEVEL);
 
        shadow_zero_check = &context->shadow_zero_check;
        __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
-                               context->shadow_root_level,
+                               context->root_role.level,
                                context->root_role.efer_nx,
                                guest_can_use_gbpages(vcpu), is_pse, is_amd);
 
        if (!shadow_me_mask)
                return;
 
-       for (i = context->shadow_root_level; --i >= 0;) {
+       for (i = context->root_role.level; --i >= 0;) {
                shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
                shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
        }
@@ ... @@
 
        if (boot_cpu_is_amd())
                __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
-                                       context->shadow_root_level, false,
+                                       context->root_role.level, false,
                                        boot_cpu_has(X86_FEATURE_GBPAGES),
                                        false, true);
        else
@@ ... @@
        if (!shadow_me_mask)
                return;
 
-       for (i = context->shadow_root_level; --i >= 0;) {
+       for (i = context->root_role.level; --i >= 0;) {
                shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
                shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
        }
@@ ... @@
        context->page_fault = kvm_tdp_page_fault;
        context->sync_page = nonpaging_sync_page;
        context->invlpg = NULL;
-       context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
        context->direct_map = true;
        context->get_guest_pgd = get_cr3;
        context->get_pdptr = kvm_pdptr_read;
@@ ... @@
        context->root_level = cpu_role.base.level;
 
        reset_guest_paging_metadata(vcpu, context);
-       context->shadow_root_level = root_role.level;
-
        reset_shadow_zero_bits_mask(vcpu, context);
 }
 
@@ ... @@
                context->cpu_role.as_u64 = new_mode.as_u64;
                context->root_role.word = new_mode.base.word;
 
-               context->shadow_root_level = level;
-
                context->page_fault = ept_page_fault;
                context->gva_to_gpa = ept_gva_to_gpa;
                context->sync_page = ept_sync_page;
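
The hunks above mechanically replace every read of the cached
shadow_root_level with root_role.level, and drop the assignments that
kept the duplicate field in sync: the role bits of the root shadow page
already record the level of the paging structure, so the separate field
was redundant. As a rough illustration of the idea, here is a minimal,
self-contained sketch; the types below are simplified stand-ins, not
the kernel's real definitions (union kvm_mmu_page_role and struct
kvm_mmu live in arch/x86/include/asm/kvm_host.h and carry many more
fields):

#include <assert.h>
#include <stdio.h>

#define PT32E_ROOT_LEVEL 3
#define PT64_ROOT_4LEVEL 4
#define PT64_ROOT_5LEVEL 5

/* Simplified stand-in for union kvm_mmu_page_role: the properties of
 * a shadow page, including the level of the paging structure. */
union mmu_page_role {
	unsigned int word;
	struct {
		unsigned int level:4;
		unsigned int direct:1;
	};
};

/* Before this patch, the MMU context cached the root level twice:
 * once inside root_role and once in a separate shadow_root_level
 * field that every init path had to keep in sync. */
struct mmu {
	union mmu_page_role root_role;
	/* u8 shadow_root_level;  <- removed: duplicated root_role.level */
};

int main(void)
{
	struct mmu mmu = { .root_role = { .level = PT64_ROOT_4LEVEL } };

	/* After the patch, former shadow_root_level reads become reads
	 * of the role bits, which cannot go stale. */
	assert(mmu.root_role.level >= PT32E_ROOT_LEVEL);
	printf("shadow root level: %u\n", mmu.root_role.level);
	return 0;
}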