        iterator->level = vcpu->arch.mmu->root_role.level;
 
        if (iterator->level >= PT64_ROOT_4LEVEL &&
-           vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
+           vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
            !vcpu->arch.mmu->direct_map)
                iterator->level = PT32E_ROOT_LEVEL;
 
         * On SVM, reading PDPTRs might access guest memory, which might fault
         * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
         */
-       if (mmu->root_level == PT32E_ROOT_LEVEL) {
+       if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
                for (i = 0; i < 4; ++i) {
                        pdptrs[i] = mmu->get_pdptr(vcpu, i);
                        if (!(pdptrs[i] & PT_PRESENT_MASK))
         * Do we shadow a long mode page table? If so we need to
         * write-protect the guest's page table root.
         */
-       if (mmu->root_level >= PT64_ROOT_4LEVEL) {
+       if (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
                root = mmu_alloc_root(vcpu, root_gfn, 0,
                                      mmu->root_role.level, false);
                mmu->root.hpa = root;
        for (i = 0; i < 4; ++i) {
                WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
 
-               if (mmu->root_level == PT32E_ROOT_LEVEL) {
+               if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
                        if (!(pdptrs[i] & PT_PRESENT_MASK)) {
                                mmu->pae_root[i] = INVALID_PAE_ROOT;
                                continue;
         * equivalent level in the guest's NPT to shadow.  Allocate the tables
         * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
         */
-       if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
+       if (mmu->direct_map || mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL ||
            mmu->root_role.level < PT64_ROOT_4LEVEL)
                return 0;
 
 
        vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 
-       if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+       if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
                hpa_t root = vcpu->arch.mmu->root.hpa;
                sp = to_shadow_page(root);
 
 {
        __reset_rsvds_bits_mask(&context->guest_rsvd_check,
                                vcpu->arch.reserved_gpa_bits,
-                               context->root_level, is_efer_nx(context),
+                               context->cpu_role.base.level, is_efer_nx(context),
                                guest_can_use_gbpages(vcpu),
                                is_cr4_pse(context),
                                guest_cpuid_is_amd_or_hygon(vcpu));
        context->get_guest_pgd = get_cr3;
        context->get_pdptr = kvm_pdptr_read;
        context->inject_page_fault = kvm_inject_page_fault;
-       context->root_level = cpu_role.base.level;
 
        if (!is_cr0_pg(context))
                context->gva_to_gpa = nonpaging_gva_to_gpa;
                paging64_init_context(context);
        else
                paging32_init_context(context);
-       context->root_level = cpu_role.base.level;
 
        reset_guest_paging_metadata(vcpu, context);
        reset_shadow_zero_bits_mask(vcpu, context);
                context->gva_to_gpa = ept_gva_to_gpa;
                context->sync_page = ept_sync_page;
                context->invlpg = ept_invlpg;
-               context->root_level = level;
                context->direct_map = false;
                update_permission_bitmask(context, true);
                context->pkru_mask = 0;
        g_context->get_guest_pgd     = get_cr3;
        g_context->get_pdptr         = kvm_pdptr_read;
        g_context->inject_page_fault = kvm_inject_page_fault;
-       g_context->root_level        = new_mode.base.level;
 
        /*
         * L2 page tables are never shadowed, so there is no need to sync