KVM: vmx, svm: clean up mass updates to regs_avail/regs_dirty bits
author	Paolo Bonzini <pbonzini@redhat.com>	Fri, 26 Nov 2021 12:00:15 +0000 (07:00 -0500)
committer	Paolo Bonzini <pbonzini@redhat.com>	Wed, 8 Dec 2021 09:25:03 +0000 (04:25 -0500)
Document the meaning of the three valid combinations of regs_avail and
regs_dirty.  Clear regs_dirty right after writing back dirty registers,
instead of deferring it until after vmexit; after vmexit, clear only the
regs_avail bits that correspond to lazily-loaded registers.
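
For reference, a sketch of the cache helpers that implement these state
transitions (kvm_cache_regs.h; the exact bodies in the tree may differ
slightly):

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	/* avail=1, dirty unchanged: the cache now mirrors the VMCS/VMCB. */
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	/*
	 * avail=1, dirty=1: the cache holds a newer value that must be
	 * written back to the VMCS/VMCB before the next vmentry.
	 */
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}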

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/kvm_cache_regs.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h

diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 6e6d0d01f18d8830f1564c7b735e2edf60e8b64d..ac3d3bd662f41b5d1e3159aa80e8a2a93744b765 100644
@@ -43,6 +43,13 @@ BUILD_KVM_GPR_ACCESSORS(r14, R14)
 BUILD_KVM_GPR_ACCESSORS(r15, R15)
 #endif
 
+/*
+ * avail  dirty
+ * 0     0       register in VMCS/VMCB
+ * 0     1       *INVALID*
+ * 1     0       register in vcpu->arch
+ * 1     1       register in vcpu->arch, needs to be stored back
+ */
 static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
                                             enum kvm_reg reg)
 {
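
For context, since the hunk cuts off at the signature: the predicates that
consume the table above look roughly like this in kvm_cache_regs.h:

static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
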
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 7f493ffc1f8d3522a3e02270ad81c8d8456ecd38..de872098071dcb8af41b13065e6e792044ec3451 100644
@@ -3946,6 +3946,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
                vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
                vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
        }
+       vcpu->arch.regs_dirty = 0;
 
        if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
                kvm_before_interrupt(vcpu);
@@ -3980,7 +3981,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
                vcpu->arch.apf.host_apf_flags =
                        kvm_read_and_reset_apf_flags();
 
-       kvm_register_clear_available(vcpu, VCPU_EXREG_PDPTR);
+       vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;
 
        /*
         * We need to handle MC intercepts here before the vcpu has a chance to
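
Clearing only SVM_REGS_LAZY_LOAD_SET at this point means a later PDPTR
access refills the cache on demand.  Roughly, from kvm_cache_regs.h of
this era:

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	/* The avail bit was cleared after vmexit, so go through ->cache_reg(). */
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}
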
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index a57390473013da68616ee234c553005396d34e8c..9f153c59f2c8d5ba1442ee8c9e855589e8a39a65 100644
@@ -326,6 +326,16 @@ static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
        return container_of(vcpu, struct vcpu_svm, vcpu);
 }
 
+/*
+ * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
+ * fields are synchronized in handle_exit, because accessing the VMCB is cheap.
+ *
+ * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
+ * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
+ * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
+ */
+#define SVM_REGS_LAZY_LOAD_SET (1 << VCPU_EXREG_PDPTR)
+
 static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
 {
        WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
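
On SVM the on-demand path only has the PDPTRs to service; an abbreviated
sketch of the matching ->cache_reg() callback (svm_cache_reg() in svm.c,
with the load_pdptrs() signature as of this series):

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {
	case VCPU_EXREG_PDPTR:
		/*
		 * With NPT the guest PDPTEs are read from guest memory
		 * only when the shadow MMU actually needs them.
		 */
		if (npt_enabled)
			load_pdptrs(vcpu, vcpu->arch.walk_mmu,
				    kvm_read_cr3(vcpu));
		break;
	default:
		KVM_BUG_ON(1, vcpu->kvm);
	}
}
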
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b240776151c6c9e0b528bad4f081568ebd850748..dc5041ad860fff89a15a7aaa252e36e7c3b96acc 100644
@@ -269,7 +269,13 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
        vmx_sync_vmcs_host_state(vmx, prev);
        put_cpu();
 
-       vmx_register_cache_reset(vcpu);
+       vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET;
+
+       /*
+        * All lazily updated registers will be reloaded from VMCS12 on both
+        * vmentry and vmexit.
+        */
+       vcpu->arch.regs_dirty = 0;
 }
 
 /*
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index ffe45435b77e995227a083c29b677e17df064f42..c65ff62e11f506dd64749a7e5b2a1d56c83ebd45 100644
@@ -6649,6 +6649,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
        if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
                vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+       vcpu->arch.regs_dirty = 0;
 
        cr3 = __get_current_cr3_fast();
        if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
@@ -6743,7 +6744,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
        loadsegment(es, __USER_DS);
 #endif
 
-       vmx_register_cache_reset(vcpu);
+       vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
 
        pt_guest_exit(vmx);
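
As on SVM, any register left unavailable in VMX_REGS_LAZY_LOAD_SET is
refilled on first use through ->cache_reg(); an abbreviated sketch of
vmx_cache_reg() in vmx.c:

static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	/* Mark available first: the value is about to mirror the VMCS. */
	kvm_register_mark_available(vcpu, reg);

	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	/* PDPTR, CR0, CR3, CR4 and EXIT_INFO_{1,2} cases elided. */
	default:
		KVM_BUG_ON(1, vcpu->kvm);
	}
}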
 
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 4df2ac24ffc13009db0f43486450594a4d35432b..f978699480e36ec8c9bc85538c7fb9996e76541b 100644
@@ -473,19 +473,21 @@ BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
 BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
 BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
 
-static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
-{
-       vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
-                                 | (1 << VCPU_EXREG_RFLAGS)
-                                 | (1 << VCPU_EXREG_PDPTR)
-                                 | (1 << VCPU_EXREG_SEGMENTS)
-                                 | (1 << VCPU_EXREG_CR0)
-                                 | (1 << VCPU_EXREG_CR3)
-                                 | (1 << VCPU_EXREG_CR4)
-                                 | (1 << VCPU_EXREG_EXIT_INFO_1)
-                                 | (1 << VCPU_EXREG_EXIT_INFO_2));
-       vcpu->arch.regs_dirty = 0;
-}
+/*
+ * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
+ * cache on demand.  Other registers not listed here are synced to
+ * the cache immediately after VM-Exit.
+ */
+#define VMX_REGS_LAZY_LOAD_SET ((1 << VCPU_REGS_RIP) |         \
+                               (1 << VCPU_REGS_RSP) |          \
+                               (1 << VCPU_EXREG_RFLAGS) |      \
+                               (1 << VCPU_EXREG_PDPTR) |       \
+                               (1 << VCPU_EXREG_SEGMENTS) |    \
+                               (1 << VCPU_EXREG_CR0) |         \
+                               (1 << VCPU_EXREG_CR3) |         \
+                               (1 << VCPU_EXREG_CR4) |         \
+                               (1 << VCPU_EXREG_EXIT_INFO_1) | \
+                               (1 << VCPU_EXREG_EXIT_INFO_2))
 
 static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
 {