KVM: x86: Make vmx_get_exit_qual() and vmx_get_intr_info() noinstr-friendly
author Sean Christopherson <seanjc@google.com>
Tue, 13 Dec 2022 06:09:06 +0000 (06:09 +0000)
committer Sean Christopherson <seanjc@google.com>
Tue, 24 Jan 2023 18:35:38 +0000 (10:35 -0800)
Add an extra special noinstr-friendly helper to test+mark a "register"
available and use it when caching vmcs.EXIT_QUALIFICATION and
vmcs.VM_EXIT_INTR_INFO.  Make the caching helpers __always_inline too so
that they can be used in noinstr functions.

A future fix will move VMX's handling of NMI exits into the noinstr
vmx_vcpu_enter_exit() so that the NMI is processed before any kind of
instrumentation can trigger a fault and thus IRET, i.e. so that KVM
doesn't invoke the NMI handler with NMIs enabled.
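
As a rough sketch of that end state (illustration only, not part of this
patch; the wrapper function below is hypothetical), a noinstr caller can
then safely do:

  static noinstr void vmx_handle_nmi_on_exit(struct kvm_vcpu *vcpu)
  {
          /* First use after VM-Exit reads the VMCS, later uses hit the cache. */
          u32 intr_info = vmx_get_intr_info(vcpu);

          if (is_nmi(intr_info)) {
                  /* Invoke the NMI handler while NMIs are still blocked. */
          }
  }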

Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20221213060912.654668-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/kvm_cache_regs.h
arch/x86/kvm/vmx/vmx.h

index c09174f73a344f79d61967ed2e2e5ca55c4d5e9b..4c91f626c05808305d5f554bbf39bd5ec0c5d1cd 100644 (file)
@@ -75,6 +75,18 @@ static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
 }
 
+/*
+ * kvm_register_test_and_mark_available() is a special snowflake that uses an
+ * arch bitop directly to avoid the explicit instrumentation that comes with
+ * the generic bitops.  This allows code that cannot be instrumented (noinstr
+ * functions), e.g. the low level VM-Enter/VM-Exit paths, to cache registers.
+ */
+static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu,
+                                                                enum kvm_reg reg)
+{
+       return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
 /*
  * The "raw" register helpers are only for cases where the full 64 bits of a
  * register are read/written irrespective of current vCPU mode.  In other words,
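
For reference, the generic non-atomic bitop that the new helper bypasses wraps
the arch implementation with explicit instrumentation hooks, roughly as below
(paraphrased from include/asm-generic/bitops/instrumented-non-atomic.h; exact
naming varies by kernel version, shown only to illustrate why it is off-limits
in noinstr code):

  static __always_inline bool
  __test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
  {
          /* KASAN/KCSAN hook: instrumented, hence unusable from noinstr code. */
          instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
          return arch___test_and_set_bit(nr, addr);
  }
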
index a3da84f4ea45609d44f76857d0e4cf3162fe210e..bb720a2f11abd9be23e2563cd87c5b39d545c966 100644 (file)
@@ -669,25 +669,23 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
 int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
 void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
 
-static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
+static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
-               kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
+       if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1))
                vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-       }
+
        return vmx->exit_qualification;
 }
 
-static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
+static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
-               kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
+       if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2))
                vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-       }
+
        return vmx->exit_intr_info;
 }