KVM: x86: More precisely identify NMI from guest when handling PMI
author Sean Christopherson <seanjc@google.com>
Thu, 11 Nov 2021 02:07:32 +0000 (02:07 +0000)
committer Peter Zijlstra <peterz@infradead.org>
Wed, 17 Nov 2021 13:49:09 +0000 (14:49 +0100)
Differentiate between IRQ and NMI for KVM's PMC overflow callback, which
was originally invoked in response to an NMI that arrived while the guest
was running, but was inadvertently changed to fire on IRQs as well when
support for perf without PMU/NMI was added to KVM.  In practice, this
should be a nop as the PMC overflow callback shouldn't be reached, but
it's a cheap and easy fix that also better documents the situation.

Note, this also doesn't completely prevent false positives if perf
somehow ends up calling into KVM, e.g. an NMI can arrive in the host after
KVM sets its flag.
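
The intended consumer of this state is KVM's perf/PMI callback, which
should attribute a PMC overflow to the guest only when KVM is handling a
guest NMI.  A minimal illustrative sketch of that usage, built on the
kvm_handling_nmi_from_guest() helper updated below and assuming the usual
KVM x86 internal headers; the callback name and the request it makes are
simplified stand-ins, not the exact code in arch/x86/kvm/pmu.c:

	/* Illustrative sketch only; not the actual perf overflow callback. */
	static void example_pmc_overflow_cb(struct kvm_vcpu *vcpu)
	{
		/*
		 * Attribute the overflow to the guest only if KVM is handling
		 * an NMI that fired while the guest was running.  An overflow
		 * observed from IRQ context (KVM_HANDLING_IRQ) is not treated
		 * as a guest NMI-time PMI.
		 */
		if (kvm_handling_nmi_from_guest(vcpu))
			kvm_make_request(KVM_REQ_PMI, vcpu);
	}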

Fixes: dd60d217062f ("KVM: x86: Fix perf timer mode IP reporting")
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Link: https://lore.kernel.org/r/20211111020738.2512932-12-seanjc@google.com
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5630c241d5f6e0bdf1899163cfdfef57c18b5d47..b2f0c6c40802b3ba6fbf15842d257e0b36acdaab 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3931,7 +3931,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
        }
 
        if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
-               kvm_before_interrupt(vcpu);
+               kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
 
        kvm_load_host_xsave_state(vcpu);
        stgi();
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 7d90c8d443ac99107f02de20facce9e0ee11c355..a0c24976e6bbd1c8e4f6187abce9391b22be9ad7 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6317,7 +6317,9 @@ void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
 static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
                                        unsigned long entry)
 {
-       kvm_before_interrupt(vcpu);
+       bool is_nmi = entry == (unsigned long)asm_exc_nmi_noist;
+
+       kvm_before_interrupt(vcpu, is_nmi ? KVM_HANDLING_NMI : KVM_HANDLING_IRQ);
        vmx_do_interrupt_nmi_irqoff(entry);
        kvm_after_interrupt(vcpu);
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bb71e10fdb6aa0a317e7bae51e452171c4259661..ab032ef7879f5cd202a0384dc32415dce0e4cd31 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9896,7 +9896,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         * interrupts on processors that implement an interrupt shadow, the
         * stat.exits increment will do nicely.
         */
-       kvm_before_interrupt(vcpu);
+       kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
        local_irq_enable();
        ++vcpu->stat.exits;
        local_irq_disable();
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index d070043fd2e89566ef34ac70673234b3092da1cf..f8d2c58feadc72a69496f72a6155edc5751e0a64 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -385,9 +385,16 @@ static inline bool kvm_cstate_in_guest(struct kvm *kvm)
        return kvm->arch.cstate_in_guest;
 }
 
-static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
+enum kvm_intr_type {
+       /* Values are arbitrary, but must be non-zero. */
+       KVM_HANDLING_IRQ = 1,
+       KVM_HANDLING_NMI,
+};
+
+static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
+                                       enum kvm_intr_type intr)
 {
-       WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 1);
+       WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
 }
 
 static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
@@ -397,7 +404,7 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
 
 static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
 {
-       return !!vcpu->arch.handling_intr_from_guest;
+       return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
 }
 
 static inline bool kvm_pat_valid(u64 data)