KVM: x86: make vendor code check for all nested events
authorPaolo Bonzini <pbonzini@redhat.com>
Wed, 21 Sep 2022 00:31:50 +0000 (00:31 +0000)
committerPaolo Bonzini <pbonzini@redhat.com>
Mon, 26 Sep 2022 16:37:17 +0000 (12:37 -0400)
Interrupts, NMIs etc. sent while in guest mode are already handled
properly by the *_interrupt_allowed callbacks, but there are other
events, specific to guest mode, that can cause a vCPU to be runnable.

In the case of VMX there are two such events: the preemption timer and
the monitor trap flag (MTF).  The VMX preemption timer is already special cased via
the hv_timer_pending callback, but the purpose of the callback can be
easily extended to MTF or in fact any other event that can occur only
in guest mode.

Rename the callback and add an MTF check; kvm_arch_vcpu_runnable()
now can return true if an MTF is pending, without relying on
kvm_vcpu_running()'s call to kvm_check_nested_events().  Until that call
is removed, however, the patch introduces no functional change.

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220921003201.1441511-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/x86.c

index b3ce723efb4391e2b1ad0334c0593d092e1794b3..d40206b16d6cc260e3141f73484390cd00bcc4fb 100644 (file)
@@ -1643,7 +1643,7 @@ struct kvm_x86_nested_ops {
        bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
                                    u32 error_code);
        int (*check_events)(struct kvm_vcpu *vcpu);
-       bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
+       bool (*has_events)(struct kvm_vcpu *vcpu);
        void (*triple_fault)(struct kvm_vcpu *vcpu);
        int (*get_state)(struct kvm_vcpu *vcpu,
                         struct kvm_nested_state __user *user_kvm_nested_state,
index 4da0558943ce506110ae87201edf904e59bf65d7..85318d803f4f4300234d249be40fa9e391ae8b4b 100644 (file)
@@ -3929,6 +3929,12 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
               to_vmx(vcpu)->nested.preemption_timer_expired;
 }
 
+static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
+{
+       return nested_vmx_preemption_timer_pending(vcpu) ||
+              to_vmx(vcpu)->nested.mtf_pending;
+}
+
 /*
  * Per the Intel SDM's table "Priority Among Concurrent Events", with minor
  * edits to fill in missing examples, e.g. #DB due to split-lock accesses,
@@ -6971,7 +6977,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
        .leave_nested = vmx_leave_nested,
        .is_exception_vmexit = nested_vmx_is_exception_vmexit,
        .check_events = vmx_check_nested_events,
-       .hv_timer_pending = nested_vmx_preemption_timer_pending,
+       .has_events = vmx_has_nested_events,
        .triple_fault = nested_vmx_triple_fault,
        .get_state = vmx_get_nested_state,
        .set_state = vmx_set_nested_state,
index a532b9dea57b49300ab224e394fec543a67930e3..10f28954378594d6e79a0a0df184d9e2ca9edd90 100644 (file)
@@ -9968,8 +9968,8 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
        }
 
        if (is_guest_mode(vcpu) &&
-           kvm_x86_ops.nested_ops->hv_timer_pending &&
-           kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
+           kvm_x86_ops.nested_ops->has_events &&
+           kvm_x86_ops.nested_ops->has_events(vcpu))
                *req_immediate_exit = true;
 
        WARN_ON(kvm_is_exception_pending(vcpu));
@@ -12794,8 +12794,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
                return true;
 
        if (is_guest_mode(vcpu) &&
-           kvm_x86_ops.nested_ops->hv_timer_pending &&
-           kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
+           kvm_x86_ops.nested_ops->has_events &&
+           kvm_x86_ops.nested_ops->has_events(vcpu))
                return true;
 
        if (kvm_xen_has_pending_events(vcpu))