Currently, the 'nested_run' statistic counts all guest-entry attempts,
including those that fail during vmentry checks on Intel and during
consistency checks on AMD. Convert this statistic to count only those
guest-entries that make it past these state checks and reach guest code.
The statistic then reflects the number of guest-entries that actually
executed, or attempted to execute, guest code.
Signed-off-by: Krish Sadhukhan <Krish.Sadhukhan@oracle.com>
Message-Id: <20210609180340.104248-2-krish.sadhukhan@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
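
For reference, here is a minimal, self-contained userspace sketch (plain C,
not kernel code; the names vcpu_stat_demo, state_checks_ok and fake_vmentry
are made up for illustration) of the counting pattern introduced below: the
counter is bumped only after the simulated state checks pass, analogous to
the SVM_EXIT_ERR and exit_reason.failed_vmentry tests in the hunks.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the vcpu stat being counted. */
struct vcpu_stat_demo {
	unsigned long long nested_run;
};

/* Pretend every third guest-entry attempt fails its state checks. */
static bool state_checks_ok(int attempt)
{
	return (attempt % 3) != 0;
}

/* Count the entry only once it has made it past the checks. */
static void fake_vmentry(struct vcpu_stat_demo *stat, int attempt)
{
	bool nested_run_pending = true;

	if (nested_run_pending && state_checks_ok(attempt))
		++stat->nested_run;
}

int main(void)
{
	struct vcpu_stat_demo stat = { 0 };
	int i;

	for (i = 1; i <= 9; i++)
		fake_vmentry(&stat, i);

	/* Prints 6: the three failed attempts are no longer counted. */
	printf("nested_run = %llu\n", stat.nested_run);
	return 0;
}
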
        struct kvm_host_map map;
        u64 vmcb12_gpa;
 
-       ++vcpu->stat.nested_run;
-
        if (is_smm(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
 
        svm->next_rip = 0;
        if (is_guest_mode(vcpu)) {
                nested_sync_control_from_vmcb02(svm);
+
+               /* Track VMRUNs that have made it past consistency checking */
+               if (svm->nested.nested_run_pending &&
+                   svm->vmcb->control.exit_code != SVM_EXIT_ERR)
+                       ++vcpu->stat.nested_run;
+
                svm->nested.nested_run_pending = 0;
        }
 
 
        u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
        enum nested_evmptrld_status evmptrld_status;
 
-       ++vcpu->stat.nested_run;
-
        if (!nested_vmx_check_permission(vcpu))
                return 1;
 
 
 
        kvm_load_host_xsave_state(vcpu);
 
-       vmx->nested.nested_run_pending = 0;
+       if (is_guest_mode(vcpu)) {
+               /*
+                * Track VMLAUNCH/VMRESUME that have made it past guest state
+                * checking.
+                */
+               if (vmx->nested.nested_run_pending &&
+                   !vmx->exit_reason.failed_vmentry)
+                       ++vcpu->stat.nested_run;
+
+               vmx->nested.nested_run_pending = 0;
+       }
+
        vmx->idt_vectoring_info = 0;
 
        if (unlikely(vmx->fail)) {