KVM: x86: nSVM: restore the L1 host state prior to resuming nested guest on SMM exit
Author:     Maxim Levitsky <mlevitsk@redhat.com>
AuthorDate: Mon, 13 Sep 2021 14:09:49 +0000 (17:09 +0300)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Wed, 22 Sep 2021 14:33:16 +0000 (10:33 -0400)
Otherwise, the nested guest entry code might see incorrect L1 state (e.g. paging state).
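
For illustration, a minimal sketch of the corrected ordering, using the names
from the patch below (map/map_save setup, error handling, and unmapping are
elided; this is a simplified fragment, not the exact upstream code):

	if (svm_allocate_nested(svm))
		return 1;

	/* Restore the L1 host state saved by the last VMRUN first ... */
	svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
			     map_save.hva + 0x400);

	/*
	 * ... and only then load the vmcb12 controls and re-enter the
	 * nested guest, so that enter_svm_guest_mode() runs against the
	 * correct L1 (host) state.
	 */
	vmcb12 = map.hva;
	nested_load_control_from_vmcb12(svm, &vmcb12->control);
	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);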

Fixes: 37be407b2ce8 ("KVM: nSVM: Fix L1 state corruption upon return from SMM")
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210913140954.165665-3-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 05e8d4d279699ee003a028eaee2a5047a38f7587..35cac2046f69af4ecd76454f015b523dae901500 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4351,11 +4351,6 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
                        if (svm_allocate_nested(svm))
                                return 1;
 
-                       vmcb12 = map.hva;
-
-                       nested_load_control_from_vmcb12(svm, &vmcb12->control);
-
-                       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
                        kvm_vcpu_unmap(vcpu, &map, true);
 
                        /*
@@ -4369,6 +4364,13 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
                        svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
                                             map_save.hva + 0x400);
 
+                       /*
+                        * Enter the nested guest now
+                        */
+                       vmcb12 = map.hva;
+                       nested_load_control_from_vmcb12(svm, &vmcb12->control);
+                       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12);
+
                        kvm_vcpu_unmap(vcpu, &map_save, true);
                }
        }