target/i386: skip KVM_GET/SET_NESTED_STATE if VMX disabled, or for SVM
author    Paolo Bonzini <pbonzini@redhat.com>
          Thu, 11 Jul 2019 13:41:48 +0000 (15:41 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 19 Jul 2019 16:02:22 +0000 (18:02 +0200)
Do not allocate env->nested_state unless we later need to migrate the
nested virtualization state.

With this change, nested_state_needed() will return false if the
VMX flag is not included in the virtual machine.  KVM_GET/SET_NESTED_STATE
is also disabled for SVM, which is safer (we know that at least the NPT
root and paging mode would have to be saved/loaded), and thus the
corresponding subsection can go away as well.
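
For context, the "VMX flag" gate used in the kvm.c hunk reduces to a guest
CPUID feature test.  A minimal sketch, assuming it matches the cpu_has_vmx()
helper and the FEAT_1_ECX / CPUID_EXT_VMX names from target/i386/cpu.h:

    /* Sketch of the VMX gate: the guest "has VMX" when the VMX bit is
     * advertised in virtual CPUID.1:ECX.  Relies on the CPUX86State,
     * FEAT_1_ECX and CPUID_EXT_VMX definitions from target/i386/cpu.h. */
    static inline bool cpu_has_vmx(CPUX86State *env)
    {
        return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
    }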

Inspired by a patch from Liran Alon.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
target/i386/kvm.c
target/i386/machine.c

diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 4542f0fad0cb373024c4f7f39362aae0952cc1d1..ada89d27cc2c2e50eb7edbb5c71a4b3b9c7b8823 100644
@@ -1711,15 +1711,15 @@ int kvm_arch_init_vcpu(CPUState *cs)
     max_nested_state_len = kvm_max_nested_state_length();
     if (max_nested_state_len > 0) {
         assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
-        env->nested_state = g_malloc0(max_nested_state_len);
 
-        env->nested_state->size = max_nested_state_len;
-
-        if (IS_INTEL_CPU(env)) {
-            struct kvm_vmx_nested_state_hdr *vmx_hdr =
-                &env->nested_state->hdr.vmx;
+        if (cpu_has_vmx(env)) {
+            struct kvm_vmx_nested_state_hdr *vmx_hdr;
 
+            env->nested_state = g_malloc0(max_nested_state_len);
+            env->nested_state->size = max_nested_state_len;
             env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
+
+            vmx_hdr = &env->nested_state->hdr.vmx;
             vmx_hdr->vmxon_pa = -1ull;
             vmx_hdr->vmcs12_pa = -1ull;
         }
@@ -3515,7 +3515,7 @@ static int kvm_put_nested_state(X86CPU *cpu)
     CPUX86State *env = &cpu->env;
     int max_nested_state_len = kvm_max_nested_state_length();
 
-    if (max_nested_state_len <= 0) {
+    if (!env->nested_state) {
         return 0;
     }
 
@@ -3529,7 +3529,7 @@ static int kvm_get_nested_state(X86CPU *cpu)
     int max_nested_state_len = kvm_max_nested_state_length();
     int ret;
 
-    if (max_nested_state_len <= 0) {
+    if (!env->nested_state) {
         return 0;
     }
 
diff --git a/target/i386/machine.c b/target/i386/machine.c
index ac2d1d1d3688a3ee5ac90ba24dd6e53cbd5ae738..b1146093b5724d325bd37a0a20bbc5b01420ca5f 100644
@@ -1035,31 +1035,13 @@ static const VMStateDescription vmstate_vmx_nested_state = {
     }
 };
 
-static bool svm_nested_state_needed(void *opaque)
-{
-    struct kvm_nested_state *nested_state = opaque;
-
-    return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM);
-}
-
-static const VMStateDescription vmstate_svm_nested_state = {
-    .name = "cpu/kvm_nested_state/svm",
-    .version_id = 1,
-    .minimum_version_id = 1,
-    .needed = svm_nested_state_needed,
-    .fields = (VMStateField[]) {
-        VMSTATE_END_OF_LIST()
-    }
-};
-
 static bool nested_state_needed(void *opaque)
 {
     X86CPU *cpu = opaque;
     CPUX86State *env = &cpu->env;
 
     return (env->nested_state &&
-            (vmx_nested_state_needed(env->nested_state) ||
-             svm_nested_state_needed(env->nested_state)));
+            vmx_nested_state_needed(env->nested_state));
 }
 
 static int nested_state_post_load(void *opaque, int version_id)
@@ -1121,7 +1103,6 @@ static const VMStateDescription vmstate_kvm_nested_state = {
     },
     .subsections = (const VMStateDescription*[]) {
         &vmstate_vmx_nested_state,
-        &vmstate_svm_nested_state,
         NULL
     }
 };
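
The old guard in kvm_put_nested_state()/kvm_get_nested_state() tested the
kernel capability directly; keying off env->nested_state instead also covers
guests without VMX, since the buffer is now only allocated in that case.
As a reference point, a minimal sketch of what the old check queried,
assuming kvm_max_nested_state_length() is a thin wrapper around the
KVM_CAP_NESTED_STATE capability (the wrapper body here is an assumption,
not lifted from the tree):

    /* Sketch: KVM reports the maximum size of the nested-state blob via
     * the KVM_CAP_NESTED_STATE extension; a return of 0 means the kernel
     * offers no KVM_GET/SET_NESTED_STATE support at all. */
    static int kvm_max_nested_state_length(void)
    {
        return kvm_check_extension(kvm_state, KVM_CAP_NESTED_STATE);
    }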