KVM: selftests: Verify VMX MSRs can be restored to KVM-supported values
author Sean Christopherson <seanjc@google.com>
Tue, 7 Jun 2022 21:36:04 +0000 (21:36 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 28 Jul 2022 17:25:24 +0000 (13:25 -0400)
Verify that KVM allows toggling VMX MSR bits to be "more" restrictive,
and also allows restoring each MSR to KVM's original, less restrictive
value.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220607213604.3346000-16-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/x86_64/vmx_msrs_test.c [new file with mode: 0644]

index 91429330faea027154f4fb9ad4d113eef80e0695..d625a3f837806a3f03d76671f03d87d0c8e5c76c 100644 (file)
@@ -50,6 +50,7 @@
 /x86_64/vmx_dirty_log_test
 /x86_64/vmx_exception_with_invalid_guest_state
 /x86_64/vmx_invalid_nested_guest_state
+/x86_64/vmx_msrs_test
 /x86_64/vmx_preemption_timer_test
 /x86_64/vmx_set_nested_state_test
 /x86_64/vmx_tsc_adjust_test
index 6b22fb1ce2b90d374900c7fcfc458b9f96199baf..690b499c3471d04a9ab0b3b04f7e94a0d3a89e28 100644 (file)
@@ -107,6 +107,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_exception_with_invalid_guest_state
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_invalid_nested_guest_state
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
index 4060fe954d5385b4acef2414c296fa0cac31c935..45edf45821d05391e2bf597bc77b0813fee36fd7 100644 (file)
@@ -100,6 +100,7 @@ struct kvm_x86_cpu_feature {
 #define        X86_FEATURE_SMEP                KVM_X86_CPU_FEATURE(0x7, 0, EBX, 7)
 #define        X86_FEATURE_INVPCID             KVM_X86_CPU_FEATURE(0x7, 0, EBX, 10)
 #define        X86_FEATURE_RTM                 KVM_X86_CPU_FEATURE(0x7, 0, EBX, 11)
+#define        X86_FEATURE_MPX                 KVM_X86_CPU_FEATURE(0x7, 0, EBX, 14)
 #define        X86_FEATURE_SMAP                KVM_X86_CPU_FEATURE(0x7, 0, EBX, 20)
 #define        X86_FEATURE_PCOMMIT             KVM_X86_CPU_FEATURE(0x7, 0, EBX, 22)
 #define        X86_FEATURE_CLFLUSHOPT          KVM_X86_CPU_FEATURE(0x7, 0, EBX, 23)
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_msrs_test.c b/tools/testing/selftests/kvm/x86_64/vmx_msrs_test.c
new file mode 100644 (file)
index 0000000..322d561
--- /dev/null
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VMX control MSR test
+ *
+ * Copyright (C) 2022 Google LLC.
+ *
+ * Tests that KVM allows toggling bits in the VMX capability MSRs to be
+ * "more" restrictive, and that each MSR can be restored to KVM's original,
+ * less restrictive value.
+ */
+#include <linux/bitmap.h>
+#include "kvm_util.h"
+#include "vmx.h"
+
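+/*
+ * For each bit that is both set in @mask and in KVM's supported value, verify
+ * that the bit can be cleared, i.e. made "more" restrictive, and that the MSR
+ * can then be restored to KVM's original, less restrictive value.
+ */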
+static void vmx_fixed1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index,
+                                 uint64_t mask)
+{
+       uint64_t val = vcpu_get_msr(vcpu, msr_index);
+       uint64_t bit;
+
+       mask &= val;
+
+       for_each_set_bit(bit, &mask, 64) {
+               vcpu_set_msr(vcpu, msr_index, val & ~BIT_ULL(bit));
+               vcpu_set_msr(vcpu, msr_index, val);
+       }
+}
+
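+/*
+ * For each bit that is covered by @mask and clear in KVM's supported value,
+ * verify that the bit can be set, i.e. made "more" restrictive, and that the
+ * MSR can then be restored to KVM's original value.
+ */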
+static void vmx_fixed0_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index,
+                               uint64_t mask)
+{
+       uint64_t val = vcpu_get_msr(vcpu, msr_index);
+       uint64_t bit;
+
+       mask = ~mask | val;
+
+       for_each_clear_bit(bit, &mask, 64) {
+               vcpu_set_msr(vcpu, msr_index, val | BIT_ULL(bit));
+               vcpu_set_msr(vcpu, msr_index, val);
+       }
+}
+
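+/*
+ * VMX control MSRs report allowed-0 settings in bits 31:0 and allowed-1
+ * settings in bits 63:32; exercise both halves.
+ */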
+static void vmx_fixed0and1_msr_test(struct kvm_vcpu *vcpu, uint32_t msr_index)
+{
+       vmx_fixed0_msr_test(vcpu, msr_index, GENMASK_ULL(31, 0));
+       vmx_fixed1_msr_test(vcpu, msr_index, GENMASK_ULL(63, 32));
+}
+
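+/*
+ * Toggle the writable bits in each VMX capability MSR to their "more"
+ * restrictive setting, restoring KVM's original value after each toggle.
+ */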
+static void vmx_save_restore_msrs_test(struct kvm_vcpu *vcpu)
+{
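+       /* KVM accepts any value for VMCS_ENUM; sanity check the extremes. */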
+       vcpu_set_msr(vcpu, MSR_IA32_VMX_VMCS_ENUM, 0);
+       vcpu_set_msr(vcpu, MSR_IA32_VMX_VMCS_ENUM, -1ull);
+
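+       /*
+        * VMX_BASIC's writable feature bits: 49 (dual-monitor treatment
+        * of SMIs/SMM), 54 (VM-exit instruction info for INS/OUTS) and
+        * 55 ("true" capability MSRs).
+        */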
+       vmx_fixed1_msr_test(vcpu, MSR_IA32_VMX_BASIC,
+                           BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55));
+
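+       /* Feature bits in VMX_MISC that KVM allows userspace to clear. */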
+       vmx_fixed1_msr_test(vcpu, MSR_IA32_VMX_MISC,
+                           BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) |
+                           BIT_ULL(15) | BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30));
+
+       vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_PROCBASED_CTLS2);
+       vmx_fixed1_msr_test(vcpu, MSR_IA32_VMX_EPT_VPID_CAP, -1ull);
+       vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_TRUE_PINBASED_CTLS);
+       vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS);
+       vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_TRUE_EXIT_CTLS);
+       vmx_fixed0and1_msr_test(vcpu, MSR_IA32_VMX_TRUE_ENTRY_CTLS);
+       vmx_fixed1_msr_test(vcpu, MSR_IA32_VMX_VMFUNC, -1ull);
+}
+
+int main(void)
+{
+       struct kvm_vcpu *vcpu;
+       struct kvm_vm *vm;
+
+       TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+
+       /* No need to actually do KVM_RUN, thus no guest code. */
+       vm = vm_create_with_one_vcpu(&vcpu, NULL);
+
+       vmx_save_restore_msrs_test(vcpu);
+
+       kvm_vm_free(vm);
+}