KVM: allow compiling out SMM support
authorPaolo Bonzini <pbonzini@redhat.com>
Thu, 29 Sep 2022 17:20:13 +0000 (13:20 -0400)
committerPaolo Bonzini <pbonzini@redhat.com>
Wed, 9 Nov 2022 17:31:19 +0000 (12:31 -0500)
Some users of KVM implement the UEFI variable store through a paravirtual device
that does not require the "SMM lockbox" component of edk2; allow them to
compile out support for system management mode, which in any case is not a
complete implementation, especially in how it interacts with nested
virtualization.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20220929172016.319443-6-pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/Kconfig
arch/x86/kvm/Makefile
arch/x86/kvm/smm.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
tools/testing/selftests/kvm/x86_64/smm_test.c

index 67be7f217e37bdad646c288ee3a3b18ceba5939b..fbeaa9ddef5985f193afccc58a0dc36c1eeb0103 100644 (file)
@@ -118,6 +118,17 @@ config KVM_AMD_SEV
          Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
          with Encrypted State (SEV-ES) on AMD processors.
 
+config KVM_SMM
+       bool "System Management Mode emulation"
+       default y
+       depends on KVM
+       help
+         Provides support for KVM to emulate System Management Mode (SMM)
+         in virtual machines.  This can be used by the virtual machine
+         firmware to implement UEFI secure boot.
+
+         If unsure, say Y.
+
 config KVM_XEN
        bool "Support for Xen hypercall interface"
        depends on KVM
index b584cb0e06bd8bc70e54136eabd006814369f84c..b8a494b6a5ec9b77254b62ed435f8cd563921c6b 100644 (file)
@@ -20,7 +20,7 @@ endif
 
 kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
 kvm-$(CONFIG_KVM_XEN)  += xen.o
-kvm-y                  += smm.o
+kvm-$(CONFIG_KVM_SMM)  += smm.o
 
 kvm-intel-y            += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
                           vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
index b0602a92e511e1c6c3ac39394d0c9c0e9cc5a511..0e1bd8bd6dc4709b32926afc6e6c918d2e5f14a7 100644 (file)
@@ -8,6 +8,7 @@
 #define PUT_SMSTATE(type, buf, offset, val)                      \
        *(type *)((buf) + (offset) - 0x7e00) = val
 
+#ifdef CONFIG_KVM_SMM
 static inline int kvm_inject_smi(struct kvm_vcpu *vcpu)
 {
        kvm_make_request(KVM_REQ_SMI, vcpu);
@@ -23,5 +24,16 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool in_smm);
 void enter_smm(struct kvm_vcpu *vcpu);
 int emulator_leave_smm(struct x86_emulate_ctxt *ctxt);
 void process_smi(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; }
+static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; }
+static inline void enter_smm(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
+static inline void process_smi(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
+
+/*
+ * emulator_leave_smm is used as a function pointer, so the
+ * stub is defined in x86.c.
+ */
+#endif
 
 #endif
index 3bb07ec789859faa29493dc668ecf8f221ad781d..4cc014b464061cdb70842f4ef67dd7ef1d4276e2 100644 (file)
@@ -4115,6 +4115,8 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
        case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
                return false;
        case MSR_IA32_SMBASE:
+               if (!IS_ENABLED(CONFIG_KVM_SMM))
+                       return false;
                /* SEV-ES guests do not support SMM, so report false */
                if (kvm && sev_es_guest(kvm))
                        return false;
index 49065614a3dbb39f201e898e8c3884783847d758..6a0b65815206baa3ae47e5f7b3f388612d7c19f6 100644 (file)
@@ -6842,6 +6842,8 @@ static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
 {
        switch (index) {
        case MSR_IA32_SMBASE:
+               if (!IS_ENABLED(CONFIG_KVM_SMM))
+                       return false;
                /*
                 * We cannot do SMM unless we can run the guest in big
                 * real mode.
index 019ba8725412ce4f52310f807f64f7c2436a9499..0a80cd1d91c8c8b9dcaad0b867022c43c7c18522 100644 (file)
@@ -3642,7 +3642,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        }
        case MSR_IA32_SMBASE:
-               if (!msr_info->host_initiated)
+               if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated)
                        return 1;
                vcpu->arch.smbase = data;
                break;
@@ -4058,7 +4058,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = vcpu->arch.ia32_misc_enable_msr;
                break;
        case MSR_IA32_SMBASE:
-               if (!msr_info->host_initiated)
+               if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated)
                        return 1;
                msr_info->data = vcpu->arch.smbase;
                break;
@@ -4432,6 +4432,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                        r |= KVM_X86_DISABLE_EXITS_MWAIT;
                break;
        case KVM_CAP_X86_SMM:
+               if (!IS_ENABLED(CONFIG_KVM_SMM))
+                       break;
+
                /* SMBASE is usually relocated above 1M on modern chipsets,
                 * and SMM handlers might indeed rely on 4G segment limits,
                 * so do not report SMM to be available if real mode is
@@ -5182,6 +5185,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
        if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
+#ifdef CONFIG_KVM_SMM
                if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
                        kvm_x86_ops.nested_ops->leave_nested(vcpu);
                        kvm_smm_changed(vcpu, events->smi.smm);
@@ -5196,6 +5200,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                                vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
                }
 
+#else
+               if (events->smi.smm || events->smi.pending ||
+                   events->smi.smm_inside_nmi)
+                       return -EINVAL;
+#endif
+
                if (lapic_in_kernel(vcpu)) {
                        if (events->smi.latched_init)
                                set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
@@ -8121,6 +8131,14 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
        return emul_to_vcpu(ctxt)->arch.hflags;
 }
 
+#ifndef CONFIG_KVM_SMM
+static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
+{
+       WARN_ON_ONCE(1);
+       return X86EMUL_UNHANDLEABLE;
+}
+#endif
+
 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
 {
        kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
index 1f136a81858e5df0903865d8364869edbaf9ba39..cb38a478e1f62aec9e52ca861baddfaf07b03ee8 100644 (file)
@@ -137,6 +137,8 @@ int main(int argc, char *argv[])
        struct kvm_x86_state *state;
        int stage, stage_reported;
 
+       TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM));
+
        /* Create VM */
        vm = vm_create_with_one_vcpu(&vcpu, guest_code);