KVM: x86: hyper-v: Make Hyper-V emulation enablement conditional
author     Vitaly Kuznetsov <vkuznets@redhat.com>
           Tue, 26 Jan 2021 13:48:14 +0000 (14:48 +0100)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Tue, 9 Feb 2021 13:39:56 +0000 (08:39 -0500)
Hyper-V emulation is currently enabled in KVM unconditionally. This is bad
at least from a security standpoint as it is an extra attack surface.
Ideally, there should be a per-VM capability explicitly enabled by the VMM,
but currently this is not the case and we can't mandate one without
breaking backwards compatibility. We can, however, check the guest-visible
CPUIDs and only enable Hyper-V emulation when the "Hv#1" interface
signature is exposed in HYPERV_CPUID_INTERFACE.
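
For reference, the "Hv#1" signature is just those four ASCII characters
packed little-endian into EAX; a minimal standalone check of the constant
introduced below (not part of the patch, assumes a little-endian host):

  #include <assert.h>
  #include <stdint.h>
  #include <string.h>

  int main(void)
  {
          uint32_t eax;

          /* "Hv#1" packed little-endian into a 32-bit EAX value */
          memcpy(&eax, "Hv#1", sizeof(eax));
          assert(eax == 0x31237648); /* HYPERV_CPUID_SIGNATURE_EAX */
          return 0;
  }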

Note: VMMs are free to act in any order they like, e.g. they can try to
set MSRs first and CPUIDs later, so we still need to allow the host to
read/write Hyper-V specific MSRs unconditionally.
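
As an illustration only, a rough userspace sketch of how a VMM would turn
Hyper-V emulation on for a vCPU (not part of the patch; the
HYPERV_CPUID_INTERFACE constant is defined locally, error handling is
omitted, and a real VMM would set the full CPUID list, e.g. derived from
KVM_GET_SUPPORTED_HV_CPUID, rather than a single entry):

  #include <linux/kvm.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>

  #define HYPERV_CPUID_INTERFACE 0x40000001

  static void expose_hv_interface(int vcpu_fd)
  {
          struct kvm_cpuid2 *cpuid;

          cpuid = calloc(1, sizeof(*cpuid) + sizeof(struct kvm_cpuid_entry2));
          cpuid->nent = 1;
          cpuid->entries[0].function = HYPERV_CPUID_INTERFACE;
          cpuid->entries[0].eax = 0x31237648; /* "Hv#1" */

          /*
           * After this, guest accesses to Hyper-V MSRs and Hyper-V
           * hypercalls are accepted; host-initiated MSR accesses keep
           * working regardless of KVM_SET_MSRS vs. KVM_SET_CPUID2 order.
           */
          ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
          free(cpuid);
  }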

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-14-vkuznets@redhat.com>
[Add selftest vcpu_set_hv_cpuid API to avoid breaking xen_vmcall_test. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/cpuid.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/x86.c
arch/x86/kvm/xen.c
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/x86_64/evmcs_test.c
tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c

index 95eb6e93a4b7ff8bec7ff6a572db6e8eef6e729d..84499aad01a468820a961568ee58682d6726938b 100644 (file)
@@ -736,6 +736,7 @@ struct kvm_vcpu_arch {
        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;
 
+       bool hyperv_enabled;
        struct kvm_vcpu_hv *hyperv;
        struct kvm_vcpu_xen xen;
 
index f47bbd4b083735a196f1a5a817052a41a7d5f8c6..c8f2592ccc999780c4c97e7a0fa8c391d7e2466e 100644 (file)
@@ -179,6 +179,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        vcpu->arch.cr4_guest_rsvd_bits =
            __cr4_reserved_bits(guest_cpuid_has, vcpu);
 
+       kvm_hv_set_cpuid(vcpu);
+
        /* Invoke the vendor callback only after the above state is updated. */
        static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
 
index 333bd200f18654f8eab704a4d2f1b3070115f5ad..11d16fe52324c6a6c1d45c938f175f19c7d1556a 100644 (file)
@@ -37,6 +37,9 @@
 #include "trace.h"
 #include "irq.h"
 
+/* "Hv#1" signature */
+#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
+
 #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
 
 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
@@ -1473,6 +1476,9 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 {
        struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
 
+       if (!host && !vcpu->arch.hyperv_enabled)
+               return 1;
+
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;
 
@@ -1488,6 +1494,9 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 {
        struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
 
+       if (!host && !vcpu->arch.hyperv_enabled)
+               return 1;
+
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;
 
@@ -1701,9 +1710,20 @@ ret_success:
        return HV_STATUS_SUCCESS;
 }
 
-bool kvm_hv_hypercall_enabled(struct kvm *kvm)
+void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *entry;
+
+       entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
+       if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX)
+               vcpu->arch.hyperv_enabled = true;
+       else
+               vcpu->arch.hyperv_enabled = false;
+}
+
+bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
 {
-       return to_kvm_hv(kvm)->hv_guest_os_id != 0;
+       return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
 }
 
 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
@@ -2036,8 +2056,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
                        break;
 
                case HYPERV_CPUID_INTERFACE:
-                       memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
-                       ent->eax = signature[0];
+                       ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
                        break;
 
                case HYPERV_CPUID_VERSION:
index 4bc2ab0caa92c2953803787fa1014ba4d587f8f2..f28e7fb887656924dbcd7b17a3cf722fa3316fea 100644 (file)
@@ -92,7 +92,7 @@ static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);
 
-bool kvm_hv_hypercall_enabled(struct kvm *kvm);
+bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu);
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
 
 void kvm_hv_irq_routing_update(struct kvm *kvm);
@@ -141,6 +141,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
 
 void kvm_hv_init_vm(struct kvm *kvm);
 void kvm_hv_destroy_vm(struct kvm *kvm);
+void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu);
 int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
 int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
                     struct kvm_cpuid_entry2 __user *entries);
index a75d01523c4facd1ca2d10a6b8de71dd2a40cfcd..1e5c304d14abbff0046bff5806684d516862a643 100644 (file)
@@ -8162,7 +8162,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
        if (kvm_xen_hypercall_enabled(vcpu->kvm))
                return kvm_xen_hypercall(vcpu);
 
-       if (kvm_hv_hypercall_enabled(vcpu->kvm))
+       if (kvm_hv_hypercall_enabled(vcpu))
                return kvm_hv_hypercall(vcpu);
 
        nr = kvm_rax_read(vcpu);
index b414fed1576b000a4b594e82a753f6eb094acb2a..b08a688e56f9ebab72edd9440d99f2154645e649 100644 (file)
@@ -369,7 +369,7 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
 
        /* Hyper-V hypercalls get bit 31 set in EAX */
        if ((input & 0x80000000) &&
-           kvm_hv_hypercall_enabled(vcpu->kvm))
+           kvm_hv_hypercall_enabled(vcpu))
                return kvm_hv_hypercall(vcpu);
 
        longmode = is_64_bit_mode(vcpu);
index 42292d676effecb444361a28461f1ff2ff53b4c2..0b30b4e15c38636f485ba0b88fb5c33312dc36c6 100644 (file)
@@ -407,6 +407,7 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
                       uint64_t a3);
 
 struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
+void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
 struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
 
 /*
index 18483df2b3318bdc37ca55fff7b6f3007c5b91bf..de0c76177d02a74cedbc0fe318b18867682e6a17 100644 (file)
@@ -1323,6 +1323,41 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
        return cpuid;
 }
 
+void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
+{
+       static struct kvm_cpuid2 *cpuid_full;
+       struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
+       int i, nent = 0;
+
+       if (!cpuid_full) {
+               cpuid_sys = kvm_get_supported_cpuid();
+               cpuid_hv = kvm_get_supported_hv_cpuid();
+
+               cpuid_full = malloc(sizeof(*cpuid_full) +
+                                   (cpuid_sys->nent + cpuid_hv->nent) *
+                                   sizeof(struct kvm_cpuid_entry2));
+               if (!cpuid_full) {
+                       perror("malloc");
+                       abort();
+               }
+
+               /* Need to skip KVM CPUID leaves 0x400000xx */
+               for (i = 0; i < cpuid_sys->nent; i++) {
+                       if (cpuid_sys->entries[i].function >= 0x40000000 &&
+                           cpuid_sys->entries[i].function < 0x40000100)
+                               continue;
+                       cpuid_full->entries[nent] = cpuid_sys->entries[i];
+                       nent++;
+               }
+
+               memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
+                      cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
+               cpuid_full->nent = nent + cpuid_hv->nent;
+       }
+
+       vcpu_set_cpuid(vm, vcpuid, cpuid_full);
+}
+
 struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid)
 {
        static struct kvm_cpuid2 *cpuid;
index 39a3cb2bd103f4137652351d14811752477cdf7d..ca22ee6d19cbdd7c4c03fdc1ffc672730f133010 100644 (file)
@@ -78,42 +78,6 @@ void guest_code(struct vmx_pages *vmx_pages)
        GUEST_ASSERT(vmlaunch());
 }
 
-struct kvm_cpuid2 *guest_get_cpuid(void)
-{
-       static struct kvm_cpuid2 *cpuid_full;
-       struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
-       int i, nent = 0;
-
-       if (cpuid_full)
-               return cpuid_full;
-
-       cpuid_sys = kvm_get_supported_cpuid();
-       cpuid_hv = kvm_get_supported_hv_cpuid();
-
-       cpuid_full = malloc(sizeof(*cpuid_full) +
-                           (cpuid_sys->nent + cpuid_hv->nent) *
-                           sizeof(struct kvm_cpuid_entry2));
-       if (!cpuid_full) {
-               perror("malloc");
-               abort();
-       }
-
-       /* Need to skip KVM CPUID leaves 0x400000xx */
-       for (i = 0; i < cpuid_sys->nent; i++) {
-               if (cpuid_sys->entries[i].function >= 0x40000000 &&
-                   cpuid_sys->entries[i].function < 0x40000100)
-                       continue;
-               cpuid_full->entries[nent] = cpuid_sys->entries[i];
-               nent++;
-       }
-
-       memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
-              cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
-       cpuid_full->nent = nent + cpuid_hv->nent;
-
-       return cpuid_full;
-}
-
 int main(int argc, char *argv[])
 {
        vm_vaddr_t vmx_pages_gva = 0;
@@ -135,7 +99,7 @@ int main(int argc, char *argv[])
                exit(KSFT_SKIP);
        }
 
-       vcpu_set_cpuid(vm, VCPU_ID, guest_get_cpuid());
+       vcpu_set_hv_cpuid(vm, VCPU_ID);
        vcpu_enable_evmcs(vm, VCPU_ID);
 
        run = vcpu_state(vm, VCPU_ID);
@@ -179,7 +143,7 @@ int main(int argc, char *argv[])
                /* Restore state in a new VM.  */
                kvm_vm_restart(vm, O_RDWR);
                vm_vcpu_add(vm, VCPU_ID);
-               vcpu_set_cpuid(vm, VCPU_ID, guest_get_cpuid());
+               vcpu_set_hv_cpuid(vm, VCPU_ID);
                vcpu_enable_evmcs(vm, VCPU_ID);
                vcpu_load_state(vm, VCPU_ID, state);
                run = vcpu_state(vm, VCPU_ID);
index 6e7b069322fdb4b6db99a8002f0f4a4c2d8c410d..86653361c695c2558152be8f5d09cf3d98153876 100644 (file)
@@ -92,7 +92,7 @@ int main(int argc, char *argv[])
        }
 
        vm = vm_create_default(VCPU_ID, 0, (void *) guest_code);
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+       vcpu_set_hv_cpuid(vm, VCPU_ID);
 
        struct kvm_xen_hvm_config hvmc = {
                .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,