KVM: SVM: hyper-v: Direct Virtual Flush support
author     Vineeth Pillai <viremana@linux.microsoft.com>
           Thu, 3 Jun 2021 15:14:40 +0000 (15:14 +0000)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Thu, 17 Jun 2021 17:09:38 +0000 (13:09 -0400)
From Hyper-V TLFS:
 "The hypervisor exposes hypercalls (HvFlushVirtualAddressSpace,
  HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressList, and
  HvFlushVirtualAddressListEx) that allow operating systems to more
  efficiently manage the virtual TLB. The L1 hypervisor can choose to
  allow its guest to use those hypercalls and delegate the responsibility
  to handle them to the L0 hypervisor. This requires the use of a
  partition assist page."

Add Direct Virtual Flush support for SVM.

Related VMX changes:
commit 6f6a657c9998 ("KVM/Hyper-V/VMX: Add direct tlb flush support")
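
KVM exposes this per-vCPU opt-in to userspace through the existing
KVM_CAP_HYPERV_DIRECT_TLBFLUSH capability, so the new SVM callback is
reached the same way as the VMX one. A minimal, hypothetical VMM-side
sketch (vcpu_fd and the helper name are assumptions, not part of this
patch):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /*
     * Opt one already-created vCPU into the direct TLB flush path.
     * On hosts without the required Hyper-V nested features the
     * ioctl simply fails and the VMM keeps the normal flush path.
     */
    static int enable_hv_direct_tlbflush(int vcpu_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_HYPERV_DIRECT_TLBFLUSH,
            };

            return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }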

Signed-off-by: Vineeth Pillai <viremana@linux.microsoft.com>
Message-Id: <fc8d24d8eb7017266bb961e39a171b0caf298d7f.1622730232.git.viremana@linux.microsoft.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/Makefile
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm_onhyperv.c [new file with mode: 0644]
arch/x86/kvm/svm/svm_onhyperv.h

diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index a06745c2fef1e54a5358748df5690cdeaf60d9e1..83331376b779b4d872f942a4b78d80bcfb0233d6 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -32,6 +32,10 @@ kvm-intel-$(CONFIG_X86_SGX_KVM)      += vmx/sgx.o
 
 kvm-amd-y              += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
 
+ifdef CONFIG_HYPERV
+kvm-amd-y              += svm/svm_onhyperv.o
+endif
+
 obj-$(CONFIG_KVM)      += kvm.o
 obj-$(CONFIG_KVM_INTEL)        += kvm-intel.o
 obj-$(CONFIG_KVM_AMD)  += kvm-amd.o
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1b0056ef36af657fbbf38410ee11833eddbd7f89..9bb4692728ef235bf951805eec1b29b49452ec53 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3781,6 +3781,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
        }
        svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
+       svm_hv_update_vp_id(svm->vmcb, vcpu);
+
        /*
         * Run with all-zero DR6 unless needed, so that we can get the exact cause
         * of a #DB.
diff --git a/arch/x86/kvm/svm/svm_onhyperv.c b/arch/x86/kvm/svm/svm_onhyperv.c
new file mode 100644
index 0000..98aa981
--- /dev/null
+++ b/arch/x86/kvm/svm/svm_onhyperv.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V for SVM.
+ */
+
+#include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
+
+#include <asm/mshyperv.h>
+
+#include "svm.h"
+#include "svm_ops.h"
+
+#include "hyperv.h"
+#include "kvm_onhyperv.h"
+#include "svm_onhyperv.h"
+
+int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
+{
+       struct hv_enlightenments *hve;
+       struct hv_partition_assist_pg **p_hv_pa_pg =
+                       &to_kvm_hv(vcpu->kvm)->hv_pa_pg;
+
+       if (!*p_hv_pa_pg)
+               *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+
+       if (!*p_hv_pa_pg)
+               return -ENOMEM;
+
+       hve = (struct hv_enlightenments *)to_svm(vcpu)->vmcb->control.reserved_sw;
+
+       hve->partition_assist_page = __pa(*p_hv_pa_pg);
+       hve->hv_vm_id = (unsigned long)vcpu->kvm;
+       if (!hve->hv_enlightenments_control.nested_flush_hypercall) {
+               hve->hv_enlightenments_control.nested_flush_hypercall = 1;
+               vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
+       }
+
+       return 0;
+}
+
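
A note on the allocation above: the partition assist page is allocated
lazily on the first enable and cached in the per-VM hv_pa_pg pointer,
which is the same field the VMX implementation already uses, so repeated
enables on other vCPUs of the VM reuse one page instead of allocating
their own. The enlightenments themselves live in the VMCB's
software-reserved area, so flipping nested_flush_hypercall only requires
dirtying the VMCB_HV_NESTED_ENLIGHTENMENTS clean bit, and only the first
time the bit actually changes.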
diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h
index 0f262460b2e68c47734b068b1c6d72f260458c93..9b9a55abc29fb0e7b08c1a280522a39b3d16c48e 100644
--- a/arch/x86/kvm/svm/svm_onhyperv.h
+++ b/arch/x86/kvm/svm/svm_onhyperv.h
@@ -36,6 +36,8 @@ struct hv_enlightenments {
  */
 #define VMCB_HV_NESTED_ENLIGHTENMENTS VMCB_SW
 
+int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu);
+
 static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
 {
        struct hv_enlightenments *hve =
@@ -55,6 +57,23 @@ static inline void svm_hv_hardware_setup(void)
                svm_x86_ops.tlb_remote_flush_with_range =
                                hv_remote_flush_tlb_with_range;
        }
+
+       if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) {
+               int cpu;
+
+               pr_info("kvm: Hyper-V Direct TLB Flush enabled\n");
+               for_each_online_cpu(cpu) {
+                       struct hv_vp_assist_page *vp_ap =
+                               hv_get_vp_assist_page(cpu);
+
+                       if (!vp_ap)
+                               continue;
+
+                       vp_ap->nested_control.features.directhypercall = 1;
+               }
+               svm_x86_ops.enable_direct_tlbflush =
+                               svm_hv_enable_direct_tlbflush;
+       }
 }
 
 static inline void svm_hv_vmcb_dirty_nested_enlightenments(
@@ -74,6 +93,19 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments(
            hve->hv_enlightenments_control.msr_bitmap)
                vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
 }
+
+static inline void svm_hv_update_vp_id(struct vmcb *vmcb,
+               struct kvm_vcpu *vcpu)
+{
+       struct hv_enlightenments *hve =
+               (struct hv_enlightenments *)vmcb->control.reserved_sw;
+       u32 vp_index = kvm_hv_get_vpindex(vcpu);
+
+       if (hve->hv_vp_id != vp_index) {
+               hve->hv_vp_id = vp_index;
+               vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
+       }
+}
 #else
 
 static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
@@ -88,6 +120,11 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments(
                struct kvm_vcpu *vcpu)
 {
 }
+
+static inline void svm_hv_update_vp_id(struct vmcb *vmcb,
+               struct kvm_vcpu *vcpu)
+{
+}
 #endif /* CONFIG_HYPERV */
 
 #endif /* __ARCH_X86_KVM_SVM_ONHYPERV_H__ */
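
Taken together: when HV_X64_NESTED_DIRECT_FLUSH is advertised,
svm_hv_hardware_setup() sets nested_control.features.directhypercall in
every online CPU's VP assist page and installs
svm_hv_enable_direct_tlbflush() in svm_x86_ops, while svm_vcpu_run()
calls svm_hv_update_vp_id() before each entry so the VP index in the
enlightenments area tracks kvm_hv_get_vpindex(), dirtying the VMCB only
when the value changes. The empty stubs in the !CONFIG_HYPERV branch
keep svm.c free of #ifdefs.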