KVM: X86: Add functions for retrieving L2 TSC fields from common code
author Ilias Stamatis <ilstam@amazon.com>
Wed, 26 May 2021 18:44:13 +0000 (19:44 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 17 Jun 2021 17:09:28 +0000 (13:09 -0400)
In order to implement as much of the nested TSC scaling logic as
possible in common code, we need these vendor callbacks for retrieving
the TSC offset and the TSC multiplier that L1 has set for L2.

Signed-off-by: Ilias Stamatis <ilstam@amazon.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210526184418.28881-7-ilstam@amazon.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h

index e7bef91cee04ab6e1d23345561f94bcc76428727..c4906f73603d90845dce32c455103714d05af869 100644 (file)
@@ -87,6 +87,8 @@ KVM_X86_OP(set_identity_map_addr)
 KVM_X86_OP(get_mt_mask)
 KVM_X86_OP(load_mmu_pgd)
 KVM_X86_OP_NULL(has_wbinvd_exit)
+KVM_X86_OP(get_l2_tsc_offset)
+KVM_X86_OP(get_l2_tsc_multiplier)
 KVM_X86_OP(write_l1_tsc_offset)
 KVM_X86_OP(get_exit_info)
 KVM_X86_OP(check_intercept)
index d6bba19bc094a5dee960fdb688348e70469c16e2..6ec00427c6fd5acf7e92a201499856b0ecb3fcda 100644 (file)
@@ -1311,6 +1311,8 @@ struct kvm_x86_ops {
 
        bool (*has_wbinvd_exit)(void);
 
+       u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
+       u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
        /* Returns actual tsc_offset set in active VMCS */
        u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
index 8c3918a11826a01e02ab054e4d23a4d15068d149..95ae2734760ebed5ffd1c84e524154ee50f9beaf 100644 (file)
@@ -1080,6 +1080,18 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
        seg->base = 0;
 }
 
+/*
+ * Return the TSC offset that L1 programmed for L2, i.e. the value from
+ * the cached nested VMCB control area (svm->nested.ctl.tsc_offset).
+ */
+static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       return svm->nested.ctl.tsc_offset;
+}
+
+/*
+ * Return the TSC multiplier in effect for L2.  This always reports the
+ * default (1:1) scaling ratio; NOTE(review): presumably SVM nested TSC
+ * scaling is not supported at this point in the series — confirm.
+ */
+static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+       return kvm_default_tsc_scaling_ratio;
+}
+
 static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -4524,6 +4536,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
        .has_wbinvd_exit = svm_has_wbinvd_exit,
 
+       .get_l2_tsc_offset = svm_get_l2_tsc_offset,
+       .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
        .write_l1_tsc_offset = svm_write_l1_tsc_offset,
 
        .load_mmu_pgd = svm_load_mmu_pgd,
index d3201efa6a07f2bdfa336d6311f7a41aeaf3ea9b..2ce2c73645bf9bcaa235ad5e5972ca21ddd719ea 100644 (file)
@@ -1787,6 +1787,27 @@ static void setup_msrs(struct vcpu_vmx *vmx)
        vmx->guest_uret_msrs_loaded = false;
 }
 
+/*
+ * Return the TSC offset L1 has set for L2 via vmcs12, or 0 if L1 did
+ * not enable the "use TSC offsetting" primary execution control.
+ */
+u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
+{
+       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+       if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
+               return vmcs12->tsc_offset;
+
+       return 0;
+}
+
+/*
+ * Return the TSC multiplier L1 has set for L2 via vmcs12.  The vmcs12
+ * value is only honored when L1 enabled both TSC offsetting (primary
+ * control) and TSC scaling (secondary control); otherwise fall back to
+ * the default (1:1) scaling ratio.
+ */
+u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+       if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
+           nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
+               return vmcs12->tsc_multiplier;
+
+       return kvm_default_tsc_scaling_ratio;
+}
+
 static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -7700,6 +7721,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 
        .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
+       .get_l2_tsc_offset = vmx_get_l2_tsc_offset,
+       .get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
        .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
 
        .load_mmu_pgd = vmx_load_mmu_pgd,
index 16e4e457ba23c98ef20798e880022f092921ed97..aa97c82e3451bab56a39068661a3f1e6c9523644 100644 (file)
@@ -404,6 +404,9 @@ void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
 void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
 void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
 
+u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
+u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
+
 static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
                                             int type, bool value)
 {