KVM: x86/pmu: Move handling PERF_GLOBAL_CTRL and friends to common x86
author Like Xu <likexu@tencent.com>
Sat, 3 Jun 2023 01:10:50 +0000 (18:10 -0700)
committer Sean Christopherson <seanjc@google.com>
Wed, 7 Jun 2023 00:31:44 +0000 (17:31 -0700)
Move the handling of GLOBAL_CTRL, GLOBAL_STATUS, and GLOBAL_OVF_CTRL
(a.k.a. GLOBAL_STATUS_RESET) from Intel PMU code to generic x86 PMU code.
AMD PerfMonV2 defines three registers that have the same semantics as
Intel's variants, just with different names and indices.  Conveniently,
since KVM virtualizes GLOBAL_CTRL on Intel only for PMU v2 and above, and
AMD's version shows up in v2, KVM can use common code for the existence
check as well.
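
For reference, AMD's PerfMonV2 counterparts (MSR names as defined in
arch/x86/include/asm/msr-index.h) line up with the Intel MSRs as:

  MSR_CORE_PERF_GLOBAL_CTRL      <=>  MSR_AMD64_PERF_CNTR_GLOBAL_CTL
  MSR_CORE_PERF_GLOBAL_STATUS    <=>  MSR_AMD64_PERF_CNTR_GLOBAL_STATUS
  MSR_CORE_PERF_GLOBAL_OVF_CTRL  <=>  MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR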

Signed-off-by: Like Xu <likexu@tencent.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20230603011058.1038821-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.h

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 1690d41c183085eb9433f69264ae9fd2066778c3..c720cc186ab4e9de42f6bf352fd26a3dc5b34b15 100644
@@ -562,6 +562,14 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
+       switch (msr) {
+       case MSR_CORE_PERF_GLOBAL_STATUS:
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+               return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
+       default:
+               break;
+       }
        return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
                static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
 }
@@ -577,13 +585,70 @@ static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-       return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+       u32 msr = msr_info->index;
+
+       switch (msr) {
+       case MSR_CORE_PERF_GLOBAL_STATUS:
+               msr_info->data = pmu->global_status;
+               break;
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+               msr_info->data = pmu->global_ctrl;
+               break;
+       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+               msr_info->data = 0;
+               break;
+       default:
+               return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
+       }
+
+       return 0;
 }
 
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-       kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
-       return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+       u32 msr = msr_info->index;
+       u64 data = msr_info->data;
+       u64 diff;
+
+       switch (msr) {
+       case MSR_CORE_PERF_GLOBAL_STATUS:
+               if (!msr_info->host_initiated)
+                       return 1; /* RO MSR */
+
+               if (data & pmu->global_status_mask)
+                       return 1;
+
+               pmu->global_status = data;
+               break;
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+               if (!kvm_valid_perf_global_ctrl(pmu, data))
+                       return 1;
+
+               if (pmu->global_ctrl != data) {
+                       diff = pmu->global_ctrl ^ data;
+                       pmu->global_ctrl = data;
+                       reprogram_counters(pmu, diff);
+               }
+               break;
+       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+               /*
+                * GLOBAL_OVF_CTRL, a.k.a. GLOBAL_STATUS_RESET, clears bits in
+                * GLOBAL_STATUS, and so the set of reserved bits is the same.
+                */
+               if (data & pmu->global_status_mask)
+                       return 1;
+
+               if (!msr_info->host_initiated)
+                       pmu->global_status &= ~data;
+               break;
+       default:
+               kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
+               return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
+       }
+
+       return 0;
 }
 
 /* refresh PMU settings. This function generally is called when underlying
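
The guest-visible semantics are unchanged by the move.  For example, the
usual overflow-handling sequence in a guest kernel, sketched below with the
standard rdmsrl()/wrmsrl() accessors (illustrative only, not part of the
patch), now flows through the common get/set paths above:

	u64 status;

	/* Snapshot the sticky overflow bits... */
	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	/* ...and clear them; GLOBAL_OVF_CTRL is write-1-to-clear. */
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, status);
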
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 986563aeeef8fc6d3a457bb29da66cf51f023c18..7c2c6414244365e8171af82bae7086166d8dbcf1 100644
@@ -41,6 +41,20 @@ struct kvm_pmu_ops {
 
 void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
 
+static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
+{
+       /*
+        * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
+        * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
+        * greater than zero.  However, KVM only exposes and emulates the MSR
+        * to/for the guest if the guest PMU supports at least "Architectural
+        * Performance Monitoring Version 2".
+        *
+        * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
+        */
+       return pmu->version > 1;
+}
+
 static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
 {
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
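
As the comment above notes, the architectural existence check is
CPUID.0AH:EAX[7:0].  A sketch of reading that field with the kernel's
union cpuid10_eax from arch/x86/include/asm/perf_event.h (illustrative
only; KVM's guest-facing check is the version > 1 test above):

	union cpuid10_eax eax;

	/* CPUID.0AH:EAX[7:0] is the architectural PMU version. */
	eax.full = cpuid_eax(0xa);

	/* KVM requires v2+, not merely a non-zero version. */
	bool has_global_ctrl = eax.split.version_id > 1;
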
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index e35cf0bd0df9cd421c5de275d19624eaf37670b8..ba2ed6d87364512ebc00c4e059695e9c8925b509 100644
@@ -2649,7 +2649,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        }
 
        if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
-           intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
+           kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
            WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
                                     vmcs12->guest_ia32_perf_global_ctrl))) {
                *entry_failure_code = ENTRY_FAIL_DEFAULT;
@@ -4524,7 +4524,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
                vcpu->arch.pat = vmcs12->host_ia32_pat;
        }
        if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
-           intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
+           kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
                WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
                                         vmcs12->host_ia32_perf_global_ctrl));
 
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index efd113f24c1b43295751d113c7cb135295801526..ff2f52d1e22f7dfb8b52be241981befa3cbbd66d 100644
@@ -100,7 +100,7 @@ static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
 {
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 
-       if (!intel_pmu_has_perf_global_ctrl(pmu))
+       if (!kvm_pmu_has_perf_global_ctrl(pmu))
                return true;
 
        return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
@@ -186,11 +186,7 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 
        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
-       case MSR_CORE_PERF_GLOBAL_STATUS:
-       case MSR_CORE_PERF_GLOBAL_CTRL:
-       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-               return intel_pmu_has_perf_global_ctrl(pmu);
-               break;
+               return kvm_pmu_has_perf_global_ctrl(pmu);
        case MSR_IA32_PEBS_ENABLE:
                ret = vcpu_get_perf_capabilities(vcpu) & PERF_CAP_PEBS_FORMAT;
                break;
@@ -340,15 +336,6 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                msr_info->data = pmu->fixed_ctr_ctrl;
                break;
-       case MSR_CORE_PERF_GLOBAL_STATUS:
-               msr_info->data = pmu->global_status;
-               break;
-       case MSR_CORE_PERF_GLOBAL_CTRL:
-               msr_info->data = pmu->global_ctrl;
-               break;
-       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-               msr_info->data = 0;
-               break;
        case MSR_IA32_PEBS_ENABLE:
                msr_info->data = pmu->pebs_enable;
                break;
@@ -398,36 +385,6 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (pmu->fixed_ctr_ctrl != data)
                        reprogram_fixed_counters(pmu, data);
                break;
-       case MSR_CORE_PERF_GLOBAL_STATUS:
-               if (!msr_info->host_initiated)
-                       return 1; /* RO MSR */
-
-               if (data & pmu->global_status_mask)
-                       return 1;
-
-               pmu->global_status = data;
-               break;
-       case MSR_CORE_PERF_GLOBAL_CTRL:
-               if (!kvm_valid_perf_global_ctrl(pmu, data))
-                       return 1;
-
-               if (pmu->global_ctrl != data) {
-                       diff = pmu->global_ctrl ^ data;
-                       pmu->global_ctrl = data;
-                       reprogram_counters(pmu, diff);
-               }
-               break;
-       case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-               /*
-                * GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
-                * GLOBAL_STATUS, and so the set of reserved bits is the same.
-                */
-               if (data & pmu->global_status_mask)
-                       return 1;
-
-               if (!msr_info->host_initiated)
-                       pmu->global_status &= ~data;
-               break;
        case MSR_IA32_PEBS_ENABLE:
                if (data & pmu->pebs_enable_mask)
                        return 1;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 9e66531861cf92f532ac555a8bc70970e2358f5a..32384ba3849949c2b04f195b7a37c47c7fcc7b43 100644
@@ -93,18 +93,6 @@ union vmx_exit_reason {
        u32 full;
 };
 
-static inline bool intel_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
-{
-       /*
-        * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
-        * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
-        * greater than zero.  However, KVM only exposes and emulates the MSR
-        * to/for the guest if the guest PMU supports at least "Architectural
-        * Performance Monitoring Version 2".
-        */
-       return pmu->version > 1;
-}
-
 struct lbr_desc {
        /* Basic info about guest LBR records. */
        struct x86_pmu_lbr records;