KVM: x86/pmu: Refactoring find_arch_event() to pmc_perf_hw_id()
author Like Xu <likexu@tencent.com>
Tue, 30 Nov 2021 07:42:17 +0000 (15:42 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 23 Feb 2022 11:03:16 +0000 (12:03 +0100)
[ Upstream commit 7c174f305cbee6bdba5018aae02b84369e7ab995 ]

find_arch_event() returns an "unsigned int" value, which is
used by pmc_reprogram_counter() to program a PERF_TYPE_HARDWARE
type perf_event.

The returned value is actually the kernel-defined generic
perf_hw_id; rename the helper to pmc_perf_hw_id() and simplify its
incoming parameters so the name is self-explanatory.

Signed-off-by: Like Xu <likexu@tencent.com>
Message-Id: <20211130074221.93635-3-likexu@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/vmx/pmu_intel.c
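
As context for the rename above (not part of the patch): a minimal standalone
C sketch of the decode-and-lookup that the vendor pmc_perf_hw_id()
implementations now perform internally. The EVENTSEL_* masks mirror
ARCH_PERFMON_EVENTSEL_EVENT/UMASK, the two-entry table is an illustrative
stand-in for intel_arch_events, and the HW_* enum stands in for the kernel's
PERF_COUNT_HW_* ids; the real callbacks take a struct kvm_pmc and use the
kernel's tables.

#include <stdint.h>
#include <stdio.h>

/* Mirrors ARCH_PERFMON_EVENTSEL_EVENT / _UMASK from asm/perf_event.h. */
#define EVENTSEL_EVENT 0x000000ffULL
#define EVENTSEL_UMASK 0x0000ff00ULL

/* Stand-ins for the PERF_COUNT_HW_* generic ids; HW_MAX means "no match". */
enum { HW_CPU_CYCLES, HW_INSTRUCTIONS, HW_MAX };

/* Two-entry stand-in for the intel_arch_events mapping table. */
static const struct {
	uint8_t event_select;
	uint8_t unit_mask;
	unsigned int hw_id;
} events[] = {
	{ 0x3c, 0x00, HW_CPU_CYCLES },		/* unhalted core cycles */
	{ 0xc0, 0x00, HW_INSTRUCTIONS },	/* instructions retired */
};

/*
 * Decode the raw eventsel and look up the generic hw id, as the vendor
 * pmc_perf_hw_id() callbacks now do internally.
 */
static unsigned int pmc_perf_hw_id(uint64_t eventsel)
{
	uint8_t event_select = eventsel & EVENTSEL_EVENT;
	uint8_t unit_mask = (eventsel & EVENTSEL_UMASK) >> 8;
	unsigned int i;

	for (i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		if (events[i].event_select == event_select &&
		    events[i].unit_mask == unit_mask)
			return events[i].hw_id;

	return HW_MAX;	/* caller then falls back to PERF_TYPE_RAW */
}

int main(void)
{
	/* 0x003c: event 0x3c, umask 0x00 -> HW_CPU_CYCLES (0). */
	printf("%u\n", pmc_perf_hw_id(0x003c));
	/* 0x01c0: event 0xc0, umask 0x01 -> no match -> HW_MAX (2). */
	printf("%u\n", pmc_perf_hw_id(0x01c0));
	return 0;
}

With the decode moved into the callback, callers such as reprogram_gp_counter()
no longer pre-extract event_select/unit_mask; they simply pass the pmc and
compare the result against PERF_COUNT_HW_MAX, as the diff below shows.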

index 0772bad9165c55b09c805caee6287de9df1257bb..eec614de9af30565682b0c0f16b2abbcdc54f7d9 100644 (file)
@@ -174,7 +174,6 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
        unsigned config, type = PERF_TYPE_RAW;
-       u8 event_select, unit_mask;
        struct kvm *kvm = pmc->vcpu->kvm;
        struct kvm_pmu_event_filter *filter;
        int i;
@@ -206,17 +205,12 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
        if (!allow_event)
                return;
 
-       event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
-       unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
-
        if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
                          ARCH_PERFMON_EVENTSEL_INV |
                          ARCH_PERFMON_EVENTSEL_CMASK |
                          HSW_IN_TX |
                          HSW_IN_TX_CHECKPOINTED))) {
-               config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),
-                                                     event_select,
-                                                     unit_mask);
+               config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
                if (config != PERF_COUNT_HW_MAX)
                        type = PERF_TYPE_HARDWARE;
        }
index 0e4f2b1fa9fbdc20574483a04ec0c74a14c96550..a06d95165ac7cf5687686b3e4dfcfac35d996d6d 100644 (file)
@@ -24,8 +24,7 @@ struct kvm_event_hw_type_mapping {
 };
 
 struct kvm_pmu_ops {
-       unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
-                                   u8 unit_mask);
+       unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
        unsigned (*find_fixed_event)(int idx);
        bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
        struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
index e152241d1d709852b90b318c682de2012ed17703..06f8034f62e4fd6a9c3ba873e194bd49374953c0 100644 (file)
@@ -134,10 +134,10 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
        return &pmu->gp_counters[msr_to_index(msr)];
 }
 
-static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
-                                   u8 event_select,
-                                   u8 unit_mask)
+static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
 {
+       u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+       u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
        int i;
 
        for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
@@ -320,7 +320,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
 }
 
 struct kvm_pmu_ops amd_pmu_ops = {
-       .find_arch_event = amd_find_arch_event,
+       .pmc_perf_hw_id = amd_pmc_perf_hw_id,
        .find_fixed_event = amd_find_fixed_event,
        .pmc_is_enabled = amd_pmc_is_enabled,
        .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
index 10cc4f65c4efdb36c0c2fce05e3945d138cff093..6427d95de01cf8e408c6ae410da2dfc4238f3b8d 100644 (file)
@@ -68,10 +68,11 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
                reprogram_counter(pmu, bit);
 }
 
-static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
-                                     u8 event_select,
-                                     u8 unit_mask)
+static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
 {
+       struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+       u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+       u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
        int i;
 
        for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
@@ -706,7 +707,7 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
 }
 
 struct kvm_pmu_ops intel_pmu_ops = {
-       .find_arch_event = intel_find_arch_event,
+       .pmc_perf_hw_id = intel_pmc_perf_hw_id,
        .find_fixed_event = intel_find_fixed_event,
        .pmc_is_enabled = intel_pmc_is_enabled,
        .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,