From: Paolo Bonzini
Date: Wed, 25 May 2022 09:28:56 +0000 (-0400)
Subject: KVM: x86/pmu: Use only the uniform interface reprogram_counter()
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=e99fae6edebcdf53658f531ee3c913ca74536355;p=linux.git

KVM: x86/pmu: Use only the uniform interface reprogram_counter()

Since reprogram_counter() and reprogram_{gp, fixed}_counter() currently
take the same input parameter "struct kvm_pmc *pmc", the callers can be
simplified by using the uniform exported interface reprogram_counter(),
which allows reprogram_{gp, fixed}_counter() to be made static and their
EXPORT_SYMBOL_GPL to be dropped.

Signed-off-by: Like Xu
Message-Id: <20220518132512.37864-8-likexu@tencent.com>
Signed-off-by: Paolo Bonzini
---

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 4c354298e5160..d2a0581d9d4d6 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -283,7 +283,7 @@ out:
 	return allow_event;
 }
 
-void reprogram_gp_counter(struct kvm_pmc *pmc)
+static void reprogram_gp_counter(struct kvm_pmc *pmc)
 {
 	u64 config;
 	u32 type = PERF_TYPE_RAW;
@@ -325,9 +325,8 @@ void reprogram_gp_counter(struct kvm_pmc *pmc)
 			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
 			      eventsel & ARCH_PERFMON_EVENTSEL_INT);
 }
-EXPORT_SYMBOL_GPL(reprogram_gp_counter);
 
-void reprogram_fixed_counter(struct kvm_pmc *pmc)
+static void reprogram_fixed_counter(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
 	int idx = pmc->idx - INTEL_PMC_IDX_FIXED;
@@ -355,7 +354,6 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc)
 			      !(en_field & 0x1), /* exclude kernel */
 			      pmi);
 }
-EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
 
 void reprogram_counter(struct kvm_pmc *pmc)
 {
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index fe31bbd1f9060..60faf27678d9c 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -173,8 +173,6 @@ static inline void kvm_init_pmu_capability(void)
 					     KVM_PMC_MAX_FIXED);
 }
 
-void reprogram_gp_counter(struct kvm_pmc *pmc);
-void reprogram_fixed_counter(struct kvm_pmc *pmc);
 void reprogram_counter(struct kvm_pmc *pmc);
 
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index a1fbb72d6fbb6..79346def7c961 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -288,7 +288,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		data &= ~pmu->reserved_bits;
 		if (data != pmc->eventsel) {
 			pmc->eventsel = data;
-			reprogram_gp_counter(pmc);
+			reprogram_counter(pmc);
 		}
 		return 0;
 	}
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 13d54c5fd12ba..0dc270e6717cc 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -52,7 +52,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
 
 		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
-		reprogram_fixed_counter(pmc);
+		reprogram_counter(pmc);
 	}
 }
 
@@ -493,7 +493,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
 		if (!(data & reserved_bits)) {
 			pmc->eventsel = data;
-			reprogram_gp_counter(pmc);
+			reprogram_counter(pmc);
 			return 0;
 		}
 	} else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false))
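
Note: the body of reprogram_counter() itself is untouched by (and not shown in)
this diff. A minimal sketch of the dispatch that makes the single entry point
work, assuming it simply branches on the counter type via pmc_is_gp() from
arch/x86/kvm/pmu.h; the exact body is an assumption, not part of this patch:

void reprogram_counter(struct kvm_pmc *pmc)
{
	/* Sketch only: assumed dispatch on counter type, not from this patch. */
	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc);	/* general-purpose counter */
	else
		reprogram_fixed_counter(pmc);	/* fixed-function counter */
}

With every caller funneled through this one routine, svm/pmu.c and
vmx/pmu_intel.c no longer need to know which kind of counter they hold, which
is what allows the per-type helpers to become static.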