arm: perf/kvm: Use GENMASK for ARMV8_PMU_PMCR_N
author	James Clark <james.clark@arm.com>
Mon, 11 Dec 2023 16:13:14 +0000 (16:13 +0000)
committer	Will Deacon <will@kernel.org>
Tue, 12 Dec 2023 09:46:21 +0000 (09:46 +0000)
This is so that FIELD_GET() and FIELD_PREP() can be used, and so that the
field definitions are in a format consistent with arm64/tools/sysreg.
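
A minimal sketch of the pattern this change adopts, assuming only the
standard linux/bits.h and linux/bitfield.h helpers; EXAMPLE_PMCR_N and the
example_* functions are illustrative names, not part of this patch:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	/* Field spanning bits [15:11], like ARMV8_PMU_PMCR_N. */
	#define EXAMPLE_PMCR_N	GENMASK(15, 11)

	static inline u64 example_pmcr_nr_counters(u64 pmcr)
	{
		/*
		 * Extract bits [15:11] and shift them down to bit 0,
		 * equivalent to the old
		 * (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK.
		 */
		return FIELD_GET(EXAMPLE_PMCR_N, pmcr);
	}

	static inline u64 example_pmcr_set_n(u64 pmcr, u64 n)
	{
		/*
		 * Clear the old field and insert @n, the open-coded form
		 * of what u64_replace_bits() does in kvm_vcpu_read_pmcr().
		 */
		return (pmcr & ~EXAMPLE_PMCR_N) | FIELD_PREP(EXAMPLE_PMCR_N, n);
	}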

Signed-off-by: James Clark <james.clark@arm.com>
Link: https://lore.kernel.org/r/20231211161331.1277825-3-james.clark@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c
drivers/perf/arm_pmuv3.c
include/linux/perf/arm_pmuv3.h

index fe99b3dab6ce5d4b9c9f1e9e0071abfb38cd93d9..3d9467ff73bcbff82f8f22f51dd0784a83cecede 100644 (file)
@@ -267,9 +267,8 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 {
-       u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
+       u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));
 
-       val &= ARMV8_PMU_PMCR_N_MASK;
        if (val == 0)
                return BIT(ARMV8_PMU_CYCLE_IDX);
        else
@@ -1136,8 +1135,7 @@ u8 kvm_arm_pmu_get_pmuver_limit(void)
  */
 u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
 {
-       u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) &
-                       ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+       u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
 
-       return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
+       return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N);
 }
index 4735e1b37fb3e08afd9825ae4e9ebad953605937..ff45d688bd7daa8445b99cda9cf87c1650df18e8 100644 (file)
@@ -877,7 +877,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
        u64 pmcr, val;
 
        pmcr = kvm_vcpu_read_pmcr(vcpu);
-       val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+       val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
        if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
                kvm_inject_undefined(vcpu);
                return false;
@@ -1143,7 +1143,7 @@ static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
 static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
                    u64 val)
 {
-       u8 new_n = (val >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+       u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
        struct kvm *kvm = vcpu->kvm;
 
        mutex_lock(&kvm->arch.config_lock);
index 09478e2b825e037e4932d7c5a73683626c94caa5..374e973a4e4296ccb76f571c49e1f8c1d3801ecc 100644 (file)
@@ -15,6 +15,7 @@
 #include <clocksource/arm_arch_timer.h>
 
 #include <linux/acpi.h>
+#include <linux/bitfield.h>
 #include <linux/clocksource.h>
 #include <linux/of.h>
 #include <linux/perf/arm_pmu.h>
@@ -1111,8 +1112,7 @@ static void __armv8pmu_probe_pmu(void *info)
        probe->present = true;
 
        /* Read the nb of CNTx counters supported from PMNC */
-       cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
-               & ARMV8_PMU_PMCR_N_MASK;
+       cpu_pmu->num_events = FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read());
 
        /* Add the CPU cycles counter */
        cpu_pmu->num_events += 1;
index 9c226adf938a2a185f006533afeb01e347cceb4e..ed62bd75cec7aeab8073d1b708aaf5599fc2cf1d 100644 (file)
 #define ARMV8_PMU_PMCR_DP      (1 << 5) /* Disable CCNT if non-invasive debug*/
 #define ARMV8_PMU_PMCR_LC      (1 << 6) /* Overflow on 64 bit cycle counter */
 #define ARMV8_PMU_PMCR_LP      (1 << 7) /* Long event counter enable */
-#define ARMV8_PMU_PMCR_N_SHIFT 11  /* Number of counters supported */
-#define ARMV8_PMU_PMCR_N_MASK  0x1f
+#define ARMV8_PMU_PMCR_N       GENMASK(15, 11) /* Number of counters supported */
 #define ARMV8_PMU_PMCR_MASK    0xff    /* Mask for writable bits */
 
 /*