KVM: arm64: Add feature checking helpers
author		Marc Zyngier <maz@kernel.org>
		Wed, 14 Feb 2024 13:18:03 +0000 (13:18 +0000)
committer	Oliver Upton <oliver.upton@linux.dev>
		Mon, 19 Feb 2024 17:12:59 +0000 (17:12 +0000)
In order to make it easier to check whether a particular feature
is exposed to a guest, add a new set of helpers, with kvm_has_feat()
being the most useful.

Let's start making use of them in the PMU code (courtesy of Oliver).
Follow-up changes will introduce additional use patterns.
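
For instance, the open-coded test in kvm_pmu_evtyper_mask():

	if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))

becomes the self-describing:

	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP))

without having to fetch the sanitised register with IDREG() first.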

Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Co-developed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240214131827.2856277-3-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c
include/kvm/arm_pmu.h

index 21c57b812569f22532bd57c7fb17af669d3eb370..527da3d2cedb6a2a4264431bd0251370a9d9903e 100644 (file)
@@ -1233,4 +1233,48 @@ static inline void kvm_hyp_reserve(void) { }
 void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
 bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
 
+#define __expand_field_sign_unsigned(id, fld, val)                     \
+       ((u64)SYS_FIELD_VALUE(id, fld, val))
+
+#define __expand_field_sign_signed(id, fld, val)                       \
+       ({                                                              \
+               u64 __val = SYS_FIELD_VALUE(id, fld, val);              \
+               sign_extend64(__val, id##_##fld##_WIDTH - 1);           \
+       })
+
+#define expand_field_sign(id, fld, val)                                        \
+       (id##_##fld##_SIGNED ?                                          \
+        __expand_field_sign_signed(id, fld, val) :                     \
+        __expand_field_sign_unsigned(id, fld, val))
+
+#define get_idreg_field_unsigned(kvm, id, fld)                         \
+       ({                                                              \
+               u64 __val = IDREG((kvm), SYS_##id);                     \
+               FIELD_GET(id##_##fld##_MASK, __val);                    \
+       })
+
+#define get_idreg_field_signed(kvm, id, fld)                           \
+       ({                                                              \
+               u64 __val = get_idreg_field_unsigned(kvm, id, fld);     \
+               sign_extend64(__val, id##_##fld##_WIDTH - 1);           \
+       })
+
+#define get_idreg_field_enum(kvm, id, fld)                             \
+       get_idreg_field_unsigned(kvm, id, fld)
+
+#define get_idreg_field(kvm, id, fld)                                  \
+       (id##_##fld##_SIGNED ?                                          \
+        get_idreg_field_signed(kvm, id, fld) :                         \
+        get_idreg_field_unsigned(kvm, id, fld))
+
+#define kvm_has_feat(kvm, id, fld, limit)                              \
+       (get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))
+
+#define kvm_has_feat_enum(kvm, id, fld, val)                           \
+       (get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val))
+
+#define kvm_has_feat_range(kvm, id, fld, min, max)                     \
+       (get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
+        get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
+
 #endif /* __ARM64_KVM_HOST_H__ */
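
Why the signed/unsigned split above matters: for signed ID register fields,
0xf encodes -1 ("below the baseline"), so a plain unsigned ">=" would wrongly
report the feature as present. Below is a minimal userspace sketch of that
pitfall, not kernel code: sign_extend64() is re-implemented to mirror
include/linux/bitops.h, and using the (signed) ID_AA64PFR0_EL1.FP field as
the example is an assumption for illustration only.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the kernel's sign_extend64(); 'index' is the sign bit. */
	static int64_t sign_extend64(uint64_t value, int index)
	{
		int shift = 63 - index;

		return (int64_t)(value << shift) >> shift;
	}

	int main(void)
	{
		uint64_t fp = 0xf;	/* FP field reads 0b1111: not implemented */
		uint64_t limit = 0;	/* FP == IMP is encoded as 0 */

		/* Compared as unsigned, 0xf >= 0 claims FP is implemented... */
		printf("unsigned: %d\n", fp >= limit);
		/* ...sign-extended from bit 3, 0xf is -1 and the test fails. */
		printf("signed:   %d\n", sign_extend64(fp, 4 - 1) >= (int64_t)limit);
		return 0;
	}

This is exactly what expand_field_sign() and get_idreg_field() guard against
by dispatching on id##_##fld##_SIGNED.
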
index 3d9467ff73bcbff82f8f22f51dd0784a83cecede..925522470b2bab949b8bac8930a69bf364874d1e 100644 (file)
@@ -64,12 +64,11 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
 {
        u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
                   kvm_pmu_event_mask(kvm);
-       u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
 
-       if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))
+       if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP))
                mask |= ARMV8_PMU_INCLUDE_EL2;
 
-       if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr0))
+       if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
                mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
                        ARMV8_PMU_EXCLUDE_NS_EL1 |
                        ARMV8_PMU_EXCLUDE_EL3;
@@ -83,8 +82,10 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
  */
 static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
 {
+       struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+
        return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
-               kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
+               kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5));
 }
 
 static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
@@ -556,7 +557,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
                return;
 
        /* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
-       if (!kvm_pmu_is_3p5(vcpu))
+       if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
                val &= ~ARMV8_PMU_PMCR_LP;
 
        /* The reset bits don't indicate any state, and shouldn't be saved. */
index 041b11825578d54452ed0fcb993f135bc6147000..3c31f8cb9eefac3dfba620a6070ecb807d394515 100644 (file)
@@ -505,10 +505,9 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
                          struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
 {
-       u64 val = IDREG(vcpu->kvm, SYS_ID_AA64MMFR1_EL1);
        u32 sr = reg_to_encoding(r);
 
-       if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
+       if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) {
                kvm_inject_undefined(vcpu);
                return false;
        }
@@ -2748,8 +2747,7 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
                return ignore_write(vcpu, p);
        } else {
                u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
-               u64 pfr = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1);
-               u32 el3 = !!SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr);
+               u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
 
                p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
                             (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
index 4b9d8fb393a8496da369a9f899cee00d4bab252b..eb4c369a79eb31b705aa78ec28437d3de69b97b7 100644 (file)
@@ -90,16 +90,6 @@ void kvm_vcpu_pmu_resync_el0(void);
                        vcpu->arch.pmu.events = *kvm_get_pmu_events();  \
        } while (0)
 
-/*
- * Evaluates as true when emulating PMUv3p5, and false otherwise.
- */
-#define kvm_pmu_is_3p5(vcpu) ({                                                \
-       u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);                \
-       u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);        \
-                                                                       \
-       pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;                          \
-})
-
 u8 kvm_arm_pmu_get_pmuver_limit(void);
 u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
 int kvm_arm_set_default_pmu(struct kvm *kvm);
@@ -168,7 +158,6 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 }
 
 #define kvm_vcpu_has_pmu(vcpu)         ({ false; })
-#define kvm_pmu_is_3p5(vcpu)           ({ false; })
 static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
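
Although only kvm_has_feat() is exercised by this patch, the enum and range
variants compose the same way. A hypothetical illustration (these particular
checks and variable names are not taken from this series):

	/* exact match on an enumerated field */
	bool native_4k = kvm_has_feat_enum(kvm, ID_AA64MMFR0_EL1, TGRAN4, IMP);

	/* signedness-aware match within an inclusive window */
	bool pmu_pre_3p5 = kvm_has_feat_range(kvm, ID_AA64DFR0_EL1, PMUVer,
					      IMP, V3P4);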