}
 
                if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
-                       kvm_pmu_handle_pmcr(vcpu,
-                                           __vcpu_sys_reg(vcpu, PMCR_EL0));
+                       kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
 
                if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
                        kvm_vcpu_pmu_restore_guest(vcpu);
 
 
 static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
 {
-       u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);
+       u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
 
        return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
               (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
 
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
 {
-       u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
+       u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;
 
        val &= ARMV8_PMU_PMCR_N_MASK;
        if (val == 0)
        if (!kvm_vcpu_has_pmu(vcpu))
                return;
 
-       if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
+       if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
                return;
 
        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
 {
        u64 reg = 0;
 
-       if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
+       if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
                reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
 {
        int i;
 
-       if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+       if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
                return;
 
        /* Weed out disabled counters */
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
 {
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
-       return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
+       return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
               (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
 }
 
                                              ID_AA64DFR0_EL1_PMUVer_V3P5);
        return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
 }
+
+/**
+ * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU
+ * @vcpu: The vcpu pointer
+ *
+ * Return: The guest's PMCR_EL0 value
+ */
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+       return __vcpu_sys_reg(vcpu, PMCR_EL0);
+}
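
For context: the PMCR_EL0 reads converted above now go through this accessor
instead of open-coding __vcpu_sys_reg(vcpu, PMCR_EL0). Below is a minimal
usage sketch (not part of the patch; example_pmcr_n() is a hypothetical
helper) showing a field extraction through the accessor, mirroring
kvm_pmu_valid_counter_mask() above:

	/* Hypothetical caller: read PMCR_EL0.N via the new accessor. */
	static inline u64 example_pmcr_n(struct kvm_vcpu *vcpu)
	{
		u64 n = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT;

		return n & ARMV8_PMU_PMCR_N_MASK;
	}

Funneling the reads through kvm_vcpu_read_pmcr() keeps the field extractions
in one style and leaves a single place to adjust if the stored PMCR_EL0 value
ever needs post-processing before use.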
 
                 * Only update writeable bits of PMCR (continuing into
                 * kvm_pmu_handle_pmcr() as well)
                 */
-               val = __vcpu_sys_reg(vcpu, PMCR_EL0);
+               val = kvm_vcpu_read_pmcr(vcpu);
                val &= ~ARMV8_PMU_PMCR_MASK;
                val |= p->regval & ARMV8_PMU_PMCR_MASK;
                if (!kvm_supports_32bit_el0())
                kvm_pmu_handle_pmcr(vcpu, val);
        } else {
                /* PMCR.P & PMCR.C are RAZ */
-               val = __vcpu_sys_reg(vcpu, PMCR_EL0)
+               val = kvm_vcpu_read_pmcr(vcpu)
                      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
                p->regval = val;
        }
 {
        u64 pmcr, val;
 
-       pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
+       pmcr = kvm_vcpu_read_pmcr(vcpu);
        val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
        if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
                kvm_inject_undefined(vcpu);