KVM: arm64: Drop unnecessary masking of PMU registers
author    Marc Zyngier <maz@kernel.org>
          Mon, 19 Jul 2021 12:39:00 +0000 (13:39 +0100)
committer Marc Zyngier <maz@kernel.org>
          Mon, 2 Aug 2021 13:26:33 +0000 (14:26 +0100)
We always sanitise our PMU sysregs on the write side, so there
is no need to do it on the read side as well.

Drop the unnecessary masking.
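
For context, the sanitisation happens on the write side: the guest-supplied
value is ANDed with kvm_pmu_valid_counter_mask() before it is stored in the
shadow sysreg or handed to the enable/disable helpers, so the shadow value
can never carry invalid counter bits. A rough sketch of that write path,
paraphrased from access_pmcnten() in sys_regs.c (not the verbatim source;
details such as the Op2 check are simplified):

    /*
     * Write path sketch: the guest value is sanitised with the
     * valid counter mask *before* it touches the shadow register,
     * so the stored value never has invalid counter bits set.
     */
    mask = kvm_pmu_valid_counter_mask(vcpu);
    if (p->is_write) {
            val = p->regval & mask;           /* sanitised here */
            if (r->Op2 & 0x1) {
                    /* accessing PMCNTENSET_EL0 */
                    __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
                    kvm_pmu_enable_counter_mask(vcpu, val);
            } else {
                    /* accessing PMCNTENCLR_EL0 */
                    __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
                    kvm_pmu_disable_counter_mask(vcpu, val);
            }
    } else {
            /* Read side: the shadow value is already clean */
            p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
    }

With the shadow registers guaranteed clean, the read side can simply return
them, which is what the hunks below do.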

Acked-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210719123902.1493805-3-maz@kernel.org
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index f33825c995cbbe215d603aa0e3ca659f8245c6ea..fae4e95b586c0f72119708c7553e318a9180e904 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -373,7 +373,6 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
                reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
-               reg &= kvm_pmu_valid_counter_mask(vcpu);
        }
 
        return reg;
@@ -569,7 +568,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 
        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter_mask(vcpu,
-                      __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
+                      __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
        } else {
                kvm_pmu_disable_counter_mask(vcpu, mask);
        }
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 96bdfa0e68b22ed9153f8ac2a9b80989b47070c3..f22139658e48184de8abe76dfb4a5d0f2cd70ed0 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -880,7 +880,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                        kvm_pmu_disable_counter_mask(vcpu, val);
                }
        } else {
-               p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
+               p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
        }
 
        return true;
@@ -904,7 +904,7 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                        /* accessing PMINTENCLR_EL1 */
                        __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
        } else {
-               p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
+               p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
        }
 
        return true;
@@ -926,7 +926,7 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                        /* accessing PMOVSCLR_EL0 */
                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
        } else {
-               p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
+               p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
        }
 
        return true;