KVM: arm64: Always set HCR_TID2
Author:    Akihiko Odaki <akihiko.odaki@daynix.com>
           Thu, 12 Jan 2023 02:38:50 +0000 (11:38 +0900)
Committer: Oliver Upton <oliver.upton@linux.dev>
           Thu, 12 Jan 2023 21:07:43 +0000 (21:07 +0000)
Always set HCR_TID2 to trap CTR_EL0, CCSIDR2_EL1, CLIDR_EL1, and
CSSELR_EL1. This saves a few lines of code and allows their access
trap handlers to be employed for more purposes than were anticipated
by the old condition for setting HCR_TID2.

Suggested-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Reviewed-by: Reiji Watanabe <reijiw@google.com>
Link: https://lore.kernel.org/r/20230112023852.42012-6-akihiko.odaki@daynix.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h

index 0df3fc3a017371cf402efc50c32fe326a4e14711..158f2033fde97daeea2a1e761410f7cce3aafb45 100644 (file)
  * SWIO:       Turn set/way invalidates into set/way clean+invalidate
  * PTW:                Take a stage2 fault if a stage1 walk steps in device memory
  * TID3:       Trap EL1 reads of group 3 ID registers
+ * TID2:       Trap CTR_EL0, CCSIDR2_EL1, CLIDR_EL1, and CSSELR_EL1
  */
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
                         HCR_BSU_IS | HCR_FB | HCR_TACR | \
                         HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
-                        HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 )
+                        HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID2)
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
index 9bdba47f7e149f90055657aa251155708a806ad6..30c4598d643b39851ce24718ec487fd6adf7ca2a 100644 (file)
@@ -88,10 +88,6 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
        if (vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 &= ~HCR_RW;
 
-       if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
-           vcpu_el1_is_32bit(vcpu))
-               vcpu->arch.hcr_el2 |= HCR_TID2;
-
        if (kvm_has_mte(vcpu->kvm))
                vcpu->arch.hcr_el2 |= HCR_ATA;
 }
index 35a159d131b5f862c16b52842ef6b0ba8088fb00..374390a9212eb1654099a9687c0344ebd6a02b89 100644 (file)
@@ -705,7 +705,6 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
                return false;
 
        switch (reg) {
-       case CSSELR_EL1:        *val = read_sysreg_s(SYS_CSSELR_EL1);   break;
        case SCTLR_EL1:         *val = read_sysreg_s(SYS_SCTLR_EL12);   break;
        case CPACR_EL1:         *val = read_sysreg_s(SYS_CPACR_EL12);   break;
        case TTBR0_EL1:         *val = read_sysreg_s(SYS_TTBR0_EL12);   break;
@@ -750,7 +749,6 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
                return false;
 
        switch (reg) {
-       case CSSELR_EL1:        write_sysreg_s(val, SYS_CSSELR_EL1);    break;
        case SCTLR_EL1:         write_sysreg_s(val, SYS_SCTLR_EL12);    break;
        case CPACR_EL1:         write_sysreg_s(val, SYS_CPACR_EL12);    break;
        case TTBR0_EL1:         write_sysreg_s(val, SYS_TTBR0_EL12);    break;
index baa5b9b3dde58ac46bfdf56dde0de4db38996460..147cb4c846c68102096f243a060071e3d4432d5d 100644 (file)
@@ -39,7 +39,6 @@ static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
 
 static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 {
-       ctxt_sys_reg(ctxt, CSSELR_EL1)  = read_sysreg(csselr_el1);
        ctxt_sys_reg(ctxt, SCTLR_EL1)   = read_sysreg_el1(SYS_SCTLR);
        ctxt_sys_reg(ctxt, CPACR_EL1)   = read_sysreg_el1(SYS_CPACR);
        ctxt_sys_reg(ctxt, TTBR0_EL1)   = read_sysreg_el1(SYS_TTBR0);
@@ -95,7 +94,6 @@ static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
 static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 {
        write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1),     vmpidr_el2);
-       write_sysreg(ctxt_sys_reg(ctxt, CSSELR_EL1),    csselr_el1);
 
        if (has_vhe() ||
            !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {