KVM: arm64: Relax trapping of CTR_EL0 when FEAT_EVT is available
author Marc Zyngier <maz@kernel.org>
Mon, 15 May 2023 17:00:16 +0000 (18:00 +0100)
committer Oliver Upton <oliver.upton@linux.dev>
Sun, 21 May 2023 19:09:44 +0000 (19:09 +0000)
CTR_EL0 can often be used in userspace, and it would be nice if
KVM didn't have to emulate it unnecessarily.
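
As a point of reference (a sketch, not part of this patch), a userspace
read of CTR_EL0 is as simple as the snippet below; with HCR_EL2.TID2 set,
every such read from a guest traps to EL2 and has to be emulated by KVM:

	/*
	 * Minimal userspace sketch: read CTR_EL0 directly, as memcpy and
	 * cache-maintenance helpers commonly do. CTR_EL0 is readable at
	 * EL0 when SCTLR_EL1.UCT is set.
	 */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t ctr;

		asm volatile("mrs %0, ctr_el0" : "=r" (ctr));

		/* DminLine (bits [19:16]): log2 of the smallest D-cache line, in words */
		printf("D-cache line size: %lu bytes\n", 4UL << ((ctr >> 16) & 0xf));

		return 0;
	}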

While it isn't possible to trap the cache configuration registers
independently from CTR_EL0 in the base ARMv8.0 architecture, FEAT_EVT
allows these cache configuration registers (CCSIDR_EL1, CCSIDR2_EL1,
CLIDR_EL1 and CSSELR_EL1) to be trapped independently by setting
HCR_EL2.TID4.
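
For reference (again a sketch, not part of the patch), FEAT_EVT is
advertised through ID_AA64MMFR2_EL1.EVT (bits [59:56]); any non-zero
value implies that the HCR_EL2.TID4 trap is implemented, which is what
the new cpufeature entry below checks via ID_AA64MMFR2_EL1_EVT_IMP.
A hand-rolled EL1 check would look something like:

	#include <stdbool.h>
	#include <stdint.h>

	/* Sketch only: the kernel does this through the cpufeature framework. */
	static inline bool cpu_has_feat_evt(void)
	{
		uint64_t mmfr2;

		asm volatile("mrs %0, id_aa64mmfr2_el1" : "=r" (mmfr2));

		/* EVT, bits [59:56]: 0b0001 or higher implies TID4 is implemented */
		return ((mmfr2 >> 56) & 0xf) >= 1;
	}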

Switch to using TID4 instead of TID2 in the cases where FEAT_EVT
is available *and* KVM doesn't need to sanitise CTR_EL0 to
paper over mismatched cache configurations.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230515170016.965378-1-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/kernel/cpufeature.c
arch/arm64/tools/cpucaps

diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index baef29fcbeeedc9a978a8ee2aa2346ad7382f314..fe25a88f98a5e8964da023a4bd22cecacab2eeff 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -18,6 +18,7 @@
 #define HCR_ATA_SHIFT  56
 #define HCR_ATA                (UL(1) << HCR_ATA_SHIFT)
 #define HCR_AMVOFFEN   (UL(1) << 51)
+#define HCR_TID4       (UL(1) << 49)
 #define HCR_FIEN       (UL(1) << 47)
 #define HCR_FWB                (UL(1) << 46)
 #define HCR_API                (UL(1) << 41)
@@ -86,7 +87,7 @@
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
                         HCR_BSU_IS | HCR_FB | HCR_TACR | \
                         HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
-                        HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID2)
+                        HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3)
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
 #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index b31b32ecbe2d12697dd8c686a6e72de3ee0da79f..35bffdec0214c14c3298e4156cf62b5c7b1c84e6 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -95,6 +95,12 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
                vcpu->arch.hcr_el2 |= HCR_TVM;
        }
 
+       if (cpus_have_final_cap(ARM64_HAS_EVT) &&
+           !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
+               vcpu->arch.hcr_el2 |= HCR_TID4;
+       else
+               vcpu->arch.hcr_el2 |= HCR_TID2;
+
        if (vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 &= ~HCR_RW;
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 7d7128c651614533a8b81fc52c20353b88c3250f..4a2ab3f366de1421990279b14a900b9721f85477 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2641,6 +2641,17 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .cpu_enable = cpu_enable_dit,
                ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, DIT, IMP)
        },
+       {
+               .desc = "Enhanced Virtualization Traps",
+               .capability = ARM64_HAS_EVT,
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .sys_reg = SYS_ID_AA64MMFR2_EL1,
+               .sign = FTR_UNSIGNED,
+               .field_pos = ID_AA64MMFR2_EL1_EVT_SHIFT,
+               .field_width = 4,
+               .min_field_value = ID_AA64MMFR2_EL1_EVT_IMP,
+               .matches = has_cpuid_feature,
+       },
        {},
 };
 
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 40ba95472594db7f053ec19b0d10af5088991e95..606d1184a5e95f9f656aa11fa66bf3ed21dbb6f7 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -25,6 +25,7 @@ HAS_E0PD
 HAS_ECV
 HAS_ECV_CNTPOFF
 HAS_EPAN
+HAS_EVT
 HAS_GENERIC_AUTH
 HAS_GENERIC_AUTH_ARCH_QARMA3
 HAS_GENERIC_AUTH_ARCH_QARMA5