From: Marc Zyngier
Date: Wed, 14 Feb 2024 13:18:07 +0000 (+0000)
Subject: KVM: arm64: nv: Add sanitising to VNCR-backed HCRX_EL2
X-Git-Url: http://git.maquefel.me/?a=commitdiff_plain;h=d39051d39269dff2ec82c61f3da60edec08d5643;p=linux.git

KVM: arm64: nv: Add sanitising to VNCR-backed HCRX_EL2

Just like its little friends, HCRX_EL2 gets the feature set treatment
when backed by VNCR.

Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20240214131827.2856277-7-maz@kernel.org
Signed-off-by: Oliver Upton
---

diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index eda05eb16c0a3..ced30c90521a0 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -265,6 +265,48 @@ int kvm_init_nv_sysregs(struct kvm *kvm)
 		res1 |= HCR_E2H;
 	set_sysreg_masks(kvm, HCR_EL2, res0, res1);
 
+	/* HCRX_EL2 */
+	res0 = HCRX_EL2_RES0;
+	res1 = HCRX_EL2_RES1;
+	if (!kvm_has_feat(kvm, ID_AA64ISAR3_EL1, PACM, TRIVIAL_IMP))
+		res0 |= HCRX_EL2_PACMEn;
+	if (!kvm_has_feat(kvm, ID_AA64PFR2_EL1, FPMR, IMP))
+		res0 |= HCRX_EL2_EnFPM;
+	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
+		res0 |= HCRX_EL2_GCSEn;
+	if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, SYSREG_128, IMP))
+		res0 |= HCRX_EL2_EnIDCP128;
+	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, DEV_ASYNC))
+		res0 |= (HCRX_EL2_EnSDERR | HCRX_EL2_EnSNERR);
+	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, DF2, IMP))
+		res0 |= HCRX_EL2_TMEA;
+	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP))
+		res0 |= HCRX_EL2_D128En;
+	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
+		res0 |= HCRX_EL2_PTTWI;
+	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SCTLRX, IMP))
+		res0 |= HCRX_EL2_SCTLR2En;
+	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
+		res0 |= HCRX_EL2_TCR2En;
+	if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
+		res0 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
+	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, CMOW, IMP))
+		res0 |= HCRX_EL2_CMOW;
+	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP))
+		res0 |= (HCRX_EL2_VFNMI | HCRX_EL2_VINMI | HCRX_EL2_TALLINT);
+	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) ||
+	    !(read_sysreg_s(SYS_SMIDR_EL1) & SMIDR_EL1_SMPS))
+		res0 |= HCRX_EL2_SMPME;
+	if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
+		res0 |= (HCRX_EL2_FGTnXS | HCRX_EL2_FnXS);
+	if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V))
+		res0 |= HCRX_EL2_EnASR;
+	if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64))
+		res0 |= HCRX_EL2_EnALS;
+	if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
+		res0 |= HCRX_EL2_EnAS0;
+	set_sysreg_masks(kvm, HCRX_EL2, res0, res1);
+
 	/* HFG[RW]TR_EL2 */
 	res0 = res1 = 0;
 	if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
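
For readers unfamiliar with the scheme: each feature the VM is not given
contributes its control bit(s) to res0, and the res0/res1 pair recorded by
set_sysreg_masks() is what later sanitises the VNCR-backed register. A
minimal, compilable sketch of how such a mask pair is applied follows; the
names reg_masks and sanitise_reg are illustrative assumptions, not the
actual KVM helpers.

/*
 * Illustrative sketch only: applies a res0/res1 pair to a value.
 * reg_masks and sanitise_reg are made-up names, not KVM internals.
 */
#include <stdint.h>
#include <stdio.h>

struct reg_masks {
	uint64_t res0;	/* bits the VM must always see as 0 */
	uint64_t res1;	/* bits the VM must always see as 1 */
};

static uint64_t sanitise_reg(uint64_t val, const struct reg_masks *m)
{
	val &= ~m->res0;	/* strip bits gated on absent features */
	val |= m->res1;		/* force RES1 bits */
	return val;
}

int main(void)
{
	/* Hypothetical masks: treat bit 0 as RES0 and bit 1 as RES1. */
	struct reg_masks hcrx = { .res0 = 1ULL << 0, .res1 = 1ULL << 1 };

	printf("0x%llx\n",
	       (unsigned long long)sanitise_reg(0xfULL, &hcrx)); /* 0xe */
	return 0;
}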