KVM: arm64: nv: Emulate PSTATE.M for a guest hypervisor
author     Marc Zyngier <maz@kernel.org>
           Thu, 9 Feb 2023 17:58:16 +0000 (17:58 +0000)
committer  Oliver Upton <oliver.upton@linux.dev>
           Sat, 11 Feb 2023 10:13:29 +0000 (10:13 +0000)
We can no longer blindly copy the VCPU's PSTATE into SPSR_EL2 when
entering the guest, nor copy it back verbatim when taking an exception
to the hypervisor: because we emulate virtual EL2 in EL1, the mode
field has to be translated from (v)EL2 to EL1 on entry, and back from
EL1 to (v)EL2 on exit.

This requires keeping track of the context in which we entered the
guest, for which we transiently use a dedicated flag (VCPU_HYP_CONTEXT).
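
For reference, the mode encodings involved (as defined in
arch/arm64/include/uapi/asm/ptrace.h) differ only in their EL number,
so the translation is a pure rewrite of the low PSTATE bits. Below is a
minimal, self-contained user-space sketch of the EL2->EL1 direction that
to_hw_pstate() implements further down; the helper name
el2_to_el1_mode() and the test value are ours, not the kernel's:

#include <assert.h>
#include <stdint.h>

/* Mode encodings, as in arch/arm64/include/uapi/asm/ptrace.h */
#define PSR_MODE_EL1t	0x00000004
#define PSR_MODE_EL1h	0x00000005
#define PSR_MODE_EL2t	0x00000008
#define PSR_MODE_EL2h	0x00000009
#define PSR_MODE_MASK	0x0000000f
#define PSR_MODE32_BIT	0x00000010

/*
 * Translate the mode field from (v)EL2 to EL1, leaving all other
 * PSTATE bits (NZCV, DAIF, ...) untouched. Anything that is not an
 * EL2 mode (including AArch32 modes, which have PSR_MODE32_BIT set)
 * passes through unmodified.
 */
static uint64_t el2_to_el1_mode(uint64_t pstate)
{
	uint64_t mode = pstate & (PSR_MODE_MASK | PSR_MODE32_BIT);

	switch (mode) {
	case PSR_MODE_EL2t:
		mode = PSR_MODE_EL1t;
		break;
	case PSR_MODE_EL2h:
		mode = PSR_MODE_EL1h;
		break;
	}

	return (pstate & ~(uint64_t)(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
}

int main(void)
{
	/* vEL2h with DAIF masked and Z+C set must become EL1h */
	assert(el2_to_el1_mode(0x600003c9) == 0x600003c5);
	return 0;
}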

Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230209175820.1939006-15-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
arch/arm64/kvm/hyp/vhe/switch.c

arch/arm64/include/asm/kvm_host.h
index 1b585a4dd1227079220acf826a8201b148cda32b..70eab7a6386bd9d9c8242cf7f25f9579fb8828c8 100644
@@ -645,6 +645,8 @@ struct kvm_vcpu_arch {
 #define DEBUG_STATE_SAVE_SPE   __vcpu_single_flag(iflags, BIT(5))
 /* Save TRBE context if active  */
 #define DEBUG_STATE_SAVE_TRBE  __vcpu_single_flag(iflags, BIT(6))
+/* vcpu running in HYP context */
+#define VCPU_HYP_CONTEXT       __vcpu_single_flag(iflags, BIT(7))
 
 /* SVE enabled for host EL0 */
 #define HOST_SVE_ENABLED       __vcpu_single_flag(sflags, BIT(0))
arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index baa5b9b3dde58ac46bfdf56dde0de4db38996460..0fbbf2870b7b58c14f62aca7c1148615fe821b75 100644
@@ -156,9 +156,26 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
        write_sysreg_el1(ctxt_sys_reg(ctxt, SPSR_EL1),  SYS_SPSR);
 }
 
+/* Read the VCPU state's PSTATE, but translate (v)EL2 to EL1. */
+static inline u64 to_hw_pstate(const struct kvm_cpu_context *ctxt)
+{
+       u64 mode = ctxt->regs.pstate & (PSR_MODE_MASK | PSR_MODE32_BIT);
+
+       switch (mode) {
+       case PSR_MODE_EL2t:
+               mode = PSR_MODE_EL1t;
+               break;
+       case PSR_MODE_EL2h:
+               mode = PSR_MODE_EL1h;
+               break;
+       }
+
+       return (ctxt->regs.pstate & ~(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
+}
+
 static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
 {
-       u64 pstate = ctxt->regs.pstate;
+       u64 pstate = to_hw_pstate(ctxt);
        u64 mode = pstate & PSR_AA32_MODE_MASK;
 
        /*
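
The value written into the real SPSR_EL2 on guest entry must name the
exception level the guest actually runs at (EL1), never the emulated
vEL2. The invariant to_hw_pstate() provides can be stated as: only the
mode field may change, every other PSTATE bit is preserved. Continuing
the user-space sketch from the commit message (names and values are
illustrative):

/* Property check for the EL2->EL1 translation */
static void check_to_hw_pstate(uint64_t pstate)
{
	uint64_t hw = el2_to_el1_mode(pstate);

	/* Nothing outside the mode field may change */
	assert((hw & ~(uint64_t)PSR_MODE_MASK) ==
	       (pstate & ~(uint64_t)PSR_MODE_MASK));

	/* An EL2 mode must have been squashed to the matching EL1 one */
	if ((pstate & (PSR_MODE_MASK | PSR_MODE32_BIT)) == PSR_MODE_EL2t)
		assert((hw & PSR_MODE_MASK) == PSR_MODE_EL1t);
	if ((pstate & (PSR_MODE_MASK | PSR_MODE32_BIT)) == PSR_MODE_EL2h)
		assert((hw & PSR_MODE_MASK) == PSR_MODE_EL1h);
}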
arch/arm64/kvm/hyp/vhe/switch.c
index 1a97391fedd29335647796ce491f5bda03a3f189..76ea9392363d4c8352c56574c821228fb253b34c 100644
@@ -120,6 +120,25 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
 
 static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
+       /*
+        * If we were in HYP context on entry, adjust the PSTATE view
+        * so that the usual helpers work correctly.
+        */
+       if (unlikely(vcpu_get_flag(vcpu, VCPU_HYP_CONTEXT))) {
+               u64 mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
+
+               switch (mode) {
+               case PSR_MODE_EL1t:
+                       mode = PSR_MODE_EL2t;
+                       break;
+               case PSR_MODE_EL1h:
+                       mode = PSR_MODE_EL2h;
+                       break;
+               }
+
+               *vcpu_cpsr(vcpu) &= ~(PSR_MODE_MASK | PSR_MODE32_BIT);
+               *vcpu_cpsr(vcpu) |= mode;
+       }
 }
 
 /* Switch to the guest for VHE systems running in EL2 */
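
The hunk above performs the inverse translation on exit. Once the mode
has been squashed to EL1 for the hardware, the saved PSTATE alone can
no longer tell a guest hypervisor running in vEL2 apart from a genuine
vEL1 guest; that is exactly what the VCPU_HYP_CONTEXT flag, set just
before entry in the next hunk, disambiguates. Continuing the sketch,
the two translations are inverse to each other precisely when the flag
says we entered in HYP context:

/* Inverse of el2_to_el1_mode(); mirrors the fixup in early_exit_filter() */
static uint64_t el1_to_el2_mode(uint64_t pstate)
{
	uint64_t mode = pstate & (PSR_MODE_MASK | PSR_MODE32_BIT);

	switch (mode) {
	case PSR_MODE_EL1t:
		mode = PSR_MODE_EL2t;
		break;
	case PSR_MODE_EL1h:
		mode = PSR_MODE_EL2h;
		break;
	}

	return (pstate & ~(uint64_t)(PSR_MODE_MASK | PSR_MODE32_BIT)) | mode;
}

/*
 * Only valid when VCPU_HYP_CONTEXT was set on entry: applying this to
 * a genuine EL1 guest would wrongly promote its mode to EL2.
 */
static void check_round_trip(uint64_t vel2_pstate)
{
	assert(el1_to_el2_mode(el2_to_el1_mode(vel2_pstate)) == vel2_pstate);
}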
@@ -154,6 +173,11 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
        sysreg_restore_guest_state_vhe(guest_ctxt);
        __debug_switch_to_guest(vcpu);
 
+       if (is_hyp_ctxt(vcpu))
+               vcpu_set_flag(vcpu, VCPU_HYP_CONTEXT);
+       else
+               vcpu_clear_flag(vcpu, VCPU_HYP_CONTEXT);
+
        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu);