KVM: arm64: Save PSTATE early on exit
author Marc Zyngier <maz@kernel.org>
Tue, 16 Nov 2021 10:20:06 +0000 (10:20 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 17 Dec 2021 09:30:14 +0000 (10:30 +0100)
[ Upstream commit 83bb2c1a01d7127d5adc7d69d7aaa3f7072de2b4 ]

In order to be able to use primitives such as vcpu_mode_is_32bit(),
we need to synchronize the guest PSTATE. However, this is currently
done deep into the bowels of the world-switch code, and we do have
helpers evaluating this much earlier (__vgic_v3_perform_cpuif_access
and handle_aarch32_guest, for example).
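
For reference, vcpu_mode_is_32bit() only ever looks at the saved copy
of the guest's PSTATE; roughly, from
arch/arm64/include/asm/kvm_emulate.h (abridged here for illustration):

	static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
	{
		return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
	}

	static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
	{
		return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
	}

Any caller that runs before the world-switch code has copied SPSR_EL2
into ctxt.regs.pstate therefore sees a stale value.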

Move the saving of the guest pstate into the early fixups, which
cures the first issue (__vgic_v3_perform_cpuif_access). The second
one (handle_aarch32_guest) will be addressed separately.

Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index a0e78a6027be0d1f5a7a1cd4f74fa1d38feeece5..c75e84489f57b96729f1dd82694711986c84adbc 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -416,6 +416,12 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
  */
 static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
+       /*
+        * Save PSTATE early so that we can evaluate the vcpu mode
+        * early on.
+        */
+       vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+
        if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
                vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
 
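(Context, not part of this patch: with the copy made at the top of
fixup_guest_exit(), helpers invoked from the fixup path can trust the
vcpu mode. A condensed sketch of one such consumer, based on
arch/arm64/kvm/hyp/vgic-v3-sr.c around this kernel version; details
may differ between versions:

	int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
	{
		...
		/* Relies on the pstate copy saved above being current */
		if (vcpu_mode_is_32bit(vcpu)) {
			if (!kvm_condition_valid(vcpu)) {
				__kvm_skip_instr(vcpu);
				return 1;
			}
			...
		}
		...
	}

This only illustrates why the save has to happen before the fixup
handlers run.)
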
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index de7e14c862e6c9b5415df6a7daf815142a302a16..7ecca8b078519fd315c92cbc7cc059c0d2269bed 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 {
        ctxt->regs.pc                   = read_sysreg_el2(SYS_ELR);
-       ctxt->regs.pstate               = read_sysreg_el2(SYS_SPSR);
+       /*
+        * Guest PSTATE gets saved at guest fixup time in all
+        * cases. We still need to handle the nVHE host side here.
+        */
+       if (!has_vhe() && ctxt->__hyp_running_vcpu)
+               ctxt->regs.pstate       = read_sysreg_el2(SYS_SPSR);
 
        if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
                ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
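
(For context on the new condition: on nVHE, the host's context is the
only one tagged with a running vcpu, which __kvm_vcpu_run() sets up
before saving host state; condensed from
arch/arm64/kvm/hyp/nvhe/switch.c as an illustration rather than an
exact quote:

	int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
	{
		struct kvm_cpu_context *host_ctxt;
		...
		host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		/* Tag the host context with the vcpu about to run */
		host_ctxt->__hyp_running_vcpu = vcpu;
		...
		__sysreg_save_state_nvhe(host_ctxt);
		...
	}

So the guest context, whose __hyp_running_vcpu is NULL, no longer has
its saved pstate overwritten here, while the nVHE host path keeps
saving SPSR_EL2 exactly as before.)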