KVM: arm64: Restore the stage-2 context in VHE's __tlb_switch_to_host()
author: Marc Zyngier <maz@kernel.org>
Wed, 18 Oct 2023 23:32:09 +0000 (23:32 +0000)
committer: Oliver Upton <oliver.upton@linux.dev>
Fri, 20 Oct 2023 17:52:01 +0000 (17:52 +0000)
An MMU notifier could cause us to clobber the stage-2 context loaded on
a CPU when we switch to another VM's context to invalidate. This isn't
an issue right now as the stage-2 context gets reloaded on every guest
entry, but is disastrous when moving __load_stage2() into the
vcpu_load() path.

Restore the previous stage-2 context on the way out of a TLB
invalidation if we installed something else. Deliberately do this after
TGE=1 is synchronized to keep things safe in light of the speculative AT
errata.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20231018233212.2888027-3-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/hyp/vhe/tlb.c

index f3f2e142e4f44581c8f2f3f6d164b36bb2e5e045..b636b4111dbf504e18b68ef6bacd2c5f3418ef40 100644 (file)
 #include <asm/tlbflush.h>
 
 struct tlb_inv_context {
-       unsigned long   flags;
-       u64             tcr;
-       u64             sctlr;
+       struct kvm_s2_mmu       *mmu;
+       unsigned long           flags;
+       u64                     tcr;
+       u64                     sctlr;
 };
 
 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
                                  struct tlb_inv_context *cxt)
 {
+       struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
        u64 val;
 
        local_irq_save(cxt->flags);
 
+       if (vcpu && mmu != vcpu->arch.hw_mmu)
+               cxt->mmu = vcpu->arch.hw_mmu;
+       else
+               cxt->mmu = NULL;
+
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                /*
                 * For CPUs that are affected by ARM errata 1165522 or 1530923,
@@ -69,6 +76,10 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
        isb();
 
+       /* ... and the stage-2 MMU context that we switched away from */
+       if (cxt->mmu)
+               __load_stage2(cxt->mmu, cxt->mmu->arch);
+
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                /* Restore the registers to what they were */
                write_sysreg_el1(cxt->tcr, SYS_TCR);