KVM: arm64: Reload stage-2 for VMID change on VHE
author Marc Zyngier <maz@kernel.org>
Wed, 18 Oct 2023 23:32:10 +0000 (23:32 +0000)
committer Oliver Upton <oliver.upton@linux.dev>
Fri, 20 Oct 2023 17:52:01 +0000 (17:52 +0000)
Naturally, a change to the VMID for an MMU implies a new value for
VTTBR. Reload on VMID change in anticipation of loading stage-2 on
vcpu_load() instead of every guest entry.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20231018233212.2888027-4-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/vmid.c

index af06ccb7ee343304535c30b8855d6de8d8b1f4fc..be0ab101c5577157f390a7e21b3f5ecbda73d61c 100644 (file)
@@ -1025,7 +1025,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 extern unsigned int __ro_after_init kvm_arm_vmid_bits;
 int __init kvm_arm_vmid_alloc_init(void);
 void __init kvm_arm_vmid_alloc_free(void);
-void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
 void kvm_arm_vmid_clear_active(void);
 
 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
index 4866b3f7b4ea3847d885e00cfac47a4d7abf9da3..3ab904adbd6489ca45b38aa044d3abc980b29f62 100644 (file)
@@ -950,7 +950,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 * making a thread's VMID inactive. So we need to call
                 * kvm_arm_vmid_update() in non-premptible context.
                 */
-               kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid);
+               if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
+                   has_vhe())
+                       __load_stage2(vcpu->arch.hw_mmu,
+                                     vcpu->arch.hw_mmu->arch);
 
                kvm_pmu_flush_hwstate(vcpu);
 
index 7fe8ba1a2851c5b71acbf17075987b96436f1a4a..806223b7022afdc69e3b8fb599b3a9304c9fbfba 100644 (file)
@@ -135,10 +135,11 @@ void kvm_arm_vmid_clear_active(void)
        atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
 }
 
-void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 {
        unsigned long flags;
        u64 vmid, old_active_vmid;
+       bool updated = false;
 
        vmid = atomic64_read(&kvm_vmid->id);
 
@@ -156,17 +157,21 @@ void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
        if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
            0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
                                          old_active_vmid, vmid))
-               return;
+               return false;
 
        raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
 
        /* Check that our VMID belongs to the current generation. */
        vmid = atomic64_read(&kvm_vmid->id);
-       if (!vmid_gen_match(vmid))
+       if (!vmid_gen_match(vmid)) {
                vmid = new_vmid(kvm_vmid);
+               updated = true;
+       }
 
        atomic64_set(this_cpu_ptr(&active_vmids), vmid);
        raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
+
+       return updated;
 }
 
 /*