KVM: arm64: Make active_vmids invalid on vCPU schedule out
authorShameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Mon, 22 Nov 2021 12:18:44 +0000 (12:18 +0000)
committerMarc Zyngier <maz@kernel.org>
Tue, 8 Feb 2022 14:57:04 +0000 (14:57 +0000)
Like the ASID allocator, we copy the active_vmids into the
reserved_vmids on a rollover. But it is unlikely that
every CPU will have a vCPU as its current task, so we may
end up unnecessarily reserving VMID space.

Hence, set active_vmids to an invalid VMID when scheduling
out a vCPU.

Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211122121844.867-5-shameerali.kolothum.thodi@huawei.com
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/vmid.c

index 47aeed22e2adef5069860472ce6791c9c07333af..305ea13c4fd0a273bf6adf5eb6f6ea4646297f12 100644 (file)
@@ -694,6 +694,7 @@ extern unsigned int kvm_arm_vmid_bits;
 int kvm_arm_vmid_alloc_init(void);
 void kvm_arm_vmid_alloc_free(void);
 void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+void kvm_arm_vmid_clear_active(void);
 
 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
 {
index be2fd84d526b233afa1bdacfb5b344ac6a8fdedc..418014998f18e3ba5dc822af99b9fdad6adea6a1 100644 (file)
@@ -417,6 +417,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        kvm_timer_vcpu_put(vcpu);
        kvm_vgic_put(vcpu);
        kvm_vcpu_pmu_restore_host(vcpu);
+       kvm_arm_vmid_clear_active();
 
        vcpu->cpu = -1;
 }
index 9aff692b6b7d3d17104261c3b21e194afb78c45c..8d5f0506fd87f392de4792431c1fdf7c0b2c2745 100644 (file)
@@ -32,6 +32,13 @@ static DEFINE_PER_CPU(u64, reserved_vmids);
 #define vmid2idx(vmid)         ((vmid) & ~VMID_MASK)
 #define idx2vmid(idx)          vmid2idx(idx)
 
+/*
+ * As vmid #0 is always reserved, we will never allocate one
+ * as below and can be treated as invalid. This is used to
+ * set the active_vmids on vCPU schedule out.
+ */
+#define VMID_ACTIVE_INVALID            VMID_FIRST_VERSION
+
 #define vmid_gen_match(vmid) \
        (!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))
 
@@ -122,6 +129,12 @@ set_vmid:
        return vmid;
 }
 
+/* Called from vCPU sched out with preemption disabled */
+void kvm_arm_vmid_clear_active(void)
+{
+       atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
+}
+
 void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 {
        unsigned long flags;
@@ -132,11 +145,17 @@ void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
        /*
         * Please refer comments in check_and_switch_context() in
         * arch/arm64/mm/context.c.
+        *
+        * Unlike ASID allocator, we set the active_vmids to
+        * VMID_ACTIVE_INVALID on vCPU schedule out to avoid
+        * reserving the VMID space needlessly on rollover.
+        * Hence explicitly check here for a "!= 0" to
+        * handle the sync with a concurrent rollover.
         */
        old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids));
-       if (old_active_vmid && vmid_gen_match(vmid) &&
-           atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
-                                    old_active_vmid, vmid))
+       if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
+           0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
+                                         old_active_vmid, vmid))
                return;
 
        raw_spin_lock_irqsave(&cpu_vmid_lock, flags);