KVM: arm64: Fix circular locking dependency
Author:    Sebastian Ene <sebastianene@google.com>
           Wed, 24 Jan 2024 09:10:28 +0000 (09:10 +0000)
Committer: Marc Zyngier <maz@kernel.org>
           Tue, 30 Jan 2024 21:30:33 +0000 (21:30 +0000)
KVM's locking rules require that vcpu->mutex be taken *inside*
kvm->lock. This rule is violated by pkvm_create_hyp_vm(), which acquires
kvm->lock while already holding vcpu->mutex, taken earlier in
kvm_vcpu_ioctl(). Avoid the circular locking dependency altogether by
protecting the hyp VM handle with the config_lock, much like we already
do for other forms of VM-scoped data.
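
For context, a minimal sketch of the inverted ordering described above.
The call path between kvm_vcpu_ioctl() and pkvm_create_hyp_vm() is
elided and the bodies are simplified illustrations, not the actual
implementations; only the pre-patch pkvm_create_hyp_vm() locking mirrors
the code changed in the diff below.

    /* virt/kvm/kvm_main.c (simplified sketch) */
    static long kvm_vcpu_ioctl(struct file *filp, unsigned int ioctl,
                               unsigned long arg)
    {
            struct kvm_vcpu *vcpu = filp->private_data;

            mutex_lock(&vcpu->mutex);      /* vcpu->mutex taken first */
            /* ... eventually reaches pkvm_create_hyp_vm() ... */
            mutex_unlock(&vcpu->mutex);
            return 0;
    }

    /* arch/arm64/kvm/pkvm.c, before this patch */
    int pkvm_create_hyp_vm(struct kvm *host_kvm)
    {
            int ret = 0;

            /*
             * kvm->lock ends up nested *inside* vcpu->mutex here,
             * inverting the documented ordering and creating the
             * circular dependency this patch removes.
             */
            mutex_lock(&host_kvm->lock);
            if (!host_kvm->arch.pkvm.handle)
                    ret = __pkvm_create_hyp_vm(host_kvm);
            mutex_unlock(&host_kvm->lock);

            return ret;
    }

The fix below keeps the same shape but takes host_kvm->arch.config_lock
instead, which, unlike kvm->lock, can be taken while vcpu->mutex is held.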

Signed-off-by: Sebastian Ene <sebastianene@google.com>
Cc: stable@vger.kernel.org
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240124091027.1477174-2-sebastianene@google.com
arch/arm64/kvm/pkvm.c

index 8350fb8fee0b998ccf27dca4b7bf2e858846ccd3..b7be96a5359737d41576af46eee1f68852632846 100644
@@ -101,6 +101,17 @@ void __init kvm_hyp_reserve(void)
                 hyp_mem_base);
 }
 
+static void __pkvm_destroy_hyp_vm(struct kvm *host_kvm)
+{
+       if (host_kvm->arch.pkvm.handle) {
+               WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
+                                         host_kvm->arch.pkvm.handle));
+       }
+
+       host_kvm->arch.pkvm.handle = 0;
+       free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
+}
+
 /*
  * Allocates and donates memory for hypervisor VM structs at EL2.
  *
@@ -181,7 +192,7 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
        return 0;
 
 destroy_vm:
-       pkvm_destroy_hyp_vm(host_kvm);
+       __pkvm_destroy_hyp_vm(host_kvm);
        return ret;
 free_vm:
        free_pages_exact(hyp_vm, hyp_vm_sz);
@@ -194,23 +205,19 @@ int pkvm_create_hyp_vm(struct kvm *host_kvm)
 {
        int ret = 0;
 
-       mutex_lock(&host_kvm->lock);
+       mutex_lock(&host_kvm->arch.config_lock);
        if (!host_kvm->arch.pkvm.handle)
                ret = __pkvm_create_hyp_vm(host_kvm);
-       mutex_unlock(&host_kvm->lock);
+       mutex_unlock(&host_kvm->arch.config_lock);
 
        return ret;
 }
 
 void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
 {
-       if (host_kvm->arch.pkvm.handle) {
-               WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
-                                         host_kvm->arch.pkvm.handle));
-       }
-
-       host_kvm->arch.pkvm.handle = 0;
-       free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
+       mutex_lock(&host_kvm->arch.config_lock);
+       __pkvm_destroy_hyp_vm(host_kvm);
+       mutex_unlock(&host_kvm->arch.config_lock);
 }
 
 int pkvm_init_host_vm(struct kvm *host_kvm)