* We do not need to take the kvm->lock here, because nobody else
         * has a reference to the struct kvm at this point and therefore
         * cannot access the devices list anyhow.
+        *
+        * The device list is generally managed as an rculist, but list_del()
+        * is used intentionally here. If a bug in KVM introduced a reader that
+        * was not backed by a reference on the kvm struct, the hope is that
+        * it'd consume the poisoned forward pointer instead of suffering a
+        * use-after-free, even though this cannot be guaranteed.
         */
        list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
                list_del(&dev->vm_node);
 
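The hunk above is in kvm_destroy_devices(), which runs only once nobody else
holds a reference to the VM, so there should be no readers left at all. The
poisoning the new comment relies on comes from the plain list primitives
themselves; their upstream definitions are essentially (from
include/linux/list.h and include/linux/rculist.h):

static inline void list_del(struct list_head *entry)
{
        __list_del_entry(entry);
        entry->next = LIST_POISON1;     /* forward pointer is poisoned */
        entry->prev = LIST_POISON2;
}

static inline void list_del_rcu(struct list_head *entry)
{
        __list_del_entry(entry);
        entry->prev = LIST_POISON2;     /* ->next stays valid for RCU readers */
}

Keeping plain list_del() here therefore turns a hypothetical buggy lockless
walker into an immediate fault on LIST_POISON1 rather than a silent
use-after-free, which is exactly the trade-off the comment spells out. The
next hunk is in kvm_device_release(), where the VM may still be live:
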
        if (dev->ops->release) {
                mutex_lock(&kvm->lock);
-               list_del(&dev->vm_node);
+               list_del_rcu(&dev->vm_node);
+               synchronize_rcu();
                dev->ops->release(dev);
                mutex_unlock(&kvm->lock);
        }
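
Here the device is unlinked with list_del_rcu() and a grace period is waited
out under kvm->lock before ops->release() tears the device down. That
writer-side pattern only pays off if lockless readers walk kvm->devices under
rcu_read_lock(); a hypothetical reader (the helper name below is made up for
illustration and is not part of this diff) would look roughly like:

static bool kvm_has_device_with_ops(struct kvm *kvm,
                                    const struct kvm_device_ops *ops)
{
        struct kvm_device *dev;
        bool found = false;

        /* Readers stay inside rcu_read_lock(); this is the window that
         * synchronize_rcu() above waits out before releasing the device. */
        rcu_read_lock();
        list_for_each_entry_rcu(dev, &kvm->devices, vm_node) {
                if (dev->ops == ops) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}

The next two hunks are in kvm_ioctl_create_device(): first the creation path,
then the anon_inode_getfd() failure path:
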
                kfree(dev);
                return ret;
        }
-       list_add(&dev->vm_node, &kvm->devices);
+       list_add_rcu(&dev->vm_node, &kvm->devices);
        mutex_unlock(&kvm->lock);
 
        if (ops->init)
        if (ret < 0) {
                kvm_put_kvm_no_destroy(kvm);
                mutex_lock(&kvm->lock);
-               list_del(&dev->vm_node);
+               list_del_rcu(&dev->vm_node);
+               synchronize_rcu();
                if (ops->release)
                        ops->release(dev);
                mutex_unlock(&kvm->lock);
 
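Both follow the same protocol: the device is published with list_add_rcu()
only after ops->create() has initialized it, and the failure path unlinks it
with list_del_rcu() plus synchronize_rcu() before ops->release() runs. Note
that ops->create() is invoked with kvm->lock already held; a simplified
sketch of that call site, condensed from the context lines above rather than
new behaviour, is:

mutex_lock(&kvm->lock);
ret = ops->create(dev, type);           /* e.g. kvm_vfio_create() */
if (ret < 0) {
        mutex_unlock(&kvm->lock);
        kfree(dev);
        return ret;
}
list_add_rcu(&dev->vm_node, &kvm->devices);
mutex_unlock(&kvm->lock);

That locking is why the final hunk, in kvm_vfio_create() in virt/kvm/vfio.c,
can keep its plain list_for_each_entry() walk rather than switching to
list_for_each_entry_rcu(): writers are excluded by kvm->lock, and the new
lockdep_assert_held() documents and enforces that assumption:
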
        struct kvm_device *tmp;
        struct kvm_vfio *kv;
 
+       lockdep_assert_held(&dev->kvm->lock);
+
        /* Only one VFIO "device" per VM */
        list_for_each_entry(tmp, &dev->kvm->devices, vm_node)
                if (tmp->ops == &kvm_vfio_ops)