kvm_sys_regs_create_debugfs(kvm);
 }
 
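+/*
+ * Tear down the per-VM MPIDR-to-vCPU lookup table. Readers access
+ * kvm->arch.mpidr_data under RCU, so clear the pointer and wait for a
+ * grace period to elapse before freeing the table. The config lock
+ * serializes against concurrent writers.
+ */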
+static void kvm_destroy_mpidr_data(struct kvm *kvm)
+{
+       struct kvm_mpidr_data *data;
+
+       mutex_lock(&kvm->arch.config_lock);
+
+       data = rcu_dereference_protected(kvm->arch.mpidr_data,
+                                        lockdep_is_held(&kvm->arch.config_lock));
+       if (data) {
+               rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
+               synchronize_rcu();
+               kfree(data);
+       }
+
+       mutex_unlock(&kvm->arch.config_lock);
+}
+
 /**
  * kvm_arch_destroy_vm - destroy the VM data structure
  * @kvm:       pointer to the KVM struct
        if (is_protected_kvm_enabled())
                pkvm_destroy_hyp_vm(kvm);
 
-       kfree(kvm->arch.mpidr_data);
+       kvm_destroy_mpidr_data(kvm);
+
        kfree(kvm->arch.sysreg_masks);
        kvm_destroy_vcpus(kvm);
 
 
        vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
 
+       /*
+        * This vCPU may have been created after mpidr_data was initialized.
+        * Throw out the pre-computed mappings if that is the case, forcing
+        * KVM to fall back to iteratively searching the vCPUs.
+        */
+       kvm_destroy_mpidr_data(vcpu->kvm);
+
        err = kvm_vgic_vcpu_init(vcpu);
        if (err)
                return err;
 
        mutex_lock(&kvm->arch.config_lock);
 
-       if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
+       if (rcu_access_pointer(kvm->arch.mpidr_data) ||
+           atomic_read(&kvm->online_vcpus) == 1)
                goto out;
 
        kvm_for_each_vcpu(c, vcpu, kvm) {
                data->cmpidr_to_idx[index] = c;
        }
 
-       kvm->arch.mpidr_data = data;
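+       /* Publish the fully-initialized table; lookups use rcu_dereference(). */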
+       rcu_assign_pointer(kvm->arch.mpidr_data, data);
 out:
        mutex_unlock(&kvm->arch.config_lock);
 }
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
 {
-       struct kvm_vcpu *vcpu;
+       struct kvm_vcpu *vcpu = NULL;
+       struct kvm_mpidr_data *data;
        unsigned long i;
 
        mpidr &= MPIDR_HWID_BITMASK;
 
-       if (kvm->arch.mpidr_data) {
-               u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);
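+       /* Fast path: consult the pre-computed MPIDR table under RCU protection. */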
+       rcu_read_lock();
+       data = rcu_dereference(kvm->arch.mpidr_data);
 
-               vcpu = kvm_get_vcpu(kvm,
-                                   kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
+       if (data) {
+               u16 idx = kvm_mpidr_index(data, mpidr);
+
+               vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]);
                if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
                        vcpu = NULL;
+       }
 
+       rcu_read_unlock();
+
+       if (vcpu)
                return vcpu;
-       }
 
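+       /* Fall back to a linear search if no table is installed or the lookup missed. */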
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))