struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
-void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
-void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
 
 int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
 unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
 
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
-void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
-void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
 
 u64 __kvm_call_hyp(void *hypfn, ...);
 #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
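For readers unfamiliar with the wrapper, kvm_call_hyp() is how the host invokes functions living in the HYP/EL2 text: kvm_ksym_ref() resolves the kernel symbol to its HYP alias before __kvm_call_hyp() makes the call. A minimal usage sketch (the callees named here are typical arm KVM hyp entry points, listed only for illustration):

	/* Sketch: invoking HYP-mode helpers through the wrapper above. */
	kvm_call_hyp(__kvm_flush_vm_context);
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);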
 
        kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
 }
 
-void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
-{
-       vcpu->arch.pause = true;
-       kvm_vcpu_kick(vcpu);
-}
-
-void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
-{
-       struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
-       vcpu->arch.pause = false;
-       swake_up(wq);
-}
-
 void kvm_arm_resume_guest(struct kvm *kvm)
 {
        int i;
        struct kvm_vcpu *vcpu;
 
-       kvm_for_each_vcpu(i, vcpu, kvm)
-               kvm_arm_resume_vcpu(vcpu);
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               vcpu->arch.pause = false;
+               swake_up(kvm_arch_vcpu_wq(vcpu));
+       }
 }
 
 static void vcpu_sleep(struct kvm_vcpu *vcpu)
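For context: with kvm_arm_resume_guest() now open-coding the per-VCPU resume, the halt side it pairs with is untouched by this hunk. Judging from the kvm_make_all_cpus_request() tail visible above, it presumably still reads roughly as follows (a sketch, not part of the diff):

void kvm_arm_halt_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* Mark every VCPU paused, then force them all out of the guest. */
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
}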
 
  * be migrated while we don't hold the IRQ locks and we don't want to be
  * chasing moving targets.
  *
- * For private interrupts, we only have to make sure the single and only VCPU
- * that can potentially queue the IRQ is stopped.
+ * For private interrupts we don't have to do anything, because userspace
+ * accesses to the VGIC state already require all VCPUs to be stopped, and
+ * only the VCPU itself can modify the active state of its private
+ * interrupts, which guarantees that the VCPU is not running.
  */
 static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
 {
-       if (intid < VGIC_NR_PRIVATE_IRQS)
-               kvm_arm_halt_vcpu(vcpu);
-       else
+       if (intid > VGIC_NR_PRIVATE_IRQS)
                kvm_arm_halt_guest(vcpu->kvm);
 }
 
 /* See vgic_change_active_prepare */
 static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
 {
-       if (intid < VGIC_NR_PRIVATE_IRQS)
-               kvm_arm_resume_vcpu(vcpu);
-       else
+       if (intid > VGIC_NR_PRIVATE_IRQS)
                kvm_arm_resume_guest(vcpu->kvm);
 }
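The private/shared boundary these checks rely on comes from the VGIC header; assuming the usual definitions in include/kvm/arm_vgic.h, SGIs and PPIs (INTIDs 0..31) form the per-VCPU private range and everything above that is a shared SPI:

/* For reference (not part of this diff), from include/kvm/arm_vgic.h: */
#define VGIC_NR_SGIS		16	/* software-generated interrupts, INTIDs 0..15  */
#define VGIC_NR_PPIS		16	/* private peripheral interrupts, INTIDs 16..31 */
#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)	/* i.e. 32 */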
 
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
+       mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);
 
        __vgic_mmio_write_cactive(vcpu, addr, len, val);
 
        vgic_change_active_finish(vcpu, intid);
+       mutex_unlock(&vcpu->kvm->lock);
 }
 
 void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
+       mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);
 
        __vgic_mmio_write_sactive(vcpu, addr, len, val);
 
        vgic_change_active_finish(vcpu, intid);
+       mutex_unlock(&vcpu->kvm->lock);
 }
 
 void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
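The uaccess variants whose signatures appear as context above presumably skip the kvm->lock/prepare/finish sequence and call the inner helpers directly, since userspace register accesses already run with the VCPUs stopped; a sketch of what that looks like (shown for the cactive case, the sactive one being symmetric):

void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	/* Userspace access path: no need to halt the guest here. */
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
}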
 
 
 /*
  * Locking order is always:
- * its->cmd_lock (mutex)
- *   its->its_lock (mutex)
- *     vgic_cpu->ap_list_lock
- *       kvm->lpi_list_lock
- *         vgic_irq->irq_lock
+ * kvm->lock (mutex)
+ *   its->cmd_lock (mutex)
+ *     its->its_lock (mutex)
+ *       vgic_cpu->ap_list_lock
+ *         kvm->lpi_list_lock
+ *           vgic_irq->irq_lock
  *
  * If you need to take multiple locks, always take the upper lock first,
  * then the lower ones, e.g. first take the its_lock, then the irq_lock.
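A hypothetical illustration of that ordering, using the lock names from the comment above (outer mutexes first, released in reverse order; intermediate locks may be skipped but never taken out of order):

	mutex_lock(&kvm->lock);
	mutex_lock(&its->its_lock);
	spin_lock(&irq->irq_lock);
	/* ... modify ITS / IRQ state ... */
	spin_unlock(&irq->irq_lock);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->lock);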