#define KVM_REQ_SLEEP \
        KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_IRQ_PENDING    KVM_ARCH_REQ(1)
 
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int __attribute_const__ kvm_target_cpu(void);
 
 
 #define KVM_REQ_SLEEP \
        KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_IRQ_PENDING    KVM_ARCH_REQ(1)
 
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
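
The two hunks above add the same definition to the 32-bit and 64-bit kvm_host.h: KVM_REQ_IRQ_PENDING takes architecture request bit 1 via KVM_ARCH_REQ(1), with none of the KVM_REQUEST_* flags that KVM_REQ_SLEEP carries. A request is raised with kvm_make_request() and consumed with kvm_check_request(), which behaves like a test-and-clear on the vCPU's request bitmap. The sketch below is a rough userspace model of that pattern, for illustration only: struct vcpu_model, make_request() and check_request() are invented names, and the C11 atomics only approximate the ordering the real helpers in include/linux/kvm_host.h provide.

/*
 * Userspace model of the vCPU request bitmap -- not kernel code.
 * Requester sets a bit; the vCPU later observes and clears it.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REQ_SLEEP        0UL   /* stands in for KVM_REQ_SLEEP */
#define REQ_IRQ_PENDING  1UL   /* stands in for KVM_REQ_IRQ_PENDING */

struct vcpu_model {
        _Atomic unsigned long requests;   /* stands in for vcpu->requests */
};

/* Requester side: publish the bit after any state it refers to. */
static void make_request(struct vcpu_model *vcpu, unsigned long req)
{
        atomic_fetch_or_explicit(&vcpu->requests, 1UL << req,
                                 memory_order_release);
}

/* vCPU side: observe and clear the bit; returns true at most once per request. */
static bool check_request(struct vcpu_model *vcpu, unsigned long req)
{
        unsigned long bit = 1UL << req;

        if (!(atomic_load_explicit(&vcpu->requests, memory_order_acquire) & bit))
                return false;

        atomic_fetch_and_explicit(&vcpu->requests, ~bit, memory_order_acq_rel);
        return true;
}

int main(void)
{
        struct vcpu_model vcpu = { .requests = 0 };

        make_request(&vcpu, REQ_IRQ_PENDING);
        printf("first check:  %d\n", check_request(&vcpu, REQ_IRQ_PENDING)); /* 1 */
        printf("second check: %d\n", check_request(&vcpu, REQ_IRQ_PENDING)); /* 0 */
        return 0;
}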
 
        if (kvm_request_pending(vcpu)) {
                if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
                        vcpu_req_sleep(vcpu);
+
+               /*
+                * Clear IRQ_PENDING requests that were made to guarantee
+                * that a VCPU sees new virtual interrupts.
+                */
+               kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
        }
 }
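
The handling above is deliberately empty: kvm_check_request() is called only for its side effect of clearing the bit. The request has already done its job by forcing the vCPU through an exit/entry cycle, where the pending virtual interrupt state is reloaded on the way back into the guest; the rest of the series arranges for the run loop to recheck kvm_request_pending() after announcing that it is entering the guest, so a late request aborts the entry. The following is a hedged userspace sketch of that entry-side ordering, not the code in the run loop itself; vcpu_model, try_enter_guest() and the mode values are invented names modelled on the real ones.

/*
 * Userspace model of the guest-entry check -- not kernel code.
 * The vCPU advertises IN_GUEST_MODE *before* it reads the request
 * bitmap, so a requester that sets a bit and then looks at the mode
 * cannot lose the race: either the vCPU sees the bit here and backs
 * out, or the requester sees IN_GUEST_MODE and kicks it.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum vcpu_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

struct vcpu_model {
        _Atomic int mode;
        _Atomic unsigned long requests;
};

static bool try_enter_guest(struct vcpu_model *vcpu)
{
        /* Publish the mode with full ordering before reading requests. */
        atomic_store_explicit(&vcpu->mode, IN_GUEST_MODE, memory_order_seq_cst);

        if (atomic_load_explicit(&vcpu->requests, memory_order_seq_cst)) {
                /* A request sneaked in: back out and let the run loop retry. */
                atomic_store_explicit(&vcpu->mode, OUTSIDE_GUEST_MODE,
                                      memory_order_seq_cst);
                return false;
        }

        /* ... the world switch would happen here ... */
        return true;
}

int main(void)
{
        struct vcpu_model vcpu = { .mode = OUTSIDE_GUEST_MODE, .requests = 0 };

        printf("entry with no request:  %d\n", try_enter_guest(&vcpu)); /* 1 */
        atomic_fetch_or_explicit(&vcpu.requests, 1UL << 1, memory_order_release);
        printf("entry with request set: %d\n", try_enter_guest(&vcpu)); /* 0 */
        return 0;
}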
 
         * trigger a world-switch round on the running physical CPU to set the
         * virtual IRQ/FIQ fields in the HCR appropriately.
         */
+       kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);
 
        return 0;
 
                 * won't see this one until it exits for some other
                 * reason.
                 */
-               if (vcpu)
+               if (vcpu) {
+                       kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                        kvm_vcpu_kick(vcpu);
+               }
                return false;
        }
 
        spin_unlock(&irq->irq_lock);
        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
 
+       kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);
 
        return true;
         * a good kick...
         */
        kvm_for_each_vcpu(c, vcpu, kvm) {
-               if (kvm_vgic_vcpu_pending_irq(vcpu))
+               if (kvm_vgic_vcpu_pending_irq(vcpu)) {
+                       kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                        kvm_vcpu_kick(vcpu);
+               }
        }
 }
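
Every kick site changed in this patch follows the same order: first record the interrupt, then kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu), and only then kvm_vcpu_kick(vcpu). If the target vCPU is still on its way into the guest, the already-visible request bit is what stops the entry; if it is already running the guest, the kick forces an exit and the interrupt is picked up on re-entry. The sketch below models that injector-side ordering in the same hedged userspace style as above; inject_irq(), kick() and the irq_line field are invented, and kick() only prints the decision the real kvm_vcpu_kick() would make.

/*
 * Userspace model of the injector side -- not kernel code.
 * Order matters: raise the request *before* the kick, so the kick can
 * never outrun the request bit.
 */
#include <stdatomic.h>
#include <stdio.h>

enum vcpu_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

#define REQ_IRQ_PENDING_BIT (1UL << 1)   /* stands in for KVM_REQ_IRQ_PENDING */

struct vcpu_model {
        _Atomic int mode;
        _Atomic unsigned long requests;
        _Atomic int irq_line;             /* stands in for the queued vIRQ state */
};

/* Models the decision kvm_vcpu_kick() makes: IPI a vCPU that is in guest
 * mode; otherwise the request is simply found on the next guest entry. */
static void kick(struct vcpu_model *vcpu)
{
        if (atomic_load_explicit(&vcpu->mode, memory_order_seq_cst) == IN_GUEST_MODE)
                printf("kick: send IPI to force a guest exit\n");
        else
                printf("kick: vCPU not in guest, request seen on next entry\n");
}

static void inject_irq(struct vcpu_model *vcpu)
{
        /* 1. Record the interrupt itself. */
        atomic_store_explicit(&vcpu->irq_line, 1, memory_order_release);

        /* 2. Raise the request so a concurrent guest entry backs out. */
        atomic_fetch_or_explicit(&vcpu->requests, REQ_IRQ_PENDING_BIT,
                                 memory_order_release);

        /* 3. Only now kick: the request bit is already visible. */
        kick(vcpu);
}

int main(void)
{
        struct vcpu_model vcpu = {
                .mode = IN_GUEST_MODE, .requests = 0, .irq_line = 0,
        };

        inject_irq(&vcpu);
        return 0;
}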