                        kvm_lapic_set_irr(vector, apic);
                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
+               } else {
+                       trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
+                                                  trig_mode, vector);
                }
                break;
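
For reference (not part of the diff above): the new tracepoint fires only on the branch where the posted-interrupt hook reports success, i.e. the interrupt was accepted by hardware APIC virtualization (APICv on Intel, AVIC on AMD) instead of being injected through the emulated LAPIC. A minimal sketch of the surrounding control flow, assuming a deliver_posted_interrupt() hook that returns non-zero when it cannot post the interrupt in hardware (the hook name and exact call site vary by kernel version):

        /*
         * Sketch only: deliver_posted_interrupt() stands in for the vendor
         * posted-interrupt hook; its name and return convention are assumptions.
         */
        if (deliver_posted_interrupt(vcpu, vector)) {
                /* Hardware posting unavailable: fall back to the emulated LAPIC. */
                kvm_lapic_set_irr(vector, apic);
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
        } else {
                /* Hardware accepted the interrupt; this is the traced path. */
                trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
                                           trig_mode, vector);
        }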
 
 
                  __entry->bit)
 );
 
+TRACE_EVENT(kvm_apicv_accept_irq,
+           TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
+           TP_ARGS(apicid, dm, tm, vec),
+
+       TP_STRUCT__entry(
+               __field(        __u32,          apicid          )
+               __field(        __u16,          dm              )
+               __field(        __u16,          tm              )
+               __field(        __u8,           vec             )
+       ),
+
+       TP_fast_assign(
+               __entry->apicid         = apicid;
+               __entry->dm             = dm;
+               __entry->tm             = tm;
+               __entry->vec            = vec;
+       ),
+
+       TP_printk("apicid %x vec %u (%s|%s)",
+                 __entry->apicid, __entry->vec,
+                 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
+                 __entry->tm ? "level" : "edge")
+);
+
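For reference on the TP_printk format (not part of the diff): dm carries the raw delivery-mode value (APIC_DM_FIXED, APIC_DM_NMI, ...), in which the delivery mode occupies bits 10:8, so __entry->dm >> 8 & 0x7 extracts the three-bit field before __print_symbolic() maps it to a name. kvm_deliver_mode is the symbolic table already used by the existing kvm_apic_accept_irq event in this header; a sketch of the mapping as assumed here (exact strings should be checked against trace.h):

#define kvm_deliver_mode	\
	{0x0, "Fixed"},		\
	{0x1, "LowPrio"},	\
	{0x2, "SMI"},		\
	{0x3, "Res3"},		\
	{0x4, "NMI"},		\
	{0x5, "INIT"},		\
	{0x6, "SIPI"},		\
	{0x7, "ExtINT"}

With that format, a rendered event would look roughly like "kvm_apicv_accept_irq: apicid 1 vec 236 (Fixed|edge)" (values illustrative).
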
 /*
  * Tracepoint for AMD AVIC
  */
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);