if (!avic_kick_target_vcpus_fast(kvm, source, icrl, icrh, index))
                return;
 
+       trace_kvm_avic_kick_vcpu_slowpath(icrh, icrl, index);
+
        /*
         * Wake any target vCPUs that are blocking, i.e. waiting for a wake
         * event.  There's no need to signal doorbells, as hardware has handled
 
                  __entry->vmid, __entry->vcpuid)
 );
 
+/*
+ * Tracepoint for the slow path of AVIC vCPU kicks, fired when
+ * avic_kick_target_vcpus_fast() fails and KVM falls back to iterating
+ * over vCPUs (see the call site in avic.c).  Records the raw ICR
+ * high/low halves and the index provided by the caller.
+ * NOTE(review): exact semantics of "index" are defined by the caller
+ * (AVIC incomplete-IPI exit info) — confirm against avic.c.
+ */
+TRACE_EVENT(kvm_avic_kick_vcpu_slowpath,
+           TP_PROTO(u32 icrh, u32 icrl, u32 index),
+           TP_ARGS(icrh, icrl, index),
+
+       TP_STRUCT__entry(
+               __field(u32, icrh)
+               __field(u32, icrl)
+               __field(u32, index)
+       ),
+
+       TP_fast_assign(
+               __entry->icrh = icrh;
+               __entry->icrl = icrl;
+               __entry->index = index;
+       ),
+
+       TP_printk("icrh:icrl=%#08x:%08x, index=%u",
+                 __entry->icrh, __entry->icrl, __entry->index)
+);
+
 TRACE_EVENT(kvm_hv_timer_state,
                TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
                TP_ARGS(vcpu_id, hv_timer_in_use),
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);