void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
 {
+       kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
        local_flush_icache_all();
 }
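/*
 * The SBI_PMU_FW_*_RCVD increments added in these hunks depend on
 * kvm_riscv_vcpu_pmu_incr_fw() bumping a per-vCPU firmware event counter.
 * A minimal sketch of such a helper is shown here for reference only; the
 * vcpu_to_pmu(), fw_event[], ->started and ->value names are assumptions
 * about the vCPU PMU layout, not definitions taken from this patch.
 */
int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
        struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);

        if (!kvpmu || fid >= SBI_PMU_FW_MAX)
                return -EINVAL;

        /* Only account the event if the guest has started a matching counter. */
        if (kvpmu->fw_event[fid].started)
                kvpmu->fw_event[fid].value++;

        return 0;
}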
 
                                                d.addr, d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
+                       kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
                        kvm_riscv_local_hfence_vvma_asid_gva(
                                                READ_ONCE(v->vmid), d.asid,
                                                d.addr, d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
+                       kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
                        kvm_riscv_local_hfence_vvma_asid_all(
                                                READ_ONCE(v->vmid), d.asid);
                        break;
                case KVM_RISCV_HFENCE_VVMA_GVA:
+                       kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
                        kvm_riscv_local_hfence_vvma_gva(
                                                READ_ONCE(v->vmid),
                                                d.addr, d.size, d.order);
 
 #include <linux/kvm_host.h>
 #include <asm/sbi.h>
 #include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_pmu.h>
 #include <asm/kvm_vcpu_sbi.h>
 
 static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
                return 0;
        }
 
+       kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_SET_TIMER);
 #if __riscv_xlen == 32
        next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
 #else
                return 0;
        }
 
+       kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_IPI_SENT);
        kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
                if (hbase != -1UL) {
                        if (tmp->vcpu_id < hbase)
                ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
                if (ret < 0)
                        break;
+               kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD);
        }
 
        return ret;
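/*
 * Accounting note: the *_SENT events are counted once, on the vCPU that made
 * the SBI call, when the request is handled; the *_RCVD events are counted on
 * every vCPU that actually receives the IPI or processes the deferred fence,
 * so sender and receiver totals can legitimately differ.
 */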
        switch (funcid) {
        case SBI_EXT_RFENCE_REMOTE_FENCE_I:
                kvm_riscv_fence_i(vcpu->kvm, hbase, hmask);
+               kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
                break;
        case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
                if (cp->a2 == 0 && cp->a3 == 0)
                else
                        kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
                                                  cp->a2, cp->a3, PAGE_SHIFT);
+               kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
                break;
        case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
                if (cp->a2 == 0 && cp->a3 == 0)
                                                       hbase, hmask,
                                                       cp->a2, cp->a3,
                                                       PAGE_SHIFT, cp->a4);
+               kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_SENT);
                break;
        case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
        case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
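/*
 * Guest-side view (sketch, not part of this patch): once KVM accounts these
 * firmware events, a guest can count them through the SBI PMU extension.
 * The event-index encoding (type in bits [19:16], code in bits [15:0]) and
 * the sbi_ecall() argument order follow the SBI PMU specification as assumed
 * here; error handling is reduced to the bare minimum.
 */
static u64 read_fw_ipi_rcvd_count(void)
{
        unsigned long eidx = (SBI_PMU_EVENT_TYPE_FW << 16) | SBI_PMU_FW_IPI_RCVD;
        struct sbiret ret;
        unsigned long cidx;

        /* Ask the SBI implementation to match any free counter to the event. */
        ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH,
                        0, -1UL, 0, eidx, 0, 0);
        if (ret.error)
                return 0;
        cidx = ret.value;

        /* Start the counter, then read the firmware counter value back. */
        sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, cidx, 1, 0, 0, 0, 0);
        ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ,
                        cidx, 0, 0, 0, 0, 0);

        return ret.error ? 0 : ret.value;
}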