return 0;
 }
 
+/*
+ * Advertise the "load IA32_PERF_GLOBAL_CTRL" VM-{entry,exit} controls to
+ * L1 iff the vPMU exposes MSR_CORE_PERF_GLOBAL_CTRL to the guest.
+ */
+void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx;
+
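+       /* No nested VMX MSRs to update if the vCPU can't use VMX. */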
+       if (!nested_vmx_allowed(vcpu))
+               return;
+
+       vmx = to_vmx(vcpu);
+       if (kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
+               vmx->nested.msrs.entry_ctls_high |=
+                               VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+               vmx->nested.msrs.exit_ctls_high |=
+                               VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+       } else {
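+               /*
+                * Clear the allowed-1 bits so L1 cannot enable either
+                * control while MSR_CORE_PERF_GLOBAL_CTRL is unavailable.
+                */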
+               vmx->nested.msrs.entry_ctls_high &=
+                               ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+               vmx->nested.msrs.exit_ctls_high &=
+                               ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+       }
+}
+
 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 {
        gva_t gva;
 
 int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
                        u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
+void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
 
 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
 {
 
 #include "x86.h"
 #include "cpuid.h"
 #include "lapic.h"
+#include "nested.h"
 #include "pmu.h"
 
 static struct kvm_event_hw_type_mapping intel_arch_events[] = {
                0, pmu->nr_arch_gp_counters);
        bitmap_set(pmu->all_valid_pmc_idx,
                INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
+
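+       /*
+        * The nested VM-{entry,exit} PERF_GLOBAL_CTRL controls depend on
+        * the vPMU configuration, which may have changed with guest CPUID.
+        */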
+       nested_vmx_pmu_entry_exit_ctls_update(vcpu);
 }
 
 static void intel_pmu_init(struct kvm_vcpu *vcpu)