 KVM_X86_OP_NULL(slot_disable_log_dirty)
 KVM_X86_OP_NULL(flush_log_dirty)
 KVM_X86_OP_NULL(enable_log_dirty_pt_masked)
-KVM_X86_OP_NULL(cpu_dirty_log_size)
 KVM_X86_OP_NULL(pre_block)
 KVM_X86_OP_NULL(post_block)
 KVM_X86_OP_NULL(vcpu_blocking)
 
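For context, each entry in kvm-x86-ops.h expands into a static call keyed to the same-named function pointer in struct kvm_x86_ops, which is why cpu_dirty_log_size has to leave the list once it becomes a plain integer. A sketch of that wiring, quoted from memory of arch/x86/kvm/x86.c (not part of this diff, so treat details as approximate):

/* Every name fed through kvm-x86-ops.h must be a function pointer
 * member of struct kvm_x86_ops for this expansion to compile. */
#define KVM_X86_OP(func) \
	DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \
				*(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_NULL KVM_X86_OP
#include <asm/kvm-x86-ops.h>
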
        void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
                                           struct kvm_memory_slot *slot,
                                           gfn_t offset, unsigned long mask);
-       int (*cpu_dirty_log_size)(void);
+       int cpu_dirty_log_size;
 
        /* pmu operations of sub-arch */
        const struct kvm_pmu_ops *pmu_ops;
 
 
 int kvm_cpu_dirty_log_size(void)
 {
-       if (kvm_x86_ops.cpu_dirty_log_size)
-               return static_call(kvm_x86_cpu_dirty_log_size)();
-
-       return 0;
+       return kvm_x86_ops.cpu_dirty_log_size;
 }
 
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
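The simplification above is safe because generic code already tolerates a size of zero. The cross-arch consumer is the dirty-ring setup in virt/kvm/dirty_ring.c, reproduced from memory below for context (not part of this diff): the weak default covers architectures without a CPU dirty log, and the x86 override now just reads the field.

int __weak kvm_cpu_dirty_log_size(void)
{
	return 0;
}

u32 kvm_dirty_ring_get_rsvd_entries(void)
{
	/* Reserve room for entries the CPU itself may log (PML on VMX)
	 * on top of the ring's own soft-full headroom. */
	return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
}
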
 
        return supported & BIT(bit);
 }
 
-static int vmx_cpu_dirty_log_size(void)
-{
-       return enable_pml ? PML_ENTITY_NUM : 0;
-}
-
 static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .hardware_unsetup = hardware_unsetup,
 
        .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
        .flush_log_dirty = vmx_flush_log_dirty,
        .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
+       .cpu_dirty_log_size = PML_ENTITY_NUM,
 
        .pre_block = vmx_pre_block,
        .post_block = vmx_post_block,
 
        .msr_filter_changed = vmx_msr_filter_changed,
        .complete_emulated_msr = kvm_complete_insn_gp,
-       .cpu_dirty_log_size = vmx_cpu_dirty_log_size,
 
        .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
 };
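PML_ENTITY_NUM is the capacity of the hardware Page-Modification Log: a single 4 KiB page of 64-bit guest-physical addresses. Its definition, quoted from arch/x86/kvm/vmx/vmx.h for context (not part of this diff), shows why the size is a compile-time constant and never worth an indirect call:

/* The PML log is one page holding 512 GPA entries; the size cannot
 * vary at runtime, only be present (PML enabled) or not. */
#define PML_ENTITY_NUM		512
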
                vmx_x86_ops.slot_disable_log_dirty = NULL;
                vmx_x86_ops.flush_log_dirty = NULL;
                vmx_x86_ops.enable_log_dirty_pt_masked = NULL;
-               vmx_x86_ops.cpu_dirty_log_size = NULL;
+               vmx_x86_ops.cpu_dirty_log_size = 0;
        }
 
        if (!cpu_has_vmx_preemption_timer())
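
With the field zeroed at hardware_setup() time when PML is unavailable, the accessor transparently reports zero and callers need no NULL-hook check. A minimal, purely hypothetical illustration (not code from the tree):

/* Hypothetical caller: once hardware_setup() has cleared the field,
 * kvm_cpu_dirty_log_size() simply returns 0. */
if (!kvm_cpu_dirty_log_size())
	pr_debug("CPU dirty logging (PML) unavailable\n");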