 KVM_X86_OP(handle_exit_irqoff)
 KVM_X86_OP_NULL(request_immediate_exit)
 KVM_X86_OP(sched_in)
-KVM_X86_OP_NULL(slot_enable_log_dirty)
-KVM_X86_OP_NULL(slot_disable_log_dirty)
-KVM_X86_OP_NULL(flush_log_dirty)
-KVM_X86_OP_NULL(enable_log_dirty_pt_masked)
 KVM_X86_OP_NULL(pre_block)
 KVM_X86_OP_NULL(post_block)
 KVM_X86_OP_NULL(vcpu_blocking)
 
        void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
 
        /*
-        * Arch-specific dirty logging hooks. These hooks are only supposed to
-        * be valid if the specific arch has hardware-accelerated dirty logging
-        * mechanism. Currently only for PML on VMX.
-        *
-        *  - slot_enable_log_dirty:
-        *      called when enabling log dirty mode for the slot.
-        *  - slot_disable_log_dirty:
-        *      called when disabling log dirty mode for the slot.
-        *      also called when slot is created with log dirty disabled.
-        *  - flush_log_dirty:
-        *      called before reporting dirty_bitmap to userspace.
-        *  - enable_log_dirty_pt_masked:
-        *      called when reenabling log dirty for the GFNs in the mask after
-        *      corresponding bits are cleared in slot->dirty_bitmap.
+        * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer.  A zero
+        * value indicates CPU dirty logging is unsupported or disabled.
         */
-       void (*slot_enable_log_dirty)(struct kvm *kvm,
-                                     struct kvm_memory_slot *slot);
-       void (*slot_disable_log_dirty)(struct kvm *kvm,
-                                      struct kvm_memory_slot *slot);
-       void (*flush_log_dirty)(struct kvm *kvm);
-       void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
-                                          struct kvm_memory_slot *slot,
-                                          gfn_t offset, unsigned long mask);
        int cpu_dirty_log_size;
 
        /* pmu operations of sub-arch */
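
For context on the hunk above: the four removed hooks collapse into this one integer. A vendor module that does hardware dirty logging advertises its buffer size here (VMX uses PML_ENTITY_NUM, as seen further down) and zeroes the field when the feature is unavailable, so common code can treat a non-zero value as "the CPU logs dirty GFNs itself". A minimal sketch of how the field is consumed, modeled on the existing kvm_cpu_dirty_log_size() accessor in x86.c (not part of this patch):

	/*
	 * Sketch: generic code only reads the value; it is used, for instance,
	 * to reserve room in the dirty ring for entries the CPU may log in one
	 * burst.
	 */
	int kvm_cpu_dirty_log_size(void)
	{
		return kvm_x86_ops.cpu_dirty_log_size;
	}
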
                                        struct kvm_memory_slot *memslot);
 void kvm_mmu_slot_set_dirty(struct kvm *kvm,
                            struct kvm_memory_slot *memslot);
-void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
-                                  struct kvm_memory_slot *slot,
-                                  gfn_t gfn_offset, unsigned long mask);
 void kvm_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
 unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
 
  *
  * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
  */
-void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
-                                    struct kvm_memory_slot *slot,
-                                    gfn_t gfn_offset, unsigned long mask)
+static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+                                        struct kvm_memory_slot *slot,
+                                        gfn_t gfn_offset, unsigned long mask)
 {
        struct kvm_rmap_head *rmap_head;
 
                mask &= mask - 1;
        }
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
 
 /**
  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
                                struct kvm_memory_slot *slot,
                                gfn_t gfn_offset, unsigned long mask)
 {
-       if (kvm_x86_ops.enable_log_dirty_pt_masked)
-               static_call(kvm_x86_enable_log_dirty_pt_masked)(kvm, slot,
-                                                               gfn_offset,
-                                                               mask);
+       if (kvm_x86_ops.cpu_dirty_log_size)
+               kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
        else
                kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
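
For reference, gfn_offset and mask in the helpers above select up to BITS_PER_LONG GFNs relative to the memslot base: the GFNs whose bits userspace has just harvested from the dirty bitmap. A minimal sketch of the mask-walking idiom they share, with the per-GFN rmap work elided:

	while (mask) {
		/* The lowest set bit picks the next GFN in the batch. */
		gfn_t gfn = slot->base_gfn + gfn_offset + __ffs(mask);

		/*
		 * The real helpers look up the rmap for @gfn and clear either
		 * the D-bit (CPU dirty logging / PML) or the W-bit (software
		 * write protection) on its 4K SPTEs.
		 */

		mask &= mask - 1;	/* drop that bit and continue */
	}
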
        if (flush)
                kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
 
 void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot)
        if (flush)
                kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
 
 void kvm_mmu_slot_set_dirty(struct kvm *kvm,
                            struct kvm_memory_slot *memslot)
        if (flush)
                kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
-EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
 
 void kvm_mmu_zap_all(struct kvm *kvm)
 {
 
        vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
 }
 
-/*
- * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap.
- * Called before reporting dirty_bitmap to userspace.
- */
-static void kvm_flush_pml_buffers(struct kvm *kvm)
-{
-       int i;
-       struct kvm_vcpu *vcpu;
-       /*
-        * We only need to kick vcpu out of guest mode here, as PML buffer
-        * is flushed at beginning of all VMEXITs, and it's obvious that only
-        * vcpus running in guest are possible to have unflushed GPAs in PML
-        * buffer.
-        */
-       kvm_for_each_vcpu(i, vcpu, kvm)
-               kvm_vcpu_kick(vcpu);
-}
-
 static void vmx_dump_sel(char *name, uint32_t sel)
 {
        pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
                shrink_ple_window(vcpu);
 }
 
-static void vmx_slot_enable_log_dirty(struct kvm *kvm,
-                                    struct kvm_memory_slot *slot)
-{
-       if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
-               kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
-       kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
-}
-
-static void vmx_slot_disable_log_dirty(struct kvm *kvm,
-                                      struct kvm_memory_slot *slot)
-{
-       kvm_mmu_slot_set_dirty(kvm, slot);
-}
-
-static void vmx_flush_log_dirty(struct kvm *kvm)
-{
-       kvm_flush_pml_buffers(kvm);
-}
-
-static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
-                                          struct kvm_memory_slot *memslot,
-                                          gfn_t offset, unsigned long mask)
-{
-       kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
-}
-
 static int vmx_pre_block(struct kvm_vcpu *vcpu)
 {
        if (pi_pre_block(vcpu))
 
        .sched_in = vmx_sched_in,
 
-       .slot_enable_log_dirty = vmx_slot_enable_log_dirty,
-       .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
-       .flush_log_dirty = vmx_flush_log_dirty,
-       .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
        .cpu_dirty_log_size = PML_ENTITY_NUM,
 
        .pre_block = vmx_pre_block,
        if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
                enable_pml = 0;
 
-       if (!enable_pml) {
-               vmx_x86_ops.slot_enable_log_dirty = NULL;
-               vmx_x86_ops.slot_disable_log_dirty = NULL;
-               vmx_x86_ops.flush_log_dirty = NULL;
-               vmx_x86_ops.enable_log_dirty_pt_masked = NULL;
+       if (!enable_pml)
                vmx_x86_ops.cpu_dirty_log_size = 0;
-       }
 
        if (!cpu_has_vmx_preemption_timer())
                enable_preemption_timer = false;
 
 
 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
        /*
-        * Flush potentially hardware-cached dirty pages to dirty_bitmap.
+        * Flush all CPUs' dirty log buffers to the dirty_bitmap.  Called
+        * before reporting dirty_bitmap to userspace.  KVM flushes the buffers
+        * on all VM-Exits, thus we only need to kick running vCPUs to force a
+        * VM-Exit.
         */
-       static_call_cond(kvm_x86_flush_log_dirty)(kvm);
+       struct kvm_vcpu *vcpu;
+       int i;
+
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               kvm_vcpu_kick(vcpu);
 }
 
 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
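
The kick-only flush above works because VMX drains the PML buffer on every VM-Exit. For context, a simplified sketch of that drain, modeled on the existing vmx_flush_pml_buffer() in vmx/vmx.c (the function name below is hypothetical and index wrap-around handling is elided):

	static void pml_drain_sketch(struct kvm_vcpu *vcpu)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);
		u64 *pml_buf = page_address(vmx->pml_pg);
		u16 pml_idx = vmcs_read16(GUEST_PML_INDEX);

		/* The index points at the next free entry; "empty" is PML_ENTITY_NUM - 1. */
		if (pml_idx == PML_ENTITY_NUM - 1)
			return;

		/* Valid entries live above the index, up to the top of the page. */
		for (pml_idx++; pml_idx < PML_ENTITY_NUM; pml_idx++)
			kvm_vcpu_mark_page_dirty(vcpu, pml_buf[pml_idx] >> PAGE_SHIFT);

		/* Reset the index so the CPU logs from the top of the buffer again. */
		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
	}
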
         * is enabled the D-bit or the W-bit will be cleared.
         */
        if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
-               if (kvm_x86_ops.slot_enable_log_dirty) {
-                       static_call(kvm_x86_slot_enable_log_dirty)(kvm, new);
+               if (kvm_x86_ops.cpu_dirty_log_size) {
+                       if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
+                               kvm_mmu_slot_leaf_clear_dirty(kvm, new);
+                       kvm_mmu_slot_largepage_remove_write_access(kvm, new);
                } else {
                        int level =
                                kvm_dirty_log_manual_protect_and_init_set(kvm) ?
                         */
                        kvm_mmu_slot_remove_write_access(kvm, new, level);
                }
-       } else {
-               static_call_cond(kvm_x86_slot_disable_log_dirty)(kvm, new);
+       } else if (kvm_x86_ops.cpu_dirty_log_size) {
+               kvm_mmu_slot_set_dirty(kvm, new);
        }
 }
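
To round out the picture from the other side of the ABI: the vCPU kicks in kvm_arch_sync_dirty_log() are triggered when userspace harvests a slot's dirty bitmap. A minimal userspace sketch; vm_fd, slot_id and bitmap are assumed to come from the usual KVM_CREATE_VM / KVM_SET_USER_MEMORY_REGION setup, with one bitmap bit per page of the slot:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	/* Fetch the dirty log for one memslot; KVM reports dirtied pages as
	 * set bits in @bitmap.
	 */
	static int get_dirty_log(int vm_fd, unsigned int slot_id, void *bitmap)
	{
		struct kvm_dirty_log log = {
			.slot = slot_id,
			.dirty_bitmap = bitmap,
		};

		if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
			perror("KVM_GET_DIRTY_LOG");
			return -1;
		}

		return 0;
	}
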