const struct kvm_memory_slot *new,
                                   enum kvm_mr_change change)
 {
+       bool log_dirty_pages = new && (new->flags & KVM_MEM_LOG_DIRTY_PAGES);
+
        /*
         * At this point memslot has been committed and there is an
         * allocated dirty_bitmap[], dirty pages will be tracked while the
         * memory slot is write protected.
         */
-       if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+       if (log_dirty_pages) {
+               if (change == KVM_MR_DELETE)
+                       return;
+
                /*
-                * If we're with initial-all-set, we don't need to write
-                * protect any pages because they're all reported as dirty.
-                * Huge pages and normal pages will be write protect gradually.
+                * Pages are write-protected in either of these two
+                * cases:
+                *
+                * 1. with initial-all-set: gradually with CLEAR ioctls,
                 */
-               if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
-                       kvm_mmu_wp_memory_region(kvm, new->id);
-               }
+               if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+                       return;
+               /*
+                * or
+                * 2. without initial-all-set: all in one shot when
+                *    enabling dirty logging.
+                */
+               kvm_mmu_wp_memory_region(kvm, new->id);
        }
 }
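
For context on the two modes the comments above describe, here is a minimal userspace sketch (not part of the patch): with KVM_DIRTY_LOG_INITIALLY_SET the dirty bitmap starts out all ones and pages are write-protected gradually as KVM_CLEAR_DIRTY_LOG ioctls come in, whereas without it kvm_mmu_wp_memory_region() write-protects the whole slot as soon as dirty logging is enabled. The helper names and the vm_fd/slot/bitmap parameters below are placeholders; the capability, flags, and ioctls are the standard KVM UAPI.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical helper: opt in to manual protect + initial-all-set. */
static int enable_initial_all_set(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
		.args[0] = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
			   KVM_DIRTY_LOG_INITIALLY_SET,
	};

	/* Must be enabled before dirty logging is turned on for a slot. */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

/* Hypothetical helper: re-protect one chunk of a slot's dirty bitmap. */
static int clear_dirty_chunk(int vm_fd, __u32 slot, __u64 first_page,
			     __u32 num_pages, void *bitmap)
{
	struct kvm_clear_dirty_log clr = {
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages,
		.dirty_bitmap = bitmap,
	};

	/*
	 * Only pages whose bits are set in @bitmap get write-protected
	 * again, which is the "gradually with CLEAR ioctls" case in
	 * kvm_arch_commit_memory_region().
	 */
	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clr);
}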