When getting the Modified Change Topology Report value, the attr->addr
 must point to a byte where the value will be stored or retrieved from.
 
+8.40 KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
+---------------------------------------
+
+:Capability: KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
+:Architectures: arm64
+:Type: vm
+:Parameters: arg[0] is the new split chunk size.
+:Returns: 0 on success, -EINVAL if any memslot was already created, or if
+          the chunk size is not a supported block size.
+
+This capability sets the chunk size used in Eager Page Splitting.
+
+Eager Page Splitting improves the performance of dirty-logging (used
+in live migrations) when guest memory is backed by huge-pages.  It
+avoids splitting huge-pages (into PAGE_SIZE pages) on fault, by doing
+it eagerly when enabling dirty logging (with the
+KVM_MEM_LOG_DIRTY_PAGES flag for a memory region), or when using
+KVM_CLEAR_DIRTY_LOG.
+
+The chunk size specifies how many pages to break at a time, using a
+single allocation for each chunk. The bigger the chunk size, the more
+pages need to be allocated ahead of time.
+
+The chunk size needs to be a valid block size. The list of acceptable
+block sizes is exposed in KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES as a
+64-bit bitmap (each bit describing a block size). The default value is
+0, which disables eager page splitting.
+
 9. Known KVM API problems
 =========================
 
 
        /* The last vcpu id that ran on each physical CPU */
        int __percpu *last_vcpu_ran;
 
+#define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
+       /*
+        * Memory cache used to split
+        * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
+        * is used to allocate stage2 page tables while splitting huge
+        * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
+        * influences both the capacity of the split page cache, and
+        * how often KVM reschedules. Be wary of raising CHUNK_SIZE
+        * too high.
+        *
+        * Protected by kvm->slots_lock.
+        */
+       struct kvm_mmu_memory_cache split_page_cache;
+       uint64_t split_page_chunk_size;
+
        struct kvm_arch *arch;
 };
 
 
        return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
 }
 
+/*
+ * Return a bitmap of the supported stage-2 block sizes, with one bit
+ * set per granule shift, for every level from
+ * KVM_PGTABLE_MIN_BLOCK_LEVEL up to (but excluding)
+ * KVM_PGTABLE_MAX_LEVELS.
+ */
+static inline u32 kvm_supported_block_sizes(void)
+{
+       u32 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
+       u32 r = 0;
+
+       for (; level < KVM_PGTABLE_MAX_LEVELS; level++)
+               r |= BIT(kvm_granule_shift(level));
+
+       return r;
+}
+
+/*
+ * Check whether @size is a supported stage-2 block size: it must be a
+ * power of two whose bit is set in kvm_supported_block_sizes().
+ *
+ * IS_ALIGNED(size, size) is a power-of-two test (size & (size - 1) == 0);
+ * it also accepts size == 0, but a zero size is then rejected by the
+ * bitmap AND below.
+ */
+static inline bool kvm_is_block_size_supported(u64 size)
+{
+       bool is_power_of_two = IS_ALIGNED(size, size);
+
+       return is_power_of_two && (size & kvm_supported_block_sizes());
+}
+
 /**
  * struct kvm_pgtable_mm_ops - Memory management callbacks.
  * @zalloc_page:               Allocate a single zeroed memory page.
 
                            struct kvm_enable_cap *cap)
 {
        int r;
+       u64 new_cap;
 
        if (cap->flags)
                return -EINVAL;
                r = 0;
                set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
                break;
+       case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
+               new_cap = cap->args[0];
+
+               mutex_lock(&kvm->slots_lock);
+               /*
+                * To keep things simple, allow changing the chunk
+                * size only when no memory slots have been created.
+                */
+               if (!kvm_are_all_memslots_empty(kvm)) {
+                       r = -EINVAL;
+               } else if (new_cap && !kvm_is_block_size_supported(new_cap)) {
+                       r = -EINVAL;
+               } else {
+                       r = 0;
+                       kvm->arch.mmu.split_page_chunk_size = new_cap;
+               }
+               mutex_unlock(&kvm->slots_lock);
+               break;
        default:
                r = -EINVAL;
                break;
        case KVM_CAP_ARM_PTRAUTH_GENERIC:
                r = system_has_full_ptr_auth();
                break;
+       case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
+               if (kvm)
+                       r = kvm->arch.mmu.split_page_chunk_size;
+               else
+                       r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
+               break;
+       case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
+               r = kvm_supported_block_sizes();
+               break;
        default:
                r = 0;
        }
 
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
 
+        /* The eager page splitting is disabled by default */
+       mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
+       mmu->split_page_cache.gfp_zero = __GFP_ZERO;
+
        mmu->pgt = pgt;
        mmu->pgd_phys = __pa(pgt->pgd);
        return 0;
 
 #define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
 #define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
 #define KVM_CAP_COUNTER_OFFSET 227
+#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
+#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
 
 #ifdef KVM_CAP_IRQ_ROUTING