KVM: arm64: Add KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
author Ricardo Koller <ricarkol@google.com>
Wed, 26 Apr 2023 17:23:23 +0000 (17:23 +0000)
committer Oliver Upton <oliver.upton@linux.dev>
Tue, 16 May 2023 17:39:18 +0000 (17:39 +0000)
Add a capability for userspace to specify the eager split chunk size.
The chunk size specifies how many pages to break at a time, using a
single allocation for each chunk. The bigger the chunk size, the more
pages need to be allocated ahead of time.

Suggested-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Ricardo Koller <ricarkol@google.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Link: https://lore.kernel.org/r/20230426172330.1439644-6-ricarkol@google.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Documentation/virt/kvm/api.rst
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/mmu.c
include/uapi/linux/kvm.h

index add067793b90b19f2fc7f03cef128b590d604610..656bd293c8f420335b257de5b9097952755a7bb9 100644 (file)
@@ -8445,6 +8445,33 @@ structure.
 When getting the Modified Change Topology Report value, the attr->addr
 must point to a byte where the value will be stored or retrieved from.
 
+8.40 KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
+---------------------------------------
+
+:Capability: KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
+:Architectures: arm64
+:Type: vm
+:Parameters: arg[0] is the new split chunk size.
+:Returns: 0 on success, -EINVAL if any memslot was already created.
+
+This capability sets the chunk size used in Eager Page Splitting.
+
+Eager Page Splitting improves the performance of dirty-logging (used
+in live migrations) when guest memory is backed by huge-pages.  It
+avoids splitting huge-pages (into PAGE_SIZE pages) on fault, by doing
+it eagerly when enabling dirty logging (with the
+KVM_MEM_LOG_DIRTY_PAGES flag for a memory region), or when using
+KVM_CLEAR_DIRTY_LOG.
+
+The chunk size specifies how many pages to break at a time, using a
+single allocation for each chunk. The bigger the chunk size, the more
+pages need to be allocated ahead of time.
+
+The chunk size needs to be a valid block size. The list of acceptable
+block sizes is exposed in KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES as a
+64-bit bitmap (each bit describing a block size). The default value is
+0, which disables eager page splitting.
+
 9. Known KVM API problems
 =========================
 
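As an aside, here is a minimal userspace sketch of how the two new capabilities fit together (not part of the patch; error handling is omitted, vm_fd is assumed to come from KVM_CREATE_VM, and the chunk size passed in is only an example):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * Sketch: enable eager page splitting for a VM with the given chunk
 * size. This must run before any memslot is created, otherwise the
 * ioctl fails with EINVAL.
 */
static int enable_eager_split(int kvm_fd, int vm_fd, __u64 chunk_size)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE,
		.args[0] = chunk_size,
	};
	long supported;

	/* Bitmap of valid chunk sizes: bit i set means (1ULL << i) bytes. */
	supported = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
			  KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES);
	if (supported < 0 || !(supported & chunk_size))
		return -1;	/* chunk_size must be one of the set bits */

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}

Per the documentation above, passing 0 in args[0] keeps eager page splitting disabled, which is also the default.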
index 7e7e19ef6993ede45aea71c6f19b624731290fbe..b743198450b3b14d2fe7a1bd08be0de461ae1bac 100644 (file)
@@ -159,6 +159,21 @@ struct kvm_s2_mmu {
        /* The last vcpu id that ran on each physical CPU */
        int __percpu *last_vcpu_ran;
 
+#define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
+       /*
+        * Memory cache used to split
+        * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
+        * is used to allocate stage2 page tables while splitting huge
+        * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
+        * influences both the capacity of the split page cache, and
+        * how often KVM reschedules. Be wary of raising CHUNK_SIZE
+        * too high.
+        *
+        * Protected by kvm->slots_lock.
+        */
+       struct kvm_mmu_memory_cache split_page_cache;
+       uint64_t split_page_chunk_size;
+
        struct kvm_arch *arch;
 };
 
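For a concrete sense of the trade-off described in the comment above (rough numbers, assuming a 4K granule with 2M and 1G block mappings; the exact accounting is done by the eager-split code itself): splitting a single 2M block down to PAGE_SIZE mappings needs one page-table page, so a 2M chunk keeps split_page_cache at about one pre-allocated page per refill. Splitting a 1G chunk needs one table at the 2M level plus 512 tables at the 4K level, i.e. roughly 513 pre-allocated pages (about 2 MiB) held in the cache, and KVM only gets a chance to reschedule between chunks.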
index c8e0e7d9303b26833ccc5df12767906964956d08..cbc6971e2cb4cff7f686075d7a05a2a513c9f339 100644 (file)
@@ -92,6 +92,24 @@ static inline bool kvm_level_supports_block_mapping(u32 level)
        return level >= KVM_PGTABLE_MIN_BLOCK_LEVEL;
 }
 
+static inline u32 kvm_supported_block_sizes(void)
+{
+       u32 level = KVM_PGTABLE_MIN_BLOCK_LEVEL;
+       u32 r = 0;
+
+       for (; level < KVM_PGTABLE_MAX_LEVELS; level++)
+               r |= BIT(kvm_granule_shift(level));
+
+       return r;
+}
+
+static inline bool kvm_is_block_size_supported(u64 size)
+{
+       bool is_power_of_two = IS_ALIGNED(size, size);
+
+       return is_power_of_two && (size & kvm_supported_block_sizes());
+}
+
 /**
  * struct kvm_pgtable_mm_ops - Memory management callbacks.
  * @zalloc_page:               Allocate a single zeroed memory page.
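Note that IS_ALIGNED(size, size) in kvm_is_block_size_supported() is the classic (size & (size - 1)) == 0 power-of-two test in disguise. A standalone illustration of the same check, using a hypothetical bitmap with only the 2M and 1G bits set (the real bitmap comes from kvm_supported_block_sizes() and depends on the page-table granule):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical bitmap for illustration: 2M (bit 21) and 1G (bit 30). */
static const uint64_t supported_sizes = (1ULL << 21) | (1ULL << 30);

/* Mirrors kvm_is_block_size_supported(): size must be a power of two
 * and its bit must be present in the supported-size bitmap. */
static bool block_size_ok(uint64_t size)
{
	bool is_power_of_two = (size & (size - 1)) == 0;

	return is_power_of_two && (size & supported_sizes);
}

/*
 * block_size_ok(1ULL << 21) -> true   (2M)
 * block_size_ok(1ULL << 30) -> true   (1G)
 * block_size_ok(1ULL << 16) -> false  (64K, not in the bitmap)
 * block_size_ok(3ULL << 20) -> false  (3M, not a power of two)
 */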
index 14391826241c8ca689232506144d681295482996..c605626801c4f2096b14d7b780935d85c2385022 100644 (file)
@@ -65,6 +65,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                            struct kvm_enable_cap *cap)
 {
        int r;
+       u64 new_cap;
 
        if (cap->flags)
                return -EINVAL;
@@ -89,6 +90,24 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                r = 0;
                set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
                break;
+       case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
+               new_cap = cap->args[0];
+
+               mutex_lock(&kvm->slots_lock);
+               /*
+                * To keep things simple, allow changing the chunk
+                * size only when no memory slots have been created.
+                */
+               if (!kvm_are_all_memslots_empty(kvm)) {
+                       r = -EINVAL;
+               } else if (new_cap && !kvm_is_block_size_supported(new_cap)) {
+                       r = -EINVAL;
+               } else {
+                       r = 0;
+                       kvm->arch.mmu.split_page_chunk_size = new_cap;
+               }
+               mutex_unlock(&kvm->slots_lock);
+               break;
        default:
                r = -EINVAL;
                break;
@@ -302,6 +321,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_ARM_PTRAUTH_GENERIC:
                r = system_has_full_ptr_auth();
                break;
+       case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
+               if (kvm)
+                       r = kvm->arch.mmu.split_page_chunk_size;
+               else
+                       r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
+               break;
+       case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
+               r = kvm_supported_block_sizes();
+               break;
        default:
                r = 0;
        }
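Since kvm_vm_ioctl_check_extension() returns the per-VM value when called on a VM file descriptor (and KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT, i.e. 0, on the /dev/kvm fd), userspace can also read back the currently configured chunk size. A minimal sketch, again assuming vm_fd comes from KVM_CREATE_VM:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: returns the configured chunk size in bytes for this VM,
 * or 0 if eager page splitting is disabled. */
static long eager_split_chunk_size(int vm_fd)
{
	return ioctl(vm_fd, KVM_CHECK_EXTENSION,
		     KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE);
}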
index a0d3c773af99513d89cb44dcf645aa83025aec09..f2d30486f7558acfc1c25769445d19c007454c2a 100644 (file)
@@ -775,6 +775,10 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
 
+       /* The eager page splitting is disabled by default */
+       mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
+       mmu->split_page_cache.gfp_zero = __GFP_ZERO;
+
        mmu->pgt = pgt;
        mmu->pgd_phys = __pa(pgt->pgd);
        return 0;
index 737318b1c1d9a16345853c6c561847bf6270e435..44edee0211fb18ac7561fb6a034b0e855fe66fc9 100644 (file)
@@ -1190,6 +1190,8 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
 #define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
 #define KVM_CAP_COUNTER_OFFSET 227
+#define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228
+#define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229
 
 #ifdef KVM_CAP_IRQ_ROUTING