KVM: selftests: Introduce num-pages conversion utilities
author		Andrew Jones <drjones@redhat.com>
		Fri, 14 Feb 2020 14:59:20 +0000 (15:59 +0100)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Mon, 24 Feb 2020 19:05:23 +0000 (20:05 +0100)
Guests and hosts don't have to have the same page size. This means
calculations are necessary when selecting the number of guest pages
to allocate in order to ensure the number is compatible with the
host. Provide utilities to help with those calculations and apply
them where appropriate.

We also revert commit bffed38d4fb5 ("kvm: selftests: aarch64:
dirty_log_test: fix unaligned memslot size") and then use
vm_adjust_num_guest_pages() there instead.
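
As an illustration of the intended usage (a sketch mirroring the
dirty_log_test changes below, not an additional part of the patch),
a test that wants roughly N guest pages does:

    /* Round N to a count that maps to whole host pages. */
    guest_num_pages = vm_adjust_num_guest_pages(mode, N);

    /* Size host-side buffers and bitmaps accordingly. */
    host_num_pages = vm_num_host_pages(mode, guest_num_pages);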

Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
tools/testing/selftests/kvm/dirty_log_test.c
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/lib/kvm_util.c

index e0f3337dfccb1ffec6cd66d55c24026e9acf288e..edc5c071bf02c71ce05cdcf0016f574701b8b394 100644 (file)
@@ -178,12 +178,11 @@ static void *vcpu_worker(void *data)
        return NULL;
 }
 
-static void vm_dirty_log_verify(unsigned long *bmap)
+static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
 {
+       uint64_t step = vm_num_host_pages(mode, 1);
        uint64_t page;
        uint64_t *value_ptr;
-       uint64_t step = host_page_size >= guest_page_size ? 1 :
-                               guest_page_size / host_page_size;
 
        for (page = 0; page < host_num_pages; page += step) {
                value_ptr = host_test_mem + page * host_page_size;
@@ -289,14 +288,14 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
         * case where the size is not aligned to 64 pages.
         */
        guest_num_pages = (1ul << (DIRTY_MEM_BITS -
-                                  vm_get_page_shift(vm))) + 16;
+                                  vm_get_page_shift(vm))) + 3;
+       guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
 #ifdef __s390x__
        /* Round up to multiple of 1M (segment size) */
        guest_num_pages = (guest_num_pages + 0xff) & ~0xffUL;
 #endif
        host_page_size = getpagesize();
-       host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
-                        !!((guest_num_pages * guest_page_size) % host_page_size);
+       host_num_pages = vm_num_host_pages(mode, guest_num_pages);
 
        if (!phys_offset) {
                guest_test_phys_mem = (vm_get_max_gfn(vm) -
@@ -367,7 +366,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
                kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
                                       host_num_pages);
 #endif
-               vm_dirty_log_verify(bmap);
+               vm_dirty_log_verify(mode, bmap);
                iteration++;
                sync_global_to_guest(vm, iteration);
        }
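
The step above is the number of host pages spanned by one guest page,
so the verify loop still advances one guest page at a time, matching
the open-coded expression it replaces. A sketch of the values it takes
(mode names and page sizes are illustrative; the results depend on the
host's actual page size):

    /* 64K guest pages on a 4K host: 16 host pages per guest page. */
    assert(vm_num_host_pages(VM_MODE_P40V48_64K, 1) == 16);

    /* 4K guest pages on a 64K host: rounds up to one whole host page. */
    assert(vm_num_host_pages(VM_MODE_P40V48_4K, 1) == 1);
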
index ae0d14c2540aa37608f59af67e0c283b2de5ab30..1dc13bfa88b7ca245256153a1adb52a61ecdb5d7 100644 (file)
@@ -164,6 +164,14 @@ unsigned int vm_get_page_size(struct kvm_vm *vm);
 unsigned int vm_get_page_shift(struct kvm_vm *vm);
 unsigned int vm_get_max_gfn(struct kvm_vm *vm);
 
+unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
+unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
+static inline unsigned int
+vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
+{
+       return vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
+}
+
 struct kvm_userspace_memory_region *
 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
                                 uint64_t end);
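
vm_adjust_num_guest_pages() round-trips through host pages: it only
changes the count when guest pages are smaller than host pages, in
which case it rounds up to the next host-page boundary. For example
(a sketch assuming a 64K host page size and a 4K guest page size):

    /*
     * 5 x 4K guest pages span 20K, which needs one whole 64K host
     * page, i.e. 16 guest pages.
     */
    vm_num_host_pages(mode, 5);         /* ceil(5 / 16) == 1  */
    vm_adjust_num_guest_pages(mode, 5); /* 1 * 16       == 16 */
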
index 1b133583d6c7515e6bf71e04f8396b9e36452ede..67f5dc9a6a32c50265107516ee551131e110cb01 100644 (file)
@@ -580,6 +580,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
        size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
        size_t alignment;
 
+       TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
+               "Number of guest pages is not compatible with the host. "
+               "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
+
        TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
                "address not on a page boundary.\n"
                "  guest_paddr: 0x%lx vm->page_size: 0x%x",
@@ -1701,3 +1705,36 @@ unsigned int vm_get_max_gfn(struct kvm_vm *vm)
 {
        return vm->max_gfn;
 }
+
+static unsigned int vm_calc_num_pages(unsigned int num_pages,
+                                     unsigned int page_shift,
+                                     unsigned int new_page_shift,
+                                     bool ceil)
+{
+       unsigned int n = 1 << (new_page_shift - page_shift);
+
+       if (page_shift >= new_page_shift)
+               return num_pages * (1 << (page_shift - new_page_shift));
+
+       return num_pages / n + !!(ceil && num_pages % n);
+}
+
+static inline int getpageshift(void)
+{
+       return __builtin_ffs(getpagesize()) - 1;
+}
+
+unsigned int
+vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
+{
+       return vm_calc_num_pages(num_guest_pages,
+                                vm_guest_mode_params[mode].page_shift,
+                                getpageshift(), true);
+}
+
+unsigned int
+vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
+{
+       return vm_calc_num_pages(num_host_pages, getpageshift(),
+                                vm_guest_mode_params[mode].page_shift, false);
+}