KVM: selftests: Make set_memory_region_test common to all architectures
authorSean Christopherson <sean.j.christopherson@intel.com>
Fri, 10 Apr 2020 23:17:06 +0000 (16:17 -0700)
committerPaolo Bonzini <pbonzini@redhat.com>
Wed, 15 Apr 2020 16:08:47 +0000 (12:08 -0400)
Make set_memory_region_test available on all architectures by wrapping
the bits that are x86-specific in ifdefs.  A future testcase
to create the maximum number of memslots will be architecture
agnostic.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200410231707.7128-10-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/set_memory_region_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/set_memory_region_test.c [deleted file]

index 16877c3daabfc180fe3b711668617f93cd487abb..5947cc119abccfbeb27cfb41c8814f93182eb46f 100644 (file)
@@ -6,7 +6,6 @@
 /x86_64/hyperv_cpuid
 /x86_64/mmio_warning_test
 /x86_64/platform_info_test
-/x86_64/set_memory_region_test
 /x86_64/set_sregs_test
 /x86_64/smm_test
 /x86_64/state_test
@@ -21,4 +20,5 @@
 /demand_paging_test
 /dirty_log_test
 /kvm_create_max_vcpus
+/set_memory_region_test
 /steal_time
index 712a2ddd2a2711a8271706fd2d37501ae3a3386d..7af62030c12f6de366fc6557fce2fd751714e176 100644 (file)
@@ -17,7 +17,6 @@ TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
 TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
 TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
-TEST_GEN_PROGS_x86_64 += x86_64/set_memory_region_test
 TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
 TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += x86_64/state_test
@@ -32,12 +31,14 @@ TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
+TEST_GEN_PROGS_x86_64 += set_memory_region_test
 TEST_GEN_PROGS_x86_64 += steal_time
 
 TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
 TEST_GEN_PROGS_aarch64 += demand_paging_test
 TEST_GEN_PROGS_aarch64 += dirty_log_test
 TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
+TEST_GEN_PROGS_aarch64 += set_memory_region_test
 TEST_GEN_PROGS_aarch64 += steal_time
 
 TEST_GEN_PROGS_s390x = s390x/memop
@@ -46,6 +47,7 @@ TEST_GEN_PROGS_s390x += s390x/sync_regs_test
 TEST_GEN_PROGS_s390x += demand_paging_test
 TEST_GEN_PROGS_s390x += dirty_log_test
 TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
+TEST_GEN_PROGS_s390x += set_memory_region_test
 
 TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
 LIBKVM += $(LIBKVM_$(UNAME_M))
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
new file mode 100644 (file)
index 0000000..ac4945f
--- /dev/null
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include <linux/compiler.h>
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <processor.h>
+
+#define VCPU_ID 0
+
+#ifdef __x86_64__
+/*
+ * Somewhat arbitrary location and slot, intended to not overlap anything.  The
+ * location and size are specifically 2mb sized/aligned so that the initial
+ * region corresponds to exactly one large page.
+ */
+#define MEM_REGION_GPA         0xc0000000
+#define MEM_REGION_SIZE                0x200000
+#define MEM_REGION_SLOT                10
+
+static const uint64_t MMIO_VAL = 0xbeefull;
+
+extern const uint64_t final_rip_start;
+extern const uint64_t final_rip_end;
+
+static sem_t vcpu_ready;
+
+static inline uint64_t guest_spin_on_val(uint64_t spin_val)
+{
+       uint64_t val;
+
+       do {
+               val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
+       } while (val == spin_val);
+
+       GUEST_SYNC(0);
+       return val;
+}
+
+static void *vcpu_worker(void *data)
+{
+       struct kvm_vm *vm = data;
+       struct kvm_run *run;
+       struct ucall uc;
+       uint64_t cmd;
+
+       /*
+        * Loop until the guest is done.  Re-enter the guest on all MMIO exits,
+        * which will occur if the guest attempts to access a memslot after it
+        * has been deleted or while it is being moved.
+        */
+       run = vcpu_state(vm, VCPU_ID);
+
+       while (1) {
+               vcpu_run(vm, VCPU_ID);
+
+               if (run->exit_reason == KVM_EXIT_IO) {
+                       cmd = get_ucall(vm, VCPU_ID, &uc);
+                       if (cmd != UCALL_SYNC)
+                               break;
+
+                       sem_post(&vcpu_ready);
+                       continue;
+               }
+
+               if (run->exit_reason != KVM_EXIT_MMIO)
+                       break;
+
+               TEST_ASSERT(!run->mmio.is_write, "Unexpected exit mmio write");
+               TEST_ASSERT(run->mmio.len == 8,
+                           "Unexpected exit mmio size = %u", run->mmio.len);
+
+               TEST_ASSERT(run->mmio.phys_addr == MEM_REGION_GPA,
+                           "Unexpected exit mmio address = 0x%llx",
+                           run->mmio.phys_addr);
+               memcpy(run->mmio.data, &MMIO_VAL, 8);
+       }
+
+       if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
+               TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
+                         __FILE__, uc.args[1], uc.args[2]);
+
+       return NULL;
+}
+
+static void wait_for_vcpu(void)
+{
+       struct timespec ts;
+
+       TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
+                   "clock_gettime() failed: %d\n", errno);
+
+       ts.tv_sec += 2;
+       TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
+                   "sem_timedwait() failed: %d\n", errno);
+
+       /* Wait for the vCPU thread to reenter the guest. */
+       usleep(100000);
+}
+
+static struct kvm_vm *spawn_vm(pthread_t *vcpu_thread, void *guest_code)
+{
+       struct kvm_vm *vm;
+       uint64_t *hva;
+       uint64_t gpa;
+
+       vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
+                                   MEM_REGION_GPA, MEM_REGION_SLOT,
+                                   MEM_REGION_SIZE / getpagesize(), 0);
+
+       /*
+        * Allocate and map two pages so that the GPA accessed by guest_code()
+        * stays valid across the memslot move.
+        */
+       gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
+       TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
+
+       virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2, 0);
+
+       /* Ditto for the host mapping so that both pages can be zeroed. */
+       hva = addr_gpa2hva(vm, MEM_REGION_GPA);
+       memset(hva, 0, 2 * 4096);
+
+       pthread_create(vcpu_thread, NULL, vcpu_worker, vm);
+
+       /* Ensure the guest thread is spun up. */
+       wait_for_vcpu();
+
+       return vm;
+}
+
+
+static void guest_code_move_memory_region(void)
+{
+       uint64_t val;
+
+       GUEST_SYNC(0);
+
+       /*
+        * Spin until the memory region is moved to a misaligned address.  This
+        * may or may not trigger MMIO, as the window where the memslot is
+        * invalid is quite small.
+        */
+       val = guest_spin_on_val(0);
+       GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val);
+
+       /* Spin until the memory region is realigned. */
+       val = guest_spin_on_val(MMIO_VAL);
+       GUEST_ASSERT_1(val == 1, val);
+
+       GUEST_DONE();
+}
+
+static void test_move_memory_region(void)
+{
+       pthread_t vcpu_thread;
+       struct kvm_vm *vm;
+       uint64_t *hva;
+
+       vm = spawn_vm(&vcpu_thread, guest_code_move_memory_region);
+
+       hva = addr_gpa2hva(vm, MEM_REGION_GPA);
+
+       /*
+        * Shift the region's base GPA.  The guest should not see "2" as the
+        * hva->gpa translation is misaligned, i.e. the guest is accessing a
+        * different host pfn.
+        */
+       vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);
+       WRITE_ONCE(*hva, 2);
+
+       /*
+        * The guest _might_ see an invalid memslot and trigger MMIO, but it's
+        * a tiny window.  Spin and defer the sync until the memslot is
+        * restored and guest behavior is once again deterministic.
+        */
+       usleep(100000);
+
+       /*
+        * Note, value in memory needs to be changed *before* restoring the
+        * memslot, else the guest could race the update and see "2".
+        */
+       WRITE_ONCE(*hva, 1);
+
+       /* Restore the original base, the guest should see "1". */
+       vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);
+       wait_for_vcpu();
+       /* Deferred sync from when the memslot was misaligned (above). */
+       wait_for_vcpu();
+
+       pthread_join(vcpu_thread, NULL);
+
+       kvm_vm_free(vm);
+}
+
+static void guest_code_delete_memory_region(void)
+{
+       uint64_t val;
+
+       GUEST_SYNC(0);
+
+       /* Spin until the memory region is deleted. */
+       val = guest_spin_on_val(0);
+       GUEST_ASSERT_1(val == MMIO_VAL, val);
+
+       /* Spin until the memory region is recreated. */
+       val = guest_spin_on_val(MMIO_VAL);
+       GUEST_ASSERT_1(val == 0, val);
+
+       /* Spin until the memory region is deleted. */
+       val = guest_spin_on_val(0);
+       GUEST_ASSERT_1(val == MMIO_VAL, val);
+
+       asm("1:\n\t"
+           ".pushsection .rodata\n\t"
+           ".global final_rip_start\n\t"
+           "final_rip_start: .quad 1b\n\t"
+           ".popsection");
+
+       /* Spin indefinitely (until the code memslot is deleted). */
+       guest_spin_on_val(MMIO_VAL);
+
+       asm("1:\n\t"
+           ".pushsection .rodata\n\t"
+           ".global final_rip_end\n\t"
+           "final_rip_end: .quad 1b\n\t"
+           ".popsection");
+
+       GUEST_ASSERT_1(0, 0);
+}
+
+static void test_delete_memory_region(void)
+{
+       pthread_t vcpu_thread;
+       struct kvm_regs regs;
+       struct kvm_run *run;
+       struct kvm_vm *vm;
+
+       vm = spawn_vm(&vcpu_thread, guest_code_delete_memory_region);
+
+       /* Delete the memory region, the guest should not die. */
+       vm_mem_region_delete(vm, MEM_REGION_SLOT);
+       wait_for_vcpu();
+
+       /* Recreate the memory region.  The guest should see "0". */
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
+                                   MEM_REGION_GPA, MEM_REGION_SLOT,
+                                   MEM_REGION_SIZE / getpagesize(), 0);
+       wait_for_vcpu();
+
+       /* Delete the region again so that there's only one memslot left. */
+       vm_mem_region_delete(vm, MEM_REGION_SLOT);
+       wait_for_vcpu();
+
+       /*
+        * Delete the primary memslot.  This should cause an emulation error or
+        * shutdown due to the page tables getting nuked.
+        */
+       vm_mem_region_delete(vm, 0);
+
+       pthread_join(vcpu_thread, NULL);
+
+       run = vcpu_state(vm, VCPU_ID);
+
+       TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN ||
+                   run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
+                   "Unexpected exit reason = %d", run->exit_reason);
+
+       vcpu_regs_get(vm, VCPU_ID, &regs);
+
+       TEST_ASSERT(regs.rip >= final_rip_start &&
+                   regs.rip < final_rip_end,
+                   "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx\n",
+                   final_rip_start, final_rip_end, regs.rip);
+
+       kvm_vm_free(vm);
+}
+
+static void test_zero_memory_regions(void)
+{
+       struct kvm_run *run;
+       struct kvm_vm *vm;
+
+       pr_info("Testing KVM_RUN with zero added memory regions\n");
+
+       vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+       vm_vcpu_add(vm, VCPU_ID);
+
+       TEST_ASSERT(!ioctl(vm_get_fd(vm), KVM_SET_NR_MMU_PAGES, 64),
+                   "KVM_SET_NR_MMU_PAGES failed, errno = %d\n", errno);
+       vcpu_run(vm, VCPU_ID);
+
+       run = vcpu_state(vm, VCPU_ID);
+       TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
+                   "Unexpected exit_reason = %u\n", run->exit_reason);
+
+       kvm_vm_free(vm);
+}
+#endif /* __x86_64__ */
+
+int main(int argc, char *argv[])
+{
+#ifdef __x86_64__
+       int i, loops;
+#endif
+
+       /* Tell stdout not to buffer its content */
+       setbuf(stdout, NULL);
+
+#ifdef __x86_64__
+       /*
+        * FIXME: the zero-memslot test fails on aarch64 and s390x because
+        * KVM_RUN fails with ENOEXEC or EFAULT.
+        */
+       test_zero_memory_regions();
+
+       if (argc > 1)
+               loops = atoi(argv[1]);
+       else
+               loops = 10;
+
+       pr_info("Testing MOVE of in-use region, %d loops\n", loops);
+       for (i = 0; i < loops; i++)
+               test_move_memory_region();
+
+       pr_info("Testing DELETE of in-use region, %d loops\n", loops);
+       for (i = 0; i < loops; i++)
+               test_delete_memory_region();
+#endif
+
+       return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c b/tools/testing/selftests/kvm/x86_64/set_memory_region_test.c
deleted file mode 100644 (file)
index c274ce6..0000000
+++ /dev/null
@@ -1,337 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define _GNU_SOURCE /* for program_invocation_short_name */
-#include <fcntl.h>
-#include <pthread.h>
-#include <sched.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-
-#include <linux/compiler.h>
-
-#include <test_util.h>
-#include <kvm_util.h>
-#include <processor.h>
-
-#define VCPU_ID 0
-
-/*
- * Somewhat arbitrary location and slot, intended to not overlap anything.  The
- * location and size are specifically 2mb sized/aligned so that the initial
- * region corresponds to exactly one large page.
- */
-#define MEM_REGION_GPA         0xc0000000
-#define MEM_REGION_SIZE                0x200000
-#define MEM_REGION_SLOT                10
-
-static const uint64_t MMIO_VAL = 0xbeefull;
-
-extern const uint64_t final_rip_start;
-extern const uint64_t final_rip_end;
-
-static sem_t vcpu_ready;
-
-static inline uint64_t guest_spin_on_val(uint64_t spin_val)
-{
-       uint64_t val;
-
-       do {
-               val = READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
-       } while (val == spin_val);
-
-       GUEST_SYNC(0);
-       return val;
-}
-
-static void *vcpu_worker(void *data)
-{
-       struct kvm_vm *vm = data;
-       struct kvm_run *run;
-       struct ucall uc;
-       uint64_t cmd;
-
-       /*
-        * Loop until the guest is done.  Re-enter the guest on all MMIO exits,
-        * which will occur if the guest attempts to access a memslot after it
-        * has been deleted or while it is being moved .
-        */
-       run = vcpu_state(vm, VCPU_ID);
-
-       while (1) {
-               vcpu_run(vm, VCPU_ID);
-
-               if (run->exit_reason == KVM_EXIT_IO) {
-                       cmd = get_ucall(vm, VCPU_ID, &uc);
-                       if (cmd != UCALL_SYNC)
-                               break;
-
-                       sem_post(&vcpu_ready);
-                       continue;
-               }
-
-               if (run->exit_reason != KVM_EXIT_MMIO)
-                       break;
-
-               TEST_ASSERT(!run->mmio.is_write, "Unexpected exit mmio write");
-               TEST_ASSERT(run->mmio.len == 8,
-                           "Unexpected exit mmio size = %u", run->mmio.len);
-
-               TEST_ASSERT(run->mmio.phys_addr == MEM_REGION_GPA,
-                           "Unexpected exit mmio address = 0x%llx",
-                           run->mmio.phys_addr);
-               memcpy(run->mmio.data, &MMIO_VAL, 8);
-       }
-
-       if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
-               TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
-                         __FILE__, uc.args[1], uc.args[2]);
-
-       return NULL;
-}
-
-static void wait_for_vcpu(void)
-{
-       struct timespec ts;
-
-       TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts),
-                   "clock_gettime() failed: %d\n", errno);
-
-       ts.tv_sec += 2;
-       TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts),
-                   "sem_timedwait() failed: %d\n", errno);
-
-       /* Wait for the vCPU thread to reenter the guest. */
-       usleep(100000);
-}
-
-static struct kvm_vm *spawn_vm(pthread_t *vcpu_thread, void *guest_code)
-{
-       struct kvm_vm *vm;
-       uint64_t *hva;
-       uint64_t gpa;
-
-       vm = vm_create_default(VCPU_ID, 0, guest_code);
-
-       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-
-       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
-                                   MEM_REGION_GPA, MEM_REGION_SLOT,
-                                   MEM_REGION_SIZE / getpagesize(), 0);
-
-       /*
-        * Allocate and map two pages so that the GPA accessed by guest_code()
-        * stays valid across the memslot move.
-        */
-       gpa = vm_phy_pages_alloc(vm, 2, MEM_REGION_GPA, MEM_REGION_SLOT);
-       TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
-
-       virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 2, 0);
-
-       /* Ditto for the host mapping so that both pages can be zeroed. */
-       hva = addr_gpa2hva(vm, MEM_REGION_GPA);
-       memset(hva, 0, 2 * 4096);
-
-       pthread_create(vcpu_thread, NULL, vcpu_worker, vm);
-
-       /* Ensure the guest thread is spun up. */
-       wait_for_vcpu();
-
-       return vm;
-}
-
-
-static void guest_code_move_memory_region(void)
-{
-       uint64_t val;
-
-       GUEST_SYNC(0);
-
-       /*
-        * Spin until the memory region is moved to a misaligned address.  This
-        * may or may not trigger MMIO, as the window where the memslot is
-        * invalid is quite small.
-        */
-       val = guest_spin_on_val(0);
-       GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val);
-
-       /* Spin until the memory region is realigned. */
-       val = guest_spin_on_val(MMIO_VAL);
-       GUEST_ASSERT_1(val == 1, val);
-
-       GUEST_DONE();
-}
-
-static void test_move_memory_region(void)
-{
-       pthread_t vcpu_thread;
-       struct kvm_vm *vm;
-       uint64_t *hva;
-
-       vm = spawn_vm(&vcpu_thread, guest_code_move_memory_region);
-
-       hva = addr_gpa2hva(vm, MEM_REGION_GPA);
-
-       /*
-        * Shift the region's base GPA.  The guest should not see "2" as the
-        * hva->gpa translation is misaligned, i.e. the guest is accessing a
-        * different host pfn.
-        */
-       vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA - 4096);
-       WRITE_ONCE(*hva, 2);
-
-       /*
-        * The guest _might_ see an invalid memslot and trigger MMIO, but it's
-        * a tiny window.  Spin and defer the sync until the memslot is
-        * restored and guest behavior is once again deterministic.
-        */
-       usleep(100000);
-
-       /*
-        * Note, value in memory needs to be changed *before* restoring the
-        * memslot, else the guest could race the update and see "2".
-        */
-       WRITE_ONCE(*hva, 1);
-
-       /* Restore the original base, the guest should see "1". */
-       vm_mem_region_move(vm, MEM_REGION_SLOT, MEM_REGION_GPA);
-       wait_for_vcpu();
-       /* Defered sync from when the memslot was misaligned (above). */
-       wait_for_vcpu();
-
-       pthread_join(vcpu_thread, NULL);
-
-       kvm_vm_free(vm);
-}
-
-static void guest_code_delete_memory_region(void)
-{
-       uint64_t val;
-
-       GUEST_SYNC(0);
-
-       /* Spin until the memory region is deleted. */
-       val = guest_spin_on_val(0);
-       GUEST_ASSERT_1(val == MMIO_VAL, val);
-
-       /* Spin until the memory region is recreated. */
-       val = guest_spin_on_val(MMIO_VAL);
-       GUEST_ASSERT_1(val == 0, val);
-
-       /* Spin until the memory region is deleted. */
-       val = guest_spin_on_val(0);
-       GUEST_ASSERT_1(val == MMIO_VAL, val);
-
-       asm("1:\n\t"
-           ".pushsection .rodata\n\t"
-           ".global final_rip_start\n\t"
-           "final_rip_start: .quad 1b\n\t"
-           ".popsection");
-
-       /* Spin indefinitely (until the code memslot is deleted). */
-       guest_spin_on_val(MMIO_VAL);
-
-       asm("1:\n\t"
-           ".pushsection .rodata\n\t"
-           ".global final_rip_end\n\t"
-           "final_rip_end: .quad 1b\n\t"
-           ".popsection");
-
-       GUEST_ASSERT_1(0, 0);
-}
-
-static void test_delete_memory_region(void)
-{
-       pthread_t vcpu_thread;
-       struct kvm_regs regs;
-       struct kvm_run *run;
-       struct kvm_vm *vm;
-
-       vm = spawn_vm(&vcpu_thread, guest_code_delete_memory_region);
-
-       /* Delete the memory region, the guest should not die. */
-       vm_mem_region_delete(vm, MEM_REGION_SLOT);
-       wait_for_vcpu();
-
-       /* Recreate the memory region.  The guest should see "0". */
-       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS_THP,
-                                   MEM_REGION_GPA, MEM_REGION_SLOT,
-                                   MEM_REGION_SIZE / getpagesize(), 0);
-       wait_for_vcpu();
-
-       /* Delete the region again so that there's only one memslot left. */
-       vm_mem_region_delete(vm, MEM_REGION_SLOT);
-       wait_for_vcpu();
-
-       /*
-        * Delete the primary memslot.  This should cause an emulation error or
-        * shutdown due to the page tables getting nuked.
-        */
-       vm_mem_region_delete(vm, 0);
-
-       pthread_join(vcpu_thread, NULL);
-
-       run = vcpu_state(vm, VCPU_ID);
-
-       TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN ||
-                   run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
-                   "Unexpected exit reason = %d", run->exit_reason);
-
-       vcpu_regs_get(vm, VCPU_ID, &regs);
-
-       TEST_ASSERT(regs.rip >= final_rip_start &&
-                   regs.rip < final_rip_end,
-                   "Bad rip, expected 0x%lx - 0x%lx, got 0x%llx\n",
-                   final_rip_start, final_rip_end, regs.rip);
-
-       kvm_vm_free(vm);
-}
-
-static void test_zero_memory_regions(void)
-{
-       struct kvm_run *run;
-       struct kvm_vm *vm;
-
-       pr_info("Testing KVM_RUN with zero added memory regions\n");
-
-       vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
-       vm_vcpu_add(vm, VCPU_ID);
-
-       TEST_ASSERT(!ioctl(vm_get_fd(vm), KVM_SET_NR_MMU_PAGES, 64),
-                   "KVM_SET_NR_MMU_PAGES failed, errno = %d\n", errno);
-
-       vcpu_run(vm, VCPU_ID);
-
-       run = vcpu_state(vm, VCPU_ID);
-       TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
-                   "Unexpected exit_reason = %u\n", run->exit_reason);
-
-       kvm_vm_free(vm);
-}
-
-int main(int argc, char *argv[])
-{
-       int i, loops;
-
-       /* Tell stdout not to buffer its content */
-       setbuf(stdout, NULL);
-
-       test_zero_memory_regions();
-
-       if (argc > 1)
-               loops = atoi(argv[1]);
-       else
-               loops = 10;
-
-       pr_info("Testing MOVE of in-use region, %d loops\n", loops);
-       for (i = 0; i < loops; i++)
-               test_move_memory_region();
-
-       pr_info("Testing DELETE of in-use region, %d loops\n", loops);
-       for (i = 0; i < loops; i++)
-               test_delete_memory_region();
-
-       return 0;
-}