KVM: selftests: aarch64: Add mix of tests into page_fault_test
author      Ricardo Koller <ricarkol@google.com>
            Mon, 17 Oct 2022 19:58:34 +0000 (19:58 +0000)
committer   Marc Zyngier <maz@kernel.org>
            Thu, 10 Nov 2022 19:10:27 +0000 (19:10 +0000)
Add a mix of tests into page_fault_test: memory regions with all the
pairwise combinations of read-only, userfaultfd, and dirty-logging.  For
example, writing into a read-only region that has a hole handled with
userfaultfd.

Signed-off-by: Ricardo Koller <ricarkol@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221017195834.2295901-15-ricarkol@google.com
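
As an illustration of how the pairwise combinations compose, here is one of
the descriptors added by this patch (lifted verbatim from the new test table
in the diff below, not a standalone program): the data and PT memory regions
are dirty-logged while their backing stores also have userfaultfd-tracked
holes, so a 64-bit guest write is expected both to show up in the dirty log
and to raise two userfaultfd write faults.

    /* dirty logging + userfaultfd on the same data/PT regions: the write
     * must be reflected in the dirty log and must fault twice via UFFD
     * (once for the data page, once for the stage-1 page-table page). */
    TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af, uffd_data_write_handler,
                            2, guest_check_write_in_dirty_log),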
tools/testing/selftests/kvm/aarch64/page_fault_test.c

index 727f4f2b6cc446bcc8a0fa88a80fc5c94e42002a..05bb6a6369c25bb0f32b22445f4435f8b295986d 100644 (file)
@@ -399,6 +399,12 @@ static void free_uffd(struct test_desc *test, struct uffd_desc *pt_uffd,
        free(data_args.copy);
 }
 
+static int uffd_no_handler(int mode, int uffd, struct uffd_msg *msg)
+{
+       TEST_FAIL("There was no UFFD fault expected.");
+       return -1;
+}
+
 /* Returns false if the test should be skipped. */
 static bool punch_hole_in_backing_store(struct kvm_vm *vm,
                                        struct userspace_mem_region *region)
@@ -799,6 +805,22 @@ static void help(char *name)
        .expected_events        = { 0 },                                        \
 }
 
+#define TEST_UFFD_AND_DIRTY_LOG(_access, _with_af, _uffd_data_handler,         \
+                               _uffd_faults, _test_check)                      \
+{                                                                              \
+       .name                   = SCAT3(uffd_and_dirty_log, _access, _with_af), \
+       .data_memslot_flags     = KVM_MEM_LOG_DIRTY_PAGES,                      \
+       .pt_memslot_flags       = KVM_MEM_LOG_DIRTY_PAGES,                      \
+       .guest_prepare          = { _PREPARE(_with_af),                         \
+                                   _PREPARE(_access) },                        \
+       .guest_test             = _access,                                      \
+       .mem_mark_cmd           = CMD_HOLE_DATA | CMD_HOLE_PT,                  \
+       .guest_test_check       = { _CHECK(_with_af), _test_check },            \
+       .uffd_data_handler      = _uffd_data_handler,                           \
+       .uffd_pt_handler        = uffd_pt_write_handler,                        \
+       .expected_events        = { .uffd_faults = _uffd_faults, },             \
+}
+
 #define TEST_RO_MEMSLOT(_access, _mmio_handler, _mmio_exits)                   \
 {                                                                              \
        .name                   = SCAT3(ro_memslot, _access, _with_af),         \
@@ -818,6 +840,59 @@ static void help(char *name)
        .expected_events        = { .fail_vcpu_runs = 1 },                      \
 }
 
+#define TEST_RO_MEMSLOT_AND_DIRTY_LOG(_access, _mmio_handler, _mmio_exits,     \
+                                     _test_check)                              \
+{                                                                              \
+       .name                   = SCAT3(ro_memslot, _access, _with_af),         \
+       .data_memslot_flags     = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES,   \
+       .pt_memslot_flags       = KVM_MEM_LOG_DIRTY_PAGES,                      \
+       .guest_prepare          = { _PREPARE(_access) },                        \
+       .guest_test             = _access,                                      \
+       .guest_test_check       = { _test_check },                              \
+       .mmio_handler           = _mmio_handler,                                \
+       .expected_events        = { .mmio_exits = _mmio_exits},                 \
+}
+
+#define TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(_access, _test_check)                \
+{                                                                              \
+       .name                   = SCAT2(ro_memslot_no_syn_and_dlog, _access),   \
+       .data_memslot_flags     = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES,   \
+       .pt_memslot_flags       = KVM_MEM_LOG_DIRTY_PAGES,                      \
+       .guest_test             = _access,                                      \
+       .guest_test_check       = { _test_check },                              \
+       .fail_vcpu_run_handler  = fail_vcpu_run_mmio_no_syndrome_handler,       \
+       .expected_events        = { .fail_vcpu_runs = 1 },                      \
+}
+
+#define TEST_RO_MEMSLOT_AND_UFFD(_access, _mmio_handler, _mmio_exits,          \
+                                _uffd_data_handler, _uffd_faults)              \
+{                                                                              \
+       .name                   = SCAT2(ro_memslot_uffd, _access),              \
+       .data_memslot_flags     = KVM_MEM_READONLY,                             \
+       .mem_mark_cmd           = CMD_HOLE_DATA | CMD_HOLE_PT,                  \
+       .guest_prepare          = { _PREPARE(_access) },                        \
+       .guest_test             = _access,                                      \
+       .uffd_data_handler      = _uffd_data_handler,                           \
+       .uffd_pt_handler        = uffd_pt_write_handler,                        \
+       .mmio_handler           = _mmio_handler,                                \
+       .expected_events        = { .mmio_exits = _mmio_exits,                  \
+                                   .uffd_faults = _uffd_faults },              \
+}
+
+#define TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(_access, _uffd_data_handler,      \
+                                            _uffd_faults)                      \
+{                                                                              \
+       .name                   = SCAT2(ro_memslot_no_syndrome, _access),       \
+       .data_memslot_flags     = KVM_MEM_READONLY,                             \
+       .mem_mark_cmd           = CMD_HOLE_DATA | CMD_HOLE_PT,                  \
+       .guest_test             = _access,                                      \
+       .uffd_data_handler      = _uffd_data_handler,                           \
+       .uffd_pt_handler        = uffd_pt_write_handler,                        \
+       .fail_vcpu_run_handler  = fail_vcpu_run_mmio_no_syndrome_handler,       \
+       .expected_events        = { .fail_vcpu_runs = 1,                        \
+                                   .uffd_faults = _uffd_faults },              \
+}
+
 static struct test_desc tests[] = {
 
        /* Check that HW is setting the Access Flag (AF) (sanity checks). */
@@ -892,6 +967,35 @@ static struct test_desc tests[] = {
        TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log),
        TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log),
 
+       /*
+        * Access when the data and PT memory regions are both marked for
+        * dirty logging and UFFD at the same time. The expected result is
+        * that writes should mark the dirty log and trigger a userfaultfd
+        * write fault.  Reads/execs should result in a read userfaultfd
+        * fault, and nothing in the dirty log.  Any S1PTW should result in
+        * a write in the dirty log and a userfaultfd write.
+        */
+       TEST_UFFD_AND_DIRTY_LOG(guest_read64, with_af, uffd_data_read_handler, 2,
+                               guest_check_no_write_in_dirty_log),
+       /* no_af should also lead to a PT write. */
+       TEST_UFFD_AND_DIRTY_LOG(guest_read64, no_af, uffd_data_read_handler, 2,
+                               guest_check_no_write_in_dirty_log),
+       TEST_UFFD_AND_DIRTY_LOG(guest_ld_preidx, with_af, uffd_data_read_handler,
+                               2, guest_check_no_write_in_dirty_log),
+       TEST_UFFD_AND_DIRTY_LOG(guest_at, with_af, 0, 1,
+                               guest_check_no_write_in_dirty_log),
+       TEST_UFFD_AND_DIRTY_LOG(guest_exec, with_af, uffd_data_read_handler, 2,
+                               guest_check_no_write_in_dirty_log),
+       TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af, uffd_data_write_handler,
+                               2, guest_check_write_in_dirty_log),
+       TEST_UFFD_AND_DIRTY_LOG(guest_cas, with_af, uffd_data_read_handler, 2,
+                               guest_check_write_in_dirty_log),
+       TEST_UFFD_AND_DIRTY_LOG(guest_dc_zva, with_af, uffd_data_write_handler,
+                               2, guest_check_write_in_dirty_log),
+       TEST_UFFD_AND_DIRTY_LOG(guest_st_preidx, with_af,
+                               uffd_data_write_handler, 2,
+                               guest_check_write_in_dirty_log),
+
        /*
         * Try accesses when the data memory region is marked read-only
         * (with KVM_MEM_READONLY). Writes with a syndrome result in an
@@ -908,6 +1012,57 @@ static struct test_desc tests[] = {
        TEST_RO_MEMSLOT_NO_SYNDROME(guest_cas),
        TEST_RO_MEMSLOT_NO_SYNDROME(guest_st_preidx),
 
+       /*
+        * Access when the data memory region is both read-only and marked
+        * for dirty logging at the same time. The expected result is that
+        * for writes there should be no write in the dirty log. The
+        * readonly handling is the same as if the memslot was not marked
+        * for dirty logging: writes with a syndrome result in an MMIO
+        * exit, and writes with no syndrome result in a failed vcpu run.
+        */
+       TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_read64, 0, 0,
+                                     guest_check_no_write_in_dirty_log),
+       TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_ld_preidx, 0, 0,
+                                     guest_check_no_write_in_dirty_log),
+       TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_at, 0, 0,
+                                     guest_check_no_write_in_dirty_log),
+       TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_exec, 0, 0,
+                                     guest_check_no_write_in_dirty_log),
+       TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_write64, mmio_on_test_gpa_handler,
+                                     1, guest_check_no_write_in_dirty_log),
+       TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_dc_zva,
+                                                 guest_check_no_write_in_dirty_log),
+       TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_cas,
+                                                 guest_check_no_write_in_dirty_log),
+       TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_st_preidx,
+                                                 guest_check_no_write_in_dirty_log),
+
+       /*
+        * Access when the data region is both read-only and punched with
+        * holes tracked with userfaultfd.  The expected result is the
+        * union of both userfaultfd and read-only behaviors. For example,
+        * write accesses result in a userfaultfd write fault and an MMIO
+        * exit.  Writes with no syndrome result in a failed vcpu run and
+        * no userfaultfd write fault. Reads result in userfaultfd getting
+        * triggered.
+        */
+       TEST_RO_MEMSLOT_AND_UFFD(guest_read64, 0, 0,
+                                uffd_data_read_handler, 2),
+       TEST_RO_MEMSLOT_AND_UFFD(guest_ld_preidx, 0, 0,
+                                uffd_data_read_handler, 2),
+       TEST_RO_MEMSLOT_AND_UFFD(guest_at, 0, 0,
+                                uffd_no_handler, 1),
+       TEST_RO_MEMSLOT_AND_UFFD(guest_exec, 0, 0,
+                                uffd_data_read_handler, 2),
+       TEST_RO_MEMSLOT_AND_UFFD(guest_write64, mmio_on_test_gpa_handler, 1,
+                                uffd_data_write_handler, 2),
+       TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_cas,
+                                            uffd_data_read_handler, 2),
+       TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_dc_zva,
+                                            uffd_no_handler, 1),
+       TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_st_preidx,
+                                            uffd_no_handler, 1),
+
        { 0 }
 };