KVM: arm64: selftests: Move finalize_vcpu back to run_test
authorHaibo Xu <haibo1.xu@intel.com>
Tue, 25 Jul 2023 08:41:35 +0000 (16:41 +0800)
committerAnup Patel <anup@brainfault.org>
Wed, 9 Aug 2023 06:45:16 +0000 (12:15 +0530)
No functional changes. Just move the finalize_vcpu call back to
run_test and use the weak function trick to prepare for the operation
in riscv.
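
The pattern: the common code carries an empty __weak default, and an
architecture may supply a strong definition of the same symbol, which the
linker prefers at link time. A minimal, illustrative sketch (not part of
the patch; the feature value below is only an example), using the
declarations this patch already touches:

	/* get-reg-list.c (common): __weak no-op default, called from run_test() */
	void __weak finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
	{
	}

	/*
	 * aarch64/get-reg-list.c: a strong definition of the same symbol
	 * overrides the weak default, so run_test() picks up the arch-specific
	 * finalization without any #ifdefs in the common code.
	 */
	void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
	{
		int feature = KVM_ARM_VCPU_SVE;	/* example feature */

		vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
	}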

Suggested-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Haibo Xu <haibo1.xu@intel.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
tools/testing/selftests/kvm/aarch64/get-reg-list.c
tools/testing/selftests/kvm/get-reg-list.c
tools/testing/selftests/kvm/include/kvm_util_base.h

index f8ebc058b19142b9e462dee74771b8556c6d1b0b..709d7d72176035fedbf2131ef89babd5f16658b0 100644 (file)
@@ -84,6 +84,19 @@ bool check_reject_set(int err)
        return err == EPERM;
 }
 
+void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
+{
+       struct vcpu_reg_sublist *s;
+       int feature;
+
+       for_each_sublist(c, s) {
+               if (s->finalize) {
+                       feature = s->feature;
+                       vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
+               }
+       }
+}
+
 #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)
 
 #define CORE_REGS_XX_NR_WORDS  2
index 1df94e8e9ed5f41fe4b88efec5b37e5d4d40b6af..43a919f2208ff150e23a3d81eed36af5594dfc77 100644 (file)
@@ -34,9 +34,6 @@ static __u64 *blessed_reg, blessed_n;
 extern struct vcpu_reg_list *vcpu_configs[];
 extern int vcpu_configs_n;
 
-#define for_each_sublist(c, s)                                                 \
-       for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))
-
 #define for_each_reg(i)                                                                \
        for ((i) = 0; (i) < reg_list->n; ++(i))
 
@@ -109,6 +106,10 @@ bool __weak check_reject_set(int err)
        return true;
 }
 
+void __weak finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
+{
+}
+
 #ifdef __aarch64__
 static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
 {
@@ -119,19 +120,6 @@ static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *ini
                        init->features[s->feature / 32] |= 1 << (s->feature % 32);
 }
 
-static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
-{
-       struct vcpu_reg_sublist *s;
-       int feature;
-
-       for_each_sublist(c, s) {
-               if (s->finalize) {
-                       feature = s->feature;
-                       vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
-               }
-       }
-}
-
 static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
 {
        struct kvm_vcpu_init init = { .target = -1, };
@@ -140,7 +128,6 @@ static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm
        prepare_vcpu_init(c, &init);
        vcpu = __vm_vcpu_add(vm, 0);
        aarch64_vcpu_setup(vcpu, &init);
-       finalize_vcpu(vcpu, c);
 
        return vcpu;
 }
@@ -180,6 +167,7 @@ static void run_test(struct vcpu_reg_list *c)
 
        vm = vm_create_barebones();
        vcpu = vcpu_config_get_vcpu(c, vm);
+       finalize_vcpu(vcpu, c);
 
        reg_list = vcpu_get_reg_list(vcpu);
 
index b5189c7df4828d9b518f55851db9fc9048746e57..bc7c08a09d30ab3133778f88fe4c282264db3736 100644 (file)
@@ -141,6 +141,9 @@ struct vcpu_reg_list {
        struct vcpu_reg_sublist sublists[];
 };
 
+#define for_each_sublist(c, s)         \
+       for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))
+
 #define kvm_for_each_vcpu(vm, i, vcpu)                 \
        for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
                if (!((vcpu) = vm->vcpus[i]))           \