KVM: PPC: Book3S HV P9: Move vcpu register save/restore into functions
Author: Nicholas Piggin <npiggin@gmail.com>
Tue, 23 Nov 2021 09:52:06 +0000 (19:52 +1000)
Committer: Michael Ellerman <mpe@ellerman.id.au>
Wed, 24 Nov 2021 10:08:59 +0000 (21:08 +1100)
This should make no functional difference, but it makes the caller easier
to read.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-29-npiggin@gmail.com
arch/powerpc/kvm/book3s_hv.c

index 53fe41102c22069dd46fd6ffe56d8a560c329964..0eb52f2732a42188d0c2d6b9fff064b2ea5f716e 100644 (file)
@@ -4108,6 +4108,44 @@ static void store_spr_state(struct kvm_vcpu *vcpu)
        vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
 }
 
+/* Returns true if current MSR and/or guest MSR may have changed */
+static bool load_vcpu_state(struct kvm_vcpu *vcpu,
+                          struct p9_host_os_sprs *host_os_sprs)
+{
+       bool ret = false;
+
+       if (cpu_has_feature(CPU_FTR_TM) ||
+           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
+               kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+               ret = true;
+       }
+
+       load_spr_state(vcpu, host_os_sprs);
+
+       load_fp_state(&vcpu->arch.fp);
+#ifdef CONFIG_ALTIVEC
+       load_vr_state(&vcpu->arch.vr);
+#endif
+       mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
+
+       return ret;
+}
+
+static void store_vcpu_state(struct kvm_vcpu *vcpu)
+{
+       store_spr_state(vcpu);
+
+       store_fp_state(&vcpu->arch.fp);
+#ifdef CONFIG_ALTIVEC
+       store_vr_state(&vcpu->arch.vr);
+#endif
+       vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
+
+       if (cpu_has_feature(CPU_FTR_TM) ||
+           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+               kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+}
+
 static void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
 {
        host_os_sprs->dscr = mfspr(SPRN_DSCR);
@@ -4216,19 +4254,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
        vcpu_vpa_increment_dispatch(vcpu);
 
-       if (cpu_has_feature(CPU_FTR_TM) ||
-           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
-               kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
-               msr = mfmsr(); /* TM restore can update msr */
-       }
-
-       load_spr_state(vcpu, &host_os_sprs);
-
-       load_fp_state(&vcpu->arch.fp);
-#ifdef CONFIG_ALTIVEC
-       load_vr_state(&vcpu->arch.vr);
-#endif
-       mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
+       if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
+               msr = mfmsr(); /* MSR may have been updated */
 
        switch_pmu_to_guest(vcpu, &host_os_sprs);
 
@@ -4332,17 +4359,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
        switch_pmu_to_host(vcpu, &host_os_sprs);
 
-       store_spr_state(vcpu);
-
-       store_fp_state(&vcpu->arch.fp);
-#ifdef CONFIG_ALTIVEC
-       store_vr_state(&vcpu->arch.vr);
-#endif
-       vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
-
-       if (cpu_has_feature(CPU_FTR_TM) ||
-           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
-               kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+       store_vcpu_state(vcpu);
 
        vcpu_vpa_increment_dispatch(vcpu);