KVM: s390: introduce kvm_s390_fpu_(store|load)
authorJanosch Frank <frankja@linux.ibm.com>
Tue, 20 Feb 2024 08:56:34 +0000 (08:56 +0000)
committerHeiko Carstens <hca@linux.ibm.com>
Wed, 21 Feb 2024 14:09:13 +0000 (15:09 +0100)
It's a bit nicer than having multiple lines, and it will help if there's
another rework, since we'll only have to change one location.

Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h

index c81708acd1f4c57572b4acc31e0f56ca3efbc38f..dc721d50a942f8e5090ca63e04160455b5327d6d 100644 (file)
@@ -584,11 +584,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 
        mci.val = mchk->mcic;
        /* take care of lazy register loading */
-       fpu_stfpc(&vcpu->run->s.regs.fpc);
-       if (cpu_has_vx())
-               save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-       else
-               save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+       kvm_s390_fpu_store(vcpu->run);
        save_access_regs(vcpu->run->s.regs.acrs);
        if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
                save_gs_cb(current->thread.gs_cb);
index 8c222b0dfbf2e7e68a953fd95fbee3a92cf84ae1..6500f80a70864f840ab50bfdb7e64df39bd2b4ce 100644 (file)
@@ -4949,11 +4949,7 @@ static void sync_regs(struct kvm_vcpu *vcpu)
        }
        save_access_regs(vcpu->arch.host_acrs);
        restore_access_regs(vcpu->run->s.regs.acrs);
-       fpu_lfpc_safe(&vcpu->run->s.regs.fpc);
-       if (cpu_has_vx())
-               load_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-       else
-               load_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+       kvm_s390_fpu_load(vcpu->run);
        /* Sync fmt2 only data */
        if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
                sync_regs_fmt2(vcpu);
@@ -5014,11 +5010,7 @@ static void store_regs(struct kvm_vcpu *vcpu)
        kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_access_regs(vcpu->arch.host_acrs);
-       fpu_stfpc(&vcpu->run->s.regs.fpc);
-       if (cpu_has_vx())
-               save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-       else
-               save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+       kvm_s390_fpu_store(vcpu->run);
        if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
                store_regs_fmt2(vcpu);
 }
@@ -5167,11 +5159,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
         * switch in the run ioctl. Let's update our copies before we save
         * it into the save area
         */
-       fpu_stfpc(&vcpu->run->s.regs.fpc);
-       if (cpu_has_vx())
-               save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-       else
-               save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
+       kvm_s390_fpu_store(vcpu->run);
        save_access_regs(vcpu->run->s.regs.acrs);
 
        return kvm_s390_store_status_unloaded(vcpu, addr);
index a7ea80cfa445e1a9929b2b36ccd4d7c88a0eb2bc..111eb5c7478409c4af5c8c3e8f76c730b1b5406c 100644 (file)
 #include <asm/processor.h>
 #include <asm/sclp.h>
 
+static inline void kvm_s390_fpu_store(struct kvm_run *run)
+{
+       fpu_stfpc(&run->s.regs.fpc);
+       if (cpu_has_vx())
+               save_vx_regs((__vector128 *)&run->s.regs.vrs);
+       else
+               save_fp_regs((freg_t *)&run->s.regs.fprs);
+}
+
+static inline void kvm_s390_fpu_load(struct kvm_run *run)
+{
+       fpu_lfpc_safe(&run->s.regs.fpc);
+       if (cpu_has_vx())
+               load_vx_regs((__vector128 *)&run->s.regs.vrs);
+       else
+               load_fp_regs((freg_t *)&run->s.regs.fprs);
+}
+
 /* Transactional Memory Execution related macros */
 #define IS_TE_ENABLED(vcpu)    ((vcpu->arch.sie_block->ecb & ECB_TE))
 #define TDB_FORMAT1            1