s390/kvm: convert to regular kernel fpu user
author: Heiko Carstens <hca@linux.ibm.com>
Sat, 3 Feb 2024 10:45:13 +0000 (11:45 +0100)
committer: Heiko Carstens <hca@linux.ibm.com>
Fri, 16 Feb 2024 13:30:16 +0000 (14:30 +0100)
KVM modifies the kernel fpu's regs pointer to its own area to implement its
custom version of preemptible kernel fpu context. With general support for
preemptible kernel fpu context there is no need for the extra complexity in
KVM code anymore.

Therefore convert KVM to a regular kernel fpu user. In particular this
means that all TIF_FPU checks can be removed, since the fpu register
context will never be changed by other kernel fpu users, and also the fpu
register context will be restored if a thread is preempted.

Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
arch/s390/include/asm/kvm_host.h
arch/s390/kernel/entry.S
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/vsie.c

index e336715eb7d2e4cf991e226fb9013f347abde685..56c2efb41cf25e571d0d7b7b32dfed3b764b1117 100644 (file)
@@ -743,7 +743,6 @@ struct kvm_vcpu_arch {
        struct kvm_s390_sie_block *vsie_block;
        unsigned int      host_acrs[NUM_ACRS];
        struct gs_cb      *host_gscb;
-       struct fpu        host_fpregs;
        struct kvm_s390_local_interrupt local_int;
        struct hrtimer    ckc_timer;
        struct kvm_s390_pgm_info pgm;
index 00f2e1741501c102ad8ba667774e6ad42046499f..fc5277eab554bb4f315bacc87017f8ae95558f81 100644 (file)
@@ -220,8 +220,6 @@ SYM_FUNC_START(__sie64a)
        oi      __SIE_PROG0C+3(%r14),1          # we are going into SIE now
        tm      __SIE_PROG20+3(%r14),3          # last exit...
        jnz     .Lsie_skip
-       TSTMSK  __SF_SIE_FLAGS(%r15),_TIF_FPU
-       jo      .Lsie_skip                      # exit if fp/vx regs changed
        lg      %r14,__SF_SIE_CONTROL_PHYS(%r15)        # get sie block phys addr
        BPEXIT  __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
 .Lsie_entry:
index 9315203c27867f341d60facbda40ec868fce3b01..c81708acd1f4c57572b4acc31e0f56ca3efbc38f 100644 (file)
@@ -584,7 +584,11 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
 
        mci.val = mchk->mcic;
        /* take care of lazy register loading */
-       save_user_fpu_regs();
+       fpu_stfpc(&vcpu->run->s.regs.fpc);
+       if (cpu_has_vx())
+               save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+       else
+               save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
        if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
                save_gs_cb(current->thread.gs_cb);
@@ -648,7 +652,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
        }
        rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
                             vcpu->run->s.regs.gprs, 128);
-       rc |= put_guest_lc(vcpu, current->thread.ufpu.fpc,
+       rc |= put_guest_lc(vcpu, vcpu->run->s.regs.fpc,
                           (u32 __user *) __LC_FP_CREG_SAVE_AREA);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
                           (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
index 3ce4029cabc25d0c171b0b5a9aaab819e1ef7b4e..8467945344b52c48d2cc5f0cf4775ec99c0b8923 100644 (file)
@@ -4829,8 +4829,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                               vcpu->run->s.regs.gprs,
                               sizeof(sie_page->pv_grregs));
                }
-               if (test_thread_flag(TIF_FPU))
-                       load_user_fpu_regs();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                if (kvm_s390_pv_cpu_is_protected(vcpu)) {
@@ -4951,16 +4949,11 @@ static void sync_regs(struct kvm_vcpu *vcpu)
        }
        save_access_regs(vcpu->arch.host_acrs);
        restore_access_regs(vcpu->run->s.regs.acrs);
-       /* save host (userspace) fprs/vrs */
-       save_user_fpu_regs();
-       vcpu->arch.host_fpregs.fpc = current->thread.ufpu.fpc;
-       vcpu->arch.host_fpregs.regs = current->thread.ufpu.regs;
+       fpu_lfpc_safe(&vcpu->run->s.regs.fpc);
        if (cpu_has_vx())
-               current->thread.ufpu.regs = vcpu->run->s.regs.vrs;
+               load_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
        else
-               current->thread.ufpu.regs = vcpu->run->s.regs.fprs;
-       current->thread.ufpu.fpc = vcpu->run->s.regs.fpc;
-
+               load_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
        /* Sync fmt2 only data */
        if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
                sync_regs_fmt2(vcpu);
@@ -5021,12 +5014,11 @@ static void store_regs(struct kvm_vcpu *vcpu)
        kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_access_regs(vcpu->arch.host_acrs);
-       /* Save guest register state */
-       save_user_fpu_regs();
-       vcpu->run->s.regs.fpc = current->thread.ufpu.fpc;
-       /* Restore will be done lazily at return */
-       current->thread.ufpu.fpc = vcpu->arch.host_fpregs.fpc;
-       current->thread.ufpu.regs = vcpu->arch.host_fpregs.regs;
+       fpu_stfpc(&vcpu->run->s.regs.fpc);
+       if (cpu_has_vx())
+               save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+       else
+               save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
        if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
                store_regs_fmt2(vcpu);
 }
@@ -5034,6 +5026,7 @@ static void store_regs(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *kvm_run = vcpu->run;
+       DECLARE_KERNEL_FPU_ONSTACK(fpu);
        int rc;
 
        /*
@@ -5075,6 +5068,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                goto out;
        }
 
+       kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
        sync_regs(vcpu);
        enable_cpu_timer_accounting(vcpu);
 
@@ -5098,6 +5092,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
        disable_cpu_timer_accounting(vcpu);
        store_regs(vcpu);
+       kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);
 
        kvm_sigset_deactivate(vcpu);
 
@@ -5172,8 +5167,11 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
         * switch in the run ioctl. Let's update our copies before we save
         * it into the save area
         */
-       save_user_fpu_regs();
-       vcpu->run->s.regs.fpc = current->thread.ufpu.fpc;
+       fpu_stfpc(&vcpu->run->s.regs.fpc);
+       if (cpu_has_vx())
+               save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+       else
+               save_fp_regs((freg_t *)&vcpu->run->s.regs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
 
        return kvm_s390_store_status_unloaded(vcpu, addr);
index e0f79c9a48529bbc96b922c1cc093971058591e9..3ec11612805da4dce22e67c1a52c877c351bae21 100644 (file)
@@ -18,7 +18,6 @@
 #include <asm/sclp.h>
 #include <asm/nmi.h>
 #include <asm/dis.h>
-#include <asm/fpu.h>
 #include <asm/facility.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
@@ -1149,8 +1148,6 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
         */
        vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
        barrier();
-       if (test_thread_flag(TIF_FPU))
-               load_user_fpu_regs();
        if (!kvm_s390_vcpu_sie_inhibited(vcpu))
                rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
        barrier();